YugabyteDB (2.13.0.0-b42, bfc6a6643e7399ac8a0e81d06a3ee6d6571b33ab)

Coverage Report

Created: 2022-03-09 17:30

/Users/deen/code/yugabyte-db/src/postgres/src/backend/utils/mmgr/slab.c
/*-------------------------------------------------------------------------
 *
 * slab.c
 *    SLAB allocator definitions.
 *
 * SLAB is a MemoryContext implementation designed for cases where large
 * numbers of equally-sized objects are allocated (and freed).
 *
 *
 * Portions Copyright (c) 2017-2018, PostgreSQL Global Development Group
 *
 * IDENTIFICATION
 *    src/backend/utils/mmgr/slab.c
 *
 *
 * NOTE:
 *  The constant allocation size allows significant simplification and various
 *  optimizations over more general purpose allocators. The blocks are carved
 *  into chunks of exactly the right size (plus alignment), not wasting any
 *  memory.
 *
 *  The information about free chunks is maintained both at the block level
 *  and at the global (context) level. This is possible as the chunk size
 *  (and thus also the number of chunks per block) is fixed.
 *
 *  On each block, free chunks are tracked in a simple linked list. The
 *  contents of each free chunk are replaced with the index of the next free
 *  chunk, forming a very simple linked list. Each block also contains a
 *  counter of free chunks. Combined with the block-level freelist, this
 *  makes it trivial to eventually free the whole block.
 *
 *  At the context level, we use 'freelist' to group blocks by their number
 *  of free chunks: freelist[i] holds the blocks with exactly i free chunks,
 *  so completely full blocks sit in freelist[0].
 *
 *  This also allows various optimizations - for example when searching for
 *  a free chunk, the allocator reuses space from the fullest blocks first,
 *  in the hope that some of the less full blocks will become completely
 *  empty (and be returned back to the OS).
 *
 *  For each block, we maintain a pointer to the first free chunk - this is
 *  quite cheap and allows us to skip all the preceding used chunks,
 *  eliminating a significant number of lookups in many common usage
 *  patterns. In the worst case this performs as if the pointer was not
 *  maintained.
 *
 *  We cache the freelist index of the blocks with the fewest free chunks
 *  (minFreeChunks), so that we don't have to search the freelist on every
 *  SlabAlloc() call, which is quite expensive.
 *
 *-------------------------------------------------------------------------
 */
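
/*
 * Illustrative usage sketch (not part of this file): "MyNode" is a
 * hypothetical fixed-size struct; the surrounding calls are the standard
 * memory-context API from utils/memutils.h.
 *
 *    MemoryContext slab;
 *    MyNode     *node;
 *
 *    slab = SlabContextCreate(CurrentMemoryContext, "my nodes",
 *                 SLAB_DEFAULT_BLOCK_SIZE, sizeof(MyNode));
 *    node = (MyNode *) MemoryContextAlloc(slab, sizeof(MyNode));
 *    ...
 *    pfree(node);
 *
 * Every allocation from the context must request exactly sizeof(MyNode)
 * bytes - SlabAlloc() below rejects any other size - and the whole context
 * can be discarded at once with MemoryContextDelete(slab).
 */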

#include "postgres.h"

#include "utils/memdebug.h"
#include "utils/memutils.h"
#include "lib/ilist.h"


/*
 * SlabContext is a specialized implementation of MemoryContext.
 */
typedef struct SlabContext
{
  MemoryContextData header;   /* Standard memory-context fields */
  /* Allocation parameters for this context: */
  Size    chunkSize;      /* chunk size */
  Size    fullChunkSize;  /* chunk size including header and alignment */
  Size    blockSize;      /* block size */
  Size    headerSize;     /* allocated size of context header */
  int     chunksPerBlock; /* number of chunks per block */
  int     minFreeChunks;  /* min number of free chunks in any block */
  int     nblocks;        /* number of blocks allocated */
  /* blocks with free space, grouped by number of free chunks: */
  dlist_head  freelist[FLEXIBLE_ARRAY_MEMBER];
} SlabContext;

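/*
 * Concrete freelist picture (illustrative, chunksPerBlock = 4): the context
 * then has freelist[0] .. freelist[4]; a block with one free chunk sits in
 * freelist[1] and an entirely unused block in freelist[4], while
 * minFreeChunks caches the lowest i > 0 with a non-empty freelist[i], or 0
 * when no block has free chunks.
 */
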
/*
 * SlabBlock
 *    Structure of a single block in SLAB allocator.
 *
 * node: doubly-linked list of blocks in global freelist
 * nfree: number of free chunks in this block
 * firstFreeChunk: index of the first free chunk
 */
typedef struct SlabBlock
{
  dlist_node  node;           /* doubly-linked list */
  int     nfree;              /* number of free chunks */
  int     firstFreeChunk;     /* index of the first free chunk in the block */
} SlabBlock;

/*
 * SlabChunk
 *    The prefix of each piece of memory in a SlabBlock
 *
 * Note: to meet the memory context APIs, the payload area of the chunk must
 * be maxaligned, and the "slab" link must be immediately adjacent to the
 * payload area (cf. GetMemoryChunkContext).  Since we support no machines on
 * which MAXALIGN is more than twice sizeof(void *), this happens without any
 * special hacking in this struct declaration.  But there is a static
 * assertion below that the alignment is done correctly.
 */
typedef struct SlabChunk
{
  SlabBlock  *block;          /* block owning this chunk */
  SlabContext *slab;          /* owning context */
  /* there must not be any padding to reach a MAXALIGN boundary here! */
} SlabChunk;


#define SlabPointerGetChunk(ptr)  \
  ((SlabChunk *)(((char *)(ptr)) - sizeof(SlabChunk)))
#define SlabChunkGetPointer(chk)  \
  ((void *)(((char *)(chk)) + sizeof(SlabChunk)))
#define SlabBlockGetChunk(slab, block, idx) \
  ((SlabChunk *) ((char *) (block) + sizeof(SlabBlock)  \
          + (idx * slab->fullChunkSize)))
#define SlabBlockStart(block) \
  ((char *) block + sizeof(SlabBlock))
#define SlabChunkIndex(slab, block, chunk)  \
  (((char *) chunk - SlabBlockStart(block)) / slab->fullChunkSize)

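/*
 * Worked example of the macro arithmetic above (illustrative; assumes a
 * 64-bit build where MAXALIGN is 8 and pointers are 8 bytes): for
 * chunkSize = 32, fullChunkSize = sizeof(SlabChunk) + MAXALIGN(32)
 * = 16 + 32 = 48, so SlabBlockGetChunk(slab, block, 2) points at
 * (char *) block + sizeof(SlabBlock) + 96, and SlabChunkIndex() inverts
 * that computation.
 */
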
/*
 * These functions implement the MemoryContext API for Slab contexts.
 */
static void *SlabAlloc(MemoryContext context, Size size);
static void SlabFree(MemoryContext context, void *pointer);
static void *SlabRealloc(MemoryContext context, void *pointer, Size size);
static void SlabReset(MemoryContext context);
static void SlabDelete(MemoryContext context);
static Size SlabGetChunkSpace(MemoryContext context, void *pointer);
static bool SlabIsEmpty(MemoryContext context);
static void SlabStats(MemoryContext context,
      MemoryStatsPrintFunc printfunc, void *passthru,
      MemoryContextCounters *totals);
#ifdef MEMORY_CONTEXT_CHECKING
static void SlabCheck(MemoryContext context);
#endif

/*
 * This is the virtual function table for Slab contexts.
 */
static const MemoryContextMethods SlabMethods = {
  SlabAlloc,
  SlabFree,
  SlabRealloc,
  SlabReset,
  SlabDelete,
  SlabGetChunkSpace,
  SlabIsEmpty,
  SlabStats
#ifdef MEMORY_CONTEXT_CHECKING
  ,SlabCheck
#endif
};

/* ----------
 * Debug macros
 * ----------
 */
#ifdef HAVE_ALLOCINFO
#define SlabFreeInfo(_cxt, _chunk) \
      fprintf(stderr, "SlabFree: %s: %p, %zu\n", \
        (_cxt)->header.name, (_chunk), (_chunk)->header.size)
#define SlabAllocInfo(_cxt, _chunk) \
      fprintf(stderr, "SlabAlloc: %s: %p, %zu\n", \
        (_cxt)->header.name, (_chunk), (_chunk)->header.size)
#else
#define SlabFreeInfo(_cxt, _chunk)
#define SlabAllocInfo(_cxt, _chunk)
#endif


/*
 * SlabContextCreate
 *    Create a new Slab context.
 *
 * parent: parent context, or NULL if top-level context
 * name: name of context (must be statically allocated)
 * blockSize: allocation block size
 * chunkSize: allocation chunk size
 *
 * The chunkSize may not exceed:
 *    MAXALIGN_DOWN(SIZE_MAX) - MAXALIGN(sizeof(SlabBlock)) - SLAB_CHUNKHDRSZ
 */
MemoryContext
SlabContextCreate(MemoryContext parent,
          const char *name,
          Size blockSize,
          Size chunkSize)
{
  int     chunksPerBlock;
  Size    fullChunkSize;
  Size    freelistSize;
  Size    headerSize;
  SlabContext *slab;
  int     i;

  /* Assert we padded SlabChunk properly */
  StaticAssertStmt(sizeof(SlabChunk) == MAXALIGN(sizeof(SlabChunk)),
           "sizeof(SlabChunk) is not maxaligned");
  StaticAssertStmt(offsetof(SlabChunk, slab) + sizeof(MemoryContext) ==
           sizeof(SlabChunk),
           "padding calculation in SlabChunk is wrong");

  /* Make sure the linked list node fits inside a freed chunk */
  if (chunkSize < sizeof(int))
    chunkSize = sizeof(int);

  /* chunk, including SLAB header (both addresses nicely aligned) */
  fullChunkSize = sizeof(SlabChunk) + MAXALIGN(chunkSize);

  /* Make sure the block can store at least one chunk. */
  if (blockSize < fullChunkSize + sizeof(SlabBlock))
    elog(ERROR, "block size %zu for slab is too small for %zu-byte chunks",
       blockSize, chunkSize);

  /* Compute maximum number of chunks per block */
  chunksPerBlock = (blockSize - sizeof(SlabBlock)) / fullChunkSize;

  /* The freelist needs one slot per free-chunk count, 0 through chunksPerBlock. */
  freelistSize = sizeof(dlist_head) * (chunksPerBlock + 1);

  /*
   * Allocate the context header.  Unlike aset.c, we never try to combine
   * this with the first regular block; not worth the extra complication.
   */

  /* Size of the memory context header */
  headerSize = offsetof(SlabContext, freelist) + freelistSize;

  slab = (SlabContext *) malloc(headerSize);
  if (slab == NULL)
  {
    MemoryContextStats(TopMemoryContext);
    ereport(ERROR,
        (errcode(ERRCODE_OUT_OF_MEMORY),
         errmsg("out of memory"),
         errdetail("Failed while creating memory context \"%s\".",
               name)));
  }

  /*
   * Avoid writing code that can fail between here and MemoryContextCreate;
   * we'd leak the header if we ereport in this stretch.
   */

  /* Fill in SlabContext-specific header fields */
  slab->chunkSize = chunkSize;
  slab->fullChunkSize = fullChunkSize;
  slab->blockSize = blockSize;
  slab->headerSize = headerSize;
  slab->chunksPerBlock = chunksPerBlock;
  slab->minFreeChunks = 0;
  slab->nblocks = 0;

  /* initialize the freelist slots */
  for (i = 0; i < (slab->chunksPerBlock + 1); i++)
    dlist_init(&slab->freelist[i]);

  /* Finally, do the type-independent part of context creation */
  MemoryContextCreate((MemoryContext) slab,
            T_SlabContext,
            &SlabMethods,
            parent,
            name);

  return (MemoryContext) slab;
}
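
/*
 * Illustrative numbers for the computations above (hypothetical call,
 * 64-bit sizes): SlabContextCreate(parent, "example", 8192, 32) gives
 * fullChunkSize = 48, chunksPerBlock = (8192 - sizeof(SlabBlock)) / 48
 * = (8192 - 24) / 48 = 170, and freelistSize = 171 * sizeof(dlist_head);
 * the block holds 170 chunks with (8192 - 24) - 170 * 48 = 8 bytes left
 * over.
 */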

/*
 * SlabReset
 *    Frees all memory which is allocated in the given context.
 *
 * The code simply frees all the blocks in the context - we don't keep any
 * keeper blocks or anything like that.
 */
static void
SlabReset(MemoryContext context)
{
  int     i;
  SlabContext *slab = castNode(SlabContext, context);

  Assert(slab);

#ifdef MEMORY_CONTEXT_CHECKING
  /* Check for corruption and leaks before freeing */
  SlabCheck(context);
#endif

  /* walk over freelists and free the blocks */
  for (i = 0; i <= slab->chunksPerBlock; i++)
  {
    dlist_mutable_iter miter;

    dlist_foreach_modify(miter, &slab->freelist[i])
    {
      SlabBlock  *block = dlist_container(SlabBlock, node, miter.cur);

      dlist_delete(miter.cur);

#ifdef CLOBBER_FREED_MEMORY
      wipe_mem(block, slab->blockSize);
#endif
      free(block);
      slab->nblocks--;
    }
  }

  slab->minFreeChunks = 0;

  Assert(slab->nblocks == 0);
}

/*
 * SlabDelete
 *    Free all memory which is allocated in the given context.
 */
static void
SlabDelete(MemoryContext context)
{
  /* Reset to release all the SlabBlocks */
  SlabReset(context);
  /* And free the context header */
  free(context);
}

/*
 * SlabAlloc
 *    Returns a pointer to allocated memory of given size or NULL if
 *    the request could not be completed; memory is added to the slab.
 */
static void *
SlabAlloc(MemoryContext context, Size size)
{
  SlabContext *slab = castNode(SlabContext, context);
  SlabBlock  *block;
  SlabChunk  *chunk;
  int     idx;

  Assert(slab);

  Assert((slab->minFreeChunks >= 0) &&
       (slab->minFreeChunks < slab->chunksPerBlock));

  /* make sure we only allow requests of exactly the right size */
  if (size != slab->chunkSize)
    elog(ERROR, "unexpected alloc chunk size %zu (expected %zu)",
       size, slab->chunkSize);

  /*
   * If there are no free chunks in any existing block, create a new block
   * and put it into the last freelist bucket.
   *
   * slab->minFreeChunks == 0 means there are no blocks with free chunks,
   * thanks to how minFreeChunks is updated at the end of SlabAlloc().
   */
  if (slab->minFreeChunks == 0)
  {
    block = (SlabBlock *) malloc(slab->blockSize);

    if (block == NULL)
      return NULL;

    block->nfree = slab->chunksPerBlock;
    block->firstFreeChunk = 0;

    /*
     * Put all the chunks on a freelist. Walk the chunks and point each
     * one to the next one.
     */
    for (idx = 0; idx < slab->chunksPerBlock; idx++)
    {
      chunk = SlabBlockGetChunk(slab, block, idx);
      *(int32 *) SlabChunkGetPointer(chunk) = (idx + 1);
    }

    /*
     * And add it to the last freelist with all chunks empty.
     *
     * We know there are no blocks in the freelist, otherwise we wouldn't
     * need a new block.
     */
    Assert(dlist_is_empty(&slab->freelist[slab->chunksPerBlock]));

    dlist_push_head(&slab->freelist[slab->chunksPerBlock], &block->node);

    slab->minFreeChunks = slab->chunksPerBlock;
    slab->nblocks += 1;
  }

  /* grab the block from the freelist (even a freshly created block is there) */
  block = dlist_head_element(SlabBlock, node,
                 &slab->freelist[slab->minFreeChunks]);

  /* make sure we actually got a valid block, with matching nfree */
  Assert(block != NULL);
  Assert(slab->minFreeChunks == block->nfree);
  Assert(block->nfree > 0);

  /* we know the index of the first free chunk in the block */
  idx = block->firstFreeChunk;

  /* make sure the chunk index is valid, and that it's marked as empty */
  Assert((idx >= 0) && (idx < slab->chunksPerBlock));

  /* compute the chunk location from the block start (after the block header) */
  chunk = SlabBlockGetChunk(slab, block, idx);

  /*
   * Update the block nfree count, and also the minFreeChunks as we've
   * decreased nfree for a block with the minimum number of free chunks
   * (because that's how we chose the block).
   */
  block->nfree--;
  slab->minFreeChunks = block->nfree;

  /*
   * Remove the chunk from the freelist head. The index of the next free
   * chunk is stored in the chunk itself.
   */
  VALGRIND_MAKE_MEM_DEFINED(SlabChunkGetPointer(chunk), sizeof(int32));
  block->firstFreeChunk = *(int32 *) SlabChunkGetPointer(chunk);

  Assert(block->firstFreeChunk >= 0);
  Assert(block->firstFreeChunk <= slab->chunksPerBlock);

  Assert((block->nfree != 0 &&
      block->firstFreeChunk < slab->chunksPerBlock) ||
       (block->nfree == 0 &&
      block->firstFreeChunk == slab->chunksPerBlock));

  /* move the whole block to the right place in the freelist */
  dlist_delete(&block->node);
  dlist_push_head(&slab->freelist[block->nfree], &block->node);

  /*
   * And finally update minFreeChunks, i.e. the index to the block with the
   * lowest number of free chunks. We only need to do that when the block
   * got full (otherwise we know the current block is the right one). We'll
   * simply walk the freelist until we find a non-empty entry.
   */
  if (slab->minFreeChunks == 0)
  {
    for (idx = 1; idx <= slab->chunksPerBlock; idx++)
    {
      if (dlist_is_empty(&slab->freelist[idx]))
        continue;

      /* found a non-empty freelist */
      slab->minFreeChunks = idx;
      break;
    }
  }

  if (slab->minFreeChunks == slab->chunksPerBlock)
    slab->minFreeChunks = 0;

  /* Prepare to initialize the chunk header. */
  VALGRIND_MAKE_MEM_UNDEFINED(chunk, sizeof(SlabChunk));

  chunk->block = block;
  chunk->slab = slab;

#ifdef MEMORY_CONTEXT_CHECKING
  /* slab mark to catch clobber of "unused" space */
  if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
  {
    set_sentinel(SlabChunkGetPointer(chunk), size);
    VALGRIND_MAKE_MEM_NOACCESS(((char *) chunk) +
                   sizeof(SlabChunk) + slab->chunkSize,
                   slab->fullChunkSize -
                   (slab->chunkSize + sizeof(SlabChunk)));
  }
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
  /* fill the allocated space with junk */
  randomize_mem((char *) SlabChunkGetPointer(chunk), size);
#endif

  SlabAllocInfo(slab, chunk);
  return SlabChunkGetPointer(chunk);
}
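
/*
 * Illustrative trace of the freelist movement above (hypothetical numbers,
 * chunksPerBlock = 4): a block with nfree = 3 sits on freelist[3];
 * SlabAlloc() pops its first free chunk, decrements nfree to 2 and moves
 * the block to freelist[2]; minFreeChunks becomes 2, so the next
 * allocation again picks the fullest block that still has free space.
 */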

/*
 * SlabFree
 *    Frees allocated memory; memory is removed from the slab.
 */
static void
SlabFree(MemoryContext context, void *pointer)
{
  int     idx;
  SlabContext *slab = castNode(SlabContext, context);
  SlabChunk  *chunk = SlabPointerGetChunk(pointer);
  SlabBlock  *block = chunk->block;

  SlabFreeInfo(slab, chunk);

#ifdef MEMORY_CONTEXT_CHECKING
  /* Test for someone scribbling on unused space in chunk */
  if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
    if (!sentinel_ok(pointer, slab->chunkSize))
      elog(WARNING, "detected write past chunk end in %s %p",
         slab->header.name, chunk);
#endif

  /* compute index of the chunk with respect to block start */
  idx = SlabChunkIndex(slab, block, chunk);

  /* add chunk to freelist, and update block nfree count */
  *(int32 *) pointer = block->firstFreeChunk;
  block->firstFreeChunk = idx;
  block->nfree++;

  Assert(block->nfree > 0);
  Assert(block->nfree <= slab->chunksPerBlock);

#ifdef CLOBBER_FREED_MEMORY
  /* XXX don't wipe the int32 index, used for block-level freelist */
  wipe_mem((char *) pointer + sizeof(int32),
       slab->chunkSize - sizeof(int32));
#endif

  /* remove the block from a freelist */
  dlist_delete(&block->node);

  /*
   * See if we need to update the minFreeChunks field for the slab - we only
   * need to do that if the block had that number of free chunks
   * before we freed one. In that case, we check if there still are blocks
   * in the original freelist and we either keep the current value (if there
   * still are blocks) or increment it by one (the new block is still the
   * one with minimum free chunks).
   *
   * The one exception is when the block will get completely free - in that
   * case we will free it, so we can't use it for minFreeChunks. It however
   * means there are no more blocks with free chunks.
   */
  if (slab->minFreeChunks == (block->nfree - 1))
  {
    /* Have we removed the last chunk from the freelist? */
    if (dlist_is_empty(&slab->freelist[slab->minFreeChunks]))
    {
      /* but if we made the block entirely free, we'll free it */
      if (block->nfree == slab->chunksPerBlock)
        slab->minFreeChunks = 0;
      else
        slab->minFreeChunks++;
    }
  }

  /* If the block is now completely empty, free it. */
  if (block->nfree == slab->chunksPerBlock)
  {
    free(block);
    slab->nblocks--;
  }
  else
    dlist_push_head(&slab->freelist[block->nfree], &block->node);

  Assert(slab->nblocks >= 0);
}
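
/*
 * Illustrative encoding of the in-chunk freelist (hypothetical numbers,
 * chunksPerBlock = 8): freeing chunk 5 and then chunk 2 of a previously
 * full block leaves firstFreeChunk = 2, the payload of chunk 2 holding the
 * index 5, and the payload of chunk 5 holding 8 (chunksPerBlock, the list
 * terminator).
 */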

/*
 * SlabRealloc
 *    Change the allocated size of a chunk.
 *
 * As Slab is designed for allocating equally-sized chunks of memory, it can't
 * do an actual chunk size change.  We try to be gentle and allow calls with
 * exactly the same size, as in that case we can simply return the same
 * chunk.  When the size differs, we throw an error.
 *
 * We could also allow requests with size < chunkSize.  That however seems
 * rather pointless - Slab is meant for chunks of constant size, and moreover
 * realloc is usually used to enlarge the chunk.
 */
static void *
SlabRealloc(MemoryContext context, void *pointer, Size size)
{
  SlabContext *slab = castNode(SlabContext, context);

  Assert(slab);

  /* can't do actual realloc with slab, but let's try to be gentle */
  if (size == slab->chunkSize)
    return pointer;

  elog(ERROR, "slab allocator does not support realloc()");
  return NULL;        /* keep compiler quiet */
}

/*
 * SlabGetChunkSpace
 *    Given a currently-allocated chunk, determine the total space
 *    it occupies (including all memory-allocation overhead).
 */
static Size
SlabGetChunkSpace(MemoryContext context, void *pointer)
{
  SlabContext *slab = castNode(SlabContext, context);

  Assert(slab);

  return slab->fullChunkSize;
}

/*
 * SlabIsEmpty
 *    Is a Slab empty of any allocated space?
 */
static bool
SlabIsEmpty(MemoryContext context)
{
  SlabContext *slab = castNode(SlabContext, context);

  Assert(slab);

  return (slab->nblocks == 0);
}

/*
 * SlabStats
 *    Compute stats about memory consumption of a Slab context.
 *
 * printfunc: if not NULL, pass a human-readable stats string to this.
 * passthru: pass this pointer through to printfunc.
 * totals: if not NULL, add stats about this context into *totals.
 */
static void
SlabStats(MemoryContext context,
      MemoryStatsPrintFunc printfunc, void *passthru,
      MemoryContextCounters *totals)
{
  SlabContext *slab = castNode(SlabContext, context);
  Size    nblocks = 0;
  Size    freechunks = 0;
  Size    totalspace;
  Size    freespace = 0;
  int     i;

  /* Include context header in totalspace */
  totalspace = slab->headerSize;

  for (i = 0; i <= slab->chunksPerBlock; i++)
  {
    dlist_iter  iter;

    dlist_foreach(iter, &slab->freelist[i])
    {
      SlabBlock  *block = dlist_container(SlabBlock, node, iter.cur);

      nblocks++;
      totalspace += slab->blockSize;
      freespace += slab->fullChunkSize * block->nfree;
      freechunks += block->nfree;
    }
  }

  if (printfunc)
  {
    char    stats_string[200];

    snprintf(stats_string, sizeof(stats_string),
         "%zu total in %zd blocks; %zu free (%zd chunks); %zu used",
         totalspace, nblocks, freespace, freechunks,
         totalspace - freespace);
    printfunc(context, passthru, stats_string);
  }

  if (totals)
  {
    totals->nblocks += nblocks;
    totals->freechunks += freechunks;
    totals->totalspace += totalspace;
    totals->freespace += freespace;
  }
}
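
/*
 * Sample output line (hypothetical numbers): a context with a single
 * 8192-byte block in which 100 chunks (fullChunkSize = 48) are free would
 * report 4800 bytes free in 1 block, e.g.
 *
 *    "11048 total in 1 blocks; 4800 free (100 chunks); 6248 used"
 *
 * where the total also counts the context header (headerSize).
 */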


#ifdef MEMORY_CONTEXT_CHECKING

/*
 * SlabCheck
 *    Walk through chunks and check consistency of memory.
 *
 * NOTE: report errors as WARNING, *not* ERROR or FATAL.  Otherwise you'll
 * find yourself in an infinite loop when trouble occurs, because this
 * routine will be entered again when elog cleanup tries to release memory!
 */
static void
SlabCheck(MemoryContext context)
{
  int     i;
  SlabContext *slab = castNode(SlabContext, context);
  const char *name = slab->header.name;
  char     *freechunks;

  Assert(slab);
  Assert(slab->chunksPerBlock > 0);

  /* bitmap of free chunks on a block */
  freechunks = palloc(slab->chunksPerBlock * sizeof(bool));

  /* walk all the freelists */
  for (i = 0; i <= slab->chunksPerBlock; i++)
  {
    int     j,
          nfree;
    dlist_iter  iter;

    /* walk all blocks on this freelist */
    dlist_foreach(iter, &slab->freelist[i])
    {
      int     idx;
      SlabBlock  *block = dlist_container(SlabBlock, node, iter.cur);

      /*
       * Make sure the number of free chunks (in the block header)
       * matches position in the freelist.
       */
      if (block->nfree != i)
        elog(WARNING, "problem in slab %s: number of free chunks %d in block %p does not match freelist %d",
           name, block->nfree, block, i);

      /* reset the bitmap of free chunks for this block */
      memset(freechunks, 0, (slab->chunksPerBlock * sizeof(bool)));
      idx = block->firstFreeChunk;

      /*
       * Now walk through the chunks, count the free ones and also
       * perform some additional checks for the used ones. As the chunk
       * freelist is stored within the chunks themselves, we have to
       * walk through the chunks and construct our own bitmap.
       */

      nfree = 0;
      while (idx < slab->chunksPerBlock)
      {
        SlabChunk  *chunk;

        /* count the chunk as free, add it to the bitmap */
        nfree++;
        freechunks[idx] = true;

        /* read index of the next free chunk */
        chunk = SlabBlockGetChunk(slab, block, idx);
        VALGRIND_MAKE_MEM_DEFINED(SlabChunkGetPointer(chunk), sizeof(int32));
        idx = *(int32 *) SlabChunkGetPointer(chunk);
      }

      for (j = 0; j < slab->chunksPerBlock; j++)
      {
        /* a zero in the bitmap means the chunk is in use, so check it */
        if (!freechunks[j])
        {
          SlabChunk  *chunk = SlabBlockGetChunk(slab, block, j);

          /* chunks have both block and slab pointers, so check both */
          if (chunk->block != block)
            elog(WARNING, "problem in slab %s: bogus block link in block %p, chunk %p",
               name, block, chunk);

          if (chunk->slab != slab)
            elog(WARNING, "problem in slab %s: bogus slab link in block %p, chunk %p",
               name, block, chunk);

          /* there might be a sentinel after the payload (thanks to alignment) */
          if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
            if (!sentinel_ok(SlabChunkGetPointer(chunk), slab->chunkSize))
              elog(WARNING, "problem in slab %s: detected write past chunk end in block %p, chunk %p",
                 name, block, chunk);
        }
      }

      /*
       * Make sure we got the expected number of free chunks (as tracked
       * in the block header).
       */
      if (nfree != block->nfree)
        elog(WARNING, "problem in slab %s: number of free chunks %d in block %p does not match bitmap %d",
           name, block->nfree, block, nfree);
    }
  }
}

#endif              /* MEMORY_CONTEXT_CHECKING */