YugabyteDB (2.13.1.0-b60, 21121d69985fbf76aa6958d8f04a9bfa936293b5)

Coverage Report

Created: 2022-03-22 16:43

/Users/deen/code/yugabyte-db/src/postgres/src/backend/storage/buffer/localbuf.c
Line
Count
Source (jump to first uncovered line)
1
/*-------------------------------------------------------------------------
2
 *
3
 * localbuf.c
4
 *    local buffer manager. Fast buffer manager for temporary tables,
5
 *    which never need to be WAL-logged or checkpointed, etc.
6
 *
7
 * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
8
 * Portions Copyright (c) 1994-5, Regents of the University of California
9
 *
10
 *
11
 * IDENTIFICATION
12
 *    src/backend/storage/buffer/localbuf.c
13
 *
14
 *-------------------------------------------------------------------------
15
 */
16
#include "postgres.h"
17
18
#include "access/parallel.h"
19
#include "catalog/catalog.h"
20
#include "executor/instrument.h"
21
#include "storage/buf_internals.h"
22
#include "storage/bufmgr.h"
23
#include "utils/guc.h"
24
#include "utils/memutils.h"
25
#include "utils/resowner_private.h"
26
27
28
/*#define LBDEBUG*/

/* entry for buffer lookup hashtable (maps page tag -> local buffer index) */
typedef struct
{
	BufferTag	key;			/* Tag of a disk page */
	int			id;				/* Associated local buffer's index */
} LocalBufferLookupEnt;

/*
 * Note: this macro only works on local buffers, not shared ones!
 *
 * Local buffer descriptors carry buf_id = -(index) - 2 (see
 * InitLocalBuffers), so -(buf_id + 2) recovers the array index into
 * LocalBufferBlockPointers.
 */
#define LocalBufHdrGetBlock(bufHdr) \
	LocalBufferBlockPointers[-((bufHdr)->buf_id + 2)]

int			NLocBuffer = 0;		/* until buffers are initialized */

/* Parallel arrays, all of length NLocBuffer, allocated in InitLocalBuffers */
BufferDesc *LocalBufferDescriptors = NULL;
Block	   *LocalBufferBlockPointers = NULL;	/* lazily-filled page storage */
int32	   *LocalRefCount = NULL;	/* per-buffer local pin counts */

/* clock hand for the local-buffer clock sweep (see LocalBufferAlloc) */
static int	nextFreeLocalBuf = 0;

/* tag -> LocalBufferLookupEnt; NULL until first local-buffer request */
static HTAB *LocalBufHash = NULL;


static void InitLocalBuffers(void);
static Block GetLocalBufferStorage(void);
54
55
56
/*
57
 * LocalPrefetchBuffer -
58
 *    initiate asynchronous read of a block of a relation
59
 *
60
 * Do PrefetchBuffer's work for temporary relations.
61
 * No-op if prefetching isn't compiled in.
62
 */
63
void
64
LocalPrefetchBuffer(SMgrRelation smgr, ForkNumber forkNum,
65
          BlockNumber blockNum)
66
0
{
67
#ifdef USE_PREFETCH
68
  BufferTag newTag;     /* identity of requested block */
69
  LocalBufferLookupEnt *hresult;
70
71
  INIT_BUFFERTAG(newTag, smgr->smgr_rnode.node, forkNum, blockNum);
72
73
  /* Initialize local buffers if first request in this session */
74
  if (LocalBufHash == NULL)
75
    InitLocalBuffers();
76
77
  /* See if the desired buffer already exists */
78
  hresult = (LocalBufferLookupEnt *)
79
    hash_search(LocalBufHash, (void *) &newTag, HASH_FIND, NULL);
80
81
  if (hresult)
82
  {
83
    /* Yes, so nothing to do */
84
    return;
85
  }
86
87
  /* Not in buffers, so initiate prefetch */
88
  smgrprefetch(smgr, forkNum, blockNum);
89
#endif              /* USE_PREFETCH */
90
0
}
91
92
93
/*
 * LocalBufferAlloc -
 *		Find or create a local buffer for the given page of the given relation.
 *
 * API is similar to bufmgr.c's BufferAlloc, except that we do not need
 * to do any locking since this is all local.   Also, IO_IN_PROGRESS
 * does not get set.  Lastly, we support only default access strategy
 * (hence, usage_count is always advanced).
 *
 * On return, *foundPtr is true if the buffer already held a valid copy of
 * the page; false means the caller must read the page in.  The buffer is
 * returned pinned (LocalRefCount bumped and remembered by the current
 * resource owner) in either case.
 */
BufferDesc *
LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
				 bool *foundPtr)
{
	BufferTag	newTag;			/* identity of requested block */
	LocalBufferLookupEnt *hresult;
	BufferDesc *bufHdr;
	int			b;
	int			trycounter;
	bool		found;
	uint32		buf_state;

	INIT_BUFFERTAG(newTag, smgr->smgr_rnode.node, forkNum, blockNum);

	/* Initialize local buffers if first request in this session */
	if (LocalBufHash == NULL)
		InitLocalBuffers();

	/* See if the desired buffer already exists */
	hresult = (LocalBufferLookupEnt *)
		hash_search(LocalBufHash, (void *) &newTag, HASH_FIND, NULL);

	if (hresult)
	{
		b = hresult->id;
		bufHdr = GetLocalBufferDescriptor(b);
		Assert(BUFFERTAGS_EQUAL(bufHdr->tag, newTag));
#ifdef LBDEBUG
		fprintf(stderr, "LB ALLOC (%u,%d,%d) %d\n",
				smgr->smgr_rnode.node.relNode, forkNum, blockNum, -b - 1);
#endif
		buf_state = pg_atomic_read_u32(&bufHdr->state);

		/*
		 * this part is equivalent to PinBuffer for a shared buffer: bump
		 * usage_count only on the 0 -> 1 pin transition, capped at
		 * BM_MAX_USAGE_COUNT.  Plain (unlocked) writes suffice because local
		 * buffers are never touched by other backends.
		 */
		if (LocalRefCount[b] == 0)
		{
			if (BUF_STATE_GET_USAGECOUNT(buf_state) < BM_MAX_USAGE_COUNT)
			{
				buf_state += BUF_USAGECOUNT_ONE;
				pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
			}
		}
		LocalRefCount[b]++;
		ResourceOwnerRememberBuffer(CurrentResourceOwner,
									BufferDescriptorGetBuffer(bufHdr));
		if (buf_state & BM_VALID)
			*foundPtr = true;
		else
		{
			/* Previous read attempt must have failed; try again */
			*foundPtr = false;
		}
		return bufHdr;
	}

#ifdef LBDEBUG
	fprintf(stderr, "LB ALLOC (%u,%d,%d) %d\n",
			smgr->smgr_rnode.node.relNode, forkNum, blockNum,
			-nextFreeLocalBuf - 1);
#endif

	/*
	 * Need to get a new buffer.  We use a clock sweep algorithm (essentially
	 * the same as what freelist.c does now...)
	 */
	trycounter = NLocBuffer;
	for (;;)
	{
		b = nextFreeLocalBuf;

		/* advance the clock hand, wrapping at NLocBuffer */
		if (++nextFreeLocalBuf >= NLocBuffer)
			nextFreeLocalBuf = 0;

		bufHdr = GetLocalBufferDescriptor(b);

		if (LocalRefCount[b] == 0)
		{
			buf_state = pg_atomic_read_u32(&bufHdr->state);

			if (BUF_STATE_GET_USAGECOUNT(buf_state) > 0)
			{
				/* recently used: decay usage_count and keep sweeping */
				buf_state -= BUF_USAGECOUNT_ONE;
				pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
				trycounter = NLocBuffer;
			}
			else
			{
				/* Found a usable buffer */
				LocalRefCount[b]++;
				ResourceOwnerRememberBuffer(CurrentResourceOwner,
											BufferDescriptorGetBuffer(bufHdr));
				break;
			}
		}
		else if (--trycounter == 0)
			ereport(ERROR,
					(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
					 errmsg("no empty local buffer available")));
	}

	/*
	 * this buffer is not referenced but it might still be dirty. if that's
	 * the case, write it out before reusing it!
	 */
	if (buf_state & BM_DIRTY)
	{
		SMgrRelation oreln;
		Page		localpage = (char *) LocalBufHdrGetBlock(bufHdr);

		/* Find smgr relation for buffer */
		oreln = smgropen(bufHdr->tag.rnode, MyBackendId);

		PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);

		/* And write... */
		smgrwrite(oreln,
				  bufHdr->tag.forkNum,
				  bufHdr->tag.blockNum,
				  localpage,
				  false);

		/* Mark not-dirty now in case we error out below */
		buf_state &= ~BM_DIRTY;
		pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);

		pgBufferUsage.local_blks_written++;
	}

	/*
	 * lazy memory allocation: allocate space on first use of a buffer.
	 */
	if (LocalBufHdrGetBlock(bufHdr) == NULL)
	{
		/* Set pointer for use by BufferGetBlock() macro */
		LocalBufHdrGetBlock(bufHdr) = GetLocalBufferStorage();
	}

	/*
	 * Update the hash table: remove old entry, if any, and make new one.
	 */
	if (buf_state & BM_TAG_VALID)
	{
		hresult = (LocalBufferLookupEnt *)
			hash_search(LocalBufHash, (void *) &bufHdr->tag,
						HASH_REMOVE, NULL);
		if (!hresult)			/* shouldn't happen */
			elog(ERROR, "local buffer hash table corrupted");
		/* mark buffer invalid just in case hash insert fails */
		CLEAR_BUFFERTAG(bufHdr->tag);
		buf_state &= ~(BM_VALID | BM_TAG_VALID);
		pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
	}

	hresult = (LocalBufferLookupEnt *)
		hash_search(LocalBufHash, (void *) &newTag, HASH_ENTER, &found);
	if (found)					/* shouldn't happen */
		elog(ERROR, "local buffer hash table corrupted");
	hresult->id = b;

	/*
	 * it's all ours now.
	 */
	bufHdr->tag = newTag;
	/* clear validity/dirty flags and reset usage_count to exactly 1 */
	buf_state &= ~(BM_VALID | BM_DIRTY | BM_JUST_DIRTIED | BM_IO_ERROR);
	buf_state |= BM_TAG_VALID;
	buf_state &= ~BUF_USAGECOUNT_MASK;
	buf_state += BUF_USAGECOUNT_ONE;
	pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);

	*foundPtr = false;
	return bufHdr;
}
274
275
/*
276
 * MarkLocalBufferDirty -
277
 *    mark a local buffer dirty
278
 */
279
void
280
MarkLocalBufferDirty(Buffer buffer)
281
91.0k
{
282
91.0k
  int     bufid;
283
91.0k
  BufferDesc *bufHdr;
284
91.0k
  uint32    buf_state;
285
286
91.0k
  Assert(BufferIsLocal(buffer));
287
288
#ifdef LBDEBUG
289
  fprintf(stderr, "LB DIRTY %d\n", buffer);
290
#endif
291
292
91.0k
  bufid = -(buffer + 1);
293
294
91.0k
  Assert(LocalRefCount[bufid] > 0);
295
296
91.0k
  bufHdr = GetLocalBufferDescriptor(bufid);
297
298
91.0k
  buf_state = pg_atomic_read_u32(&bufHdr->state);
299
300
91.0k
  if (!(buf_state & BM_DIRTY))
301
761
    pgBufferUsage.local_blks_dirtied++;
302
303
91.0k
  buf_state |= BM_DIRTY;
304
305
91.0k
  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
306
91.0k
}
307
308
/*
309
 * DropRelFileNodeLocalBuffers
310
 *    This function removes from the buffer pool all the pages of the
311
 *    specified relation that have block numbers >= firstDelBlock.
312
 *    (In particular, with firstDelBlock = 0, all pages are removed.)
313
 *    Dirty pages are simply dropped, without bothering to write them
314
 *    out first.  Therefore, this is NOT rollback-able, and so should be
315
 *    used only with extreme caution!
316
 *
317
 *    See DropRelFileNodeBuffers in bufmgr.c for more notes.
318
 */
319
void
320
DropRelFileNodeLocalBuffers(RelFileNode rnode, ForkNumber forkNum,
321
              BlockNumber firstDelBlock)
322
5
{
323
5
  int     i;
324
325
5.12k
  for (i = 0; i < NLocBuffer; 
i++5.12k
)
326
5.12k
  {
327
5.12k
    BufferDesc *bufHdr = GetLocalBufferDescriptor(i);
328
5.12k
    LocalBufferLookupEnt *hresult;
329
5.12k
    uint32    buf_state;
330
331
5.12k
    buf_state = pg_atomic_read_u32(&bufHdr->state);
332
333
5.12k
    if ((buf_state & BM_TAG_VALID) &&
334
5.12k
      
RelFileNodeEquals7
(bufHdr->tag.rnode, rnode) &&
335
5.12k
      
bufHdr->tag.forkNum == forkNum2
&&
336
5.12k
      
bufHdr->tag.blockNum >= firstDelBlock2
)
337
2
    {
338
2
      if (LocalRefCount[i] != 0)
339
0
        elog(ERROR, "block %u of %s is still referenced (local %u)",
340
2
           bufHdr->tag.blockNum,
341
2
           relpathbackend(bufHdr->tag.rnode, MyBackendId,
342
2
                  bufHdr->tag.forkNum),
343
2
           LocalRefCount[i]);
344
      /* Remove entry from hashtable */
345
2
      hresult = (LocalBufferLookupEnt *)
346
2
        hash_search(LocalBufHash, (void *) &bufHdr->tag,
347
2
              HASH_REMOVE, NULL);
348
2
      if (!hresult)    /* shouldn't happen */
349
0
        elog(ERROR, "local buffer hash table corrupted");
350
      /* Mark buffer invalid */
351
2
      CLEAR_BUFFERTAG(bufHdr->tag);
352
2
      buf_state &= ~BUF_FLAG_MASK;
353
2
      buf_state &= ~BUF_USAGECOUNT_MASK;
354
2
      pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
355
2
    }
356
5.12k
  }
357
5
}
358
359
/*
360
 * DropRelFileNodeAllLocalBuffers
361
 *    This function removes from the buffer pool all pages of all forks
362
 *    of the specified relation.
363
 *
364
 *    See DropRelFileNodeAllBuffers in bufmgr.c for more notes.
365
 */
366
void
367
DropRelFileNodeAllLocalBuffers(RelFileNode rnode)
368
95
{
369
95
  int     i;
370
371
77.9k
  for (i = 0; i < NLocBuffer; 
i++77.8k
)
372
77.8k
  {
373
77.8k
    BufferDesc *bufHdr = GetLocalBufferDescriptor(i);
374
77.8k
    LocalBufferLookupEnt *hresult;
375
77.8k
    uint32    buf_state;
376
377
77.8k
    buf_state = pg_atomic_read_u32(&bufHdr->state);
378
379
77.8k
    if ((buf_state & BM_TAG_VALID) &&
380
77.8k
      
RelFileNodeEquals1.61k
(bufHdr->tag.rnode, rnode))
381
495
    {
382
495
      if (LocalRefCount[i] != 0)
383
0
        elog(ERROR, "block %u of %s is still referenced (local %u)",
384
495
           bufHdr->tag.blockNum,
385
495
           relpathbackend(bufHdr->tag.rnode, MyBackendId,
386
495
                  bufHdr->tag.forkNum),
387
495
           LocalRefCount[i]);
388
      /* Remove entry from hashtable */
389
495
      hresult = (LocalBufferLookupEnt *)
390
495
        hash_search(LocalBufHash, (void *) &bufHdr->tag,
391
495
              HASH_REMOVE, NULL);
392
495
      if (!hresult)    /* shouldn't happen */
393
0
        elog(ERROR, "local buffer hash table corrupted");
394
      /* Mark buffer invalid */
395
495
      CLEAR_BUFFERTAG(bufHdr->tag);
396
495
      buf_state &= ~BUF_FLAG_MASK;
397
495
      buf_state &= ~BUF_USAGECOUNT_MASK;
398
495
      pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
399
495
    }
400
77.8k
  }
401
95
}
402
403
/*
404
 * InitLocalBuffers -
405
 *    init the local buffer cache. Since most queries (esp. multi-user ones)
406
 *    don't involve local buffers, we delay allocating actual memory for the
407
 *    buffers until we need them; just make the buffer headers here.
408
 */
409
static void
410
InitLocalBuffers(void)
411
49
{
412
49
  int     nbufs = num_temp_buffers;
413
49
  HASHCTL   info;
414
49
  int     i;
415
416
  /*
417
   * Parallel workers can't access data in temporary tables, because they
418
   * have no visibility into the local buffers of their leader.  This is a
419
   * convenient, low-cost place to provide a backstop check for that.  Note
420
   * that we don't wish to prevent a parallel worker from accessing catalog
421
   * metadata about a temp table, so checks at higher levels would be
422
   * inappropriate.
423
   */
424
49
  if (IsParallelWorker())
425
49
    ereport(ERROR,
426
49
        (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
427
49
         errmsg("cannot access temporary tables during a parallel operation")));
428
429
  /* Allocate and zero buffer headers and auxiliary arrays */
430
49
  LocalBufferDescriptors = (BufferDesc *) calloc(nbufs, sizeof(BufferDesc));
431
49
  LocalBufferBlockPointers = (Block *) calloc(nbufs, sizeof(Block));
432
49
  LocalRefCount = (int32 *) calloc(nbufs, sizeof(int32));
433
49
  if (!LocalBufferDescriptors || !LocalBufferBlockPointers || !LocalRefCount)
434
49
    ereport(FATAL,
435
49
        (errcode(ERRCODE_OUT_OF_MEMORY),
436
49
         errmsg("out of memory")));
437
438
49
  nextFreeLocalBuf = 0;
439
440
  /* initialize fields that need to start off nonzero */
441
50.2k
  for (i = 0; i < nbufs; 
i++50.1k
)
442
50.1k
  {
443
50.1k
    BufferDesc *buf = GetLocalBufferDescriptor(i);
444
445
    /*
446
     * negative to indicate local buffer. This is tricky: shared buffers
447
     * start with 0. We have to start with -2. (Note that the routine
448
     * BufferDescriptorGetBuffer adds 1 to buf_id so our first buffer id
449
     * is -1.)
450
     */
451
50.1k
    buf->buf_id = -i - 2;
452
453
    /*
454
     * Intentionally do not initialize the buffer's atomic variable
455
     * (besides zeroing the underlying memory above). That way we get
456
     * errors on platforms without atomics, if somebody (re-)introduces
457
     * atomic operations for local buffers.
458
     */
459
50.1k
  }
460
461
  /* Create the lookup hash table */
462
49
  MemSet(&info, 0, sizeof(info));
463
49
  info.keysize = sizeof(BufferTag);
464
49
  info.entrysize = sizeof(LocalBufferLookupEnt);
465
466
49
  LocalBufHash = hash_create("Local Buffer Lookup Table",
467
49
                 nbufs,
468
49
                 &info,
469
49
                 HASH_ELEM | HASH_BLOBS);
470
471
49
  if (!LocalBufHash)
472
0
    elog(ERROR, "could not initialize local buffer hash table");
473
474
  /* Initialization done, mark buffers allocated */
475
49
  NLocBuffer = nbufs;
476
49
}
477
478
/*
479
 * GetLocalBufferStorage - allocate memory for a local buffer
480
 *
481
 * The idea of this function is to aggregate our requests for storage
482
 * so that the memory manager doesn't see a whole lot of relatively small
483
 * requests.  Since we'll never give back a local buffer once it's created
484
 * within a particular process, no point in burdening memmgr with separately
485
 * managed chunks.
486
 */
487
static Block
488
GetLocalBufferStorage(void)
489
823
{
490
823
  static char *cur_block = NULL;
491
823
  static int  next_buf_in_block = 0;
492
823
  static int  num_bufs_in_block = 0;
493
823
  static int  total_bufs_allocated = 0;
494
823
  static MemoryContext LocalBufferContext = NULL;
495
496
823
  char     *this_buf;
497
498
823
  Assert(total_bufs_allocated < NLocBuffer);
499
500
823
  if (next_buf_in_block >= num_bufs_in_block)
501
61
  {
502
    /* Need to make a new request to memmgr */
503
61
    int     num_bufs;
504
505
    /*
506
     * We allocate local buffers in a context of their own, so that the
507
     * space eaten for them is easily recognizable in MemoryContextStats
508
     * output.  Create the context on first use.
509
     */
510
61
    if (LocalBufferContext == NULL)
511
49
      LocalBufferContext =
512
49
        AllocSetContextCreate(TopMemoryContext,
513
61
                    "LocalBufferContext",
514
61
                    ALLOCSET_DEFAULT_SIZES);
515
516
    /* Start with a 16-buffer request; subsequent ones double each time */
517
61
    num_bufs = Max(num_bufs_in_block * 2, 16);
518
    /* But not more than what we need for all remaining local bufs */
519
61
    num_bufs = Min(num_bufs, NLocBuffer - total_bufs_allocated);
520
    /* And don't overflow MaxAllocSize, either */
521
61
    num_bufs = Min(num_bufs, MaxAllocSize / BLCKSZ);
522
523
61
    cur_block = (char *) MemoryContextAlloc(LocalBufferContext,
524
61
                        num_bufs * BLCKSZ);
525
61
    next_buf_in_block = 0;
526
61
    num_bufs_in_block = num_bufs;
527
61
  }
528
529
  /* Allocate next buffer in current memory block */
530
823
  this_buf = cur_block + next_buf_in_block * BLCKSZ;
531
823
  next_buf_in_block++;
532
823
  total_bufs_allocated++;
533
534
823
  return (Block) this_buf;
535
823
}
536
537
/*
 * CheckForLocalBufferLeaks - ensure this backend holds no local buffer pins
 *
 * This is just like CheckBufferLeaks(), but for local buffers.
 * Compiles to a no-op unless assertions are enabled.
 */
static void
CheckForLocalBufferLeaks(void)
{
#ifdef USE_ASSERT_CHECKING
	if (LocalRefCount)
	{
		int			leaks = 0;
		int			i;

		for (i = 0; i < NLocBuffer; i++)
		{
			if (LocalRefCount[i] == 0)
				continue;

			/* convert array index back to a (negative) local buffer number */
			PrintBufferLeakWarning(-i - 1);
			leaks++;
		}
		Assert(leaks == 0);
	}
#endif
}
565
566
/*
 * AtEOXact_LocalBuffers - clean up at end of transaction.
 *
 * This is just like AtEOXact_Buffers, but for local buffers.
 *
 * isCommit is unused here: local pins must be gone at end of transaction
 * whether it committed or aborted.
 */
void
AtEOXact_LocalBuffers(bool isCommit)
{
	CheckForLocalBufferLeaks();
}
576
577
/*
 * AtProcExit_LocalBuffers - ensure we have dropped pins during backend exit.
 *
 * This is just like AtProcExit_Buffers, but for local buffers.
 */
void
AtProcExit_LocalBuffers(void)
{
	/*
	 * We shouldn't be holding any remaining pins; if we are, and assertions
	 * aren't enabled, we'll fail later in DropRelFileNodeBuffers while trying
	 * to drop the temp rels.
	 */
	CheckForLocalBufferLeaks();
}