YugabyteDB (2.13.0.0-b42, bfc6a6643e7399ac8a0e81d06a3ee6d6571b33ab)

Coverage Report

Created: 2022-03-09 17:30

/Users/deen/code/yugabyte-db/src/yb/rocksdb/db/table_cache.cc
Line
Count
Source (jump to first uncovered line)
1
//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
2
//  This source code is licensed under the BSD-style license found in the
3
//  LICENSE file in the root directory of this source tree. An additional grant
4
//  of patent rights can be found in the PATENTS file in the same directory.
5
//
6
// The following only applies to changes made to this file as part of YugaByte development.
7
//
8
// Portions Copyright (c) YugaByte, Inc.
9
//
10
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
11
// in compliance with the License.  You may obtain a copy of the License at
12
//
13
// http://www.apache.org/licenses/LICENSE-2.0
14
//
15
// Unless required by applicable law or agreed to in writing, software distributed under the License
16
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
17
// or implied.  See the License for the specific language governing permissions and limitations
18
// under the License.
19
//
20
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
21
// Use of this source code is governed by a BSD-style license that can be
22
// found in the LICENSE file. See the AUTHORS file for names of contributors.
23
24
#include "yb/rocksdb/db/table_cache.h"
25
26
#include "yb/rocksdb/db/dbformat.h"
27
#include "yb/rocksdb/db/filename.h"
28
#include "yb/rocksdb/db/version_edit.h"
29
#include "yb/rocksdb/statistics.h"
30
#include "yb/rocksdb/table.h"
31
#include "yb/rocksdb/table/get_context.h"
32
#include "yb/rocksdb/table/internal_iterator.h"
33
#include "yb/rocksdb/table/iterator_wrapper.h"
34
#include "yb/rocksdb/table/table_builder.h"
35
#include "yb/rocksdb/table/table_reader.h"
36
#include "yb/rocksdb/util/coding.h"
37
#include "yb/rocksdb/util/file_reader_writer.h"
38
#include "yb/rocksdb/util/perf_context_imp.h"
39
#include "yb/rocksdb/util/sync_point.h"
40
41
#include "yb/util/logging.h"
42
#include "yb/util/stats/perf_step_timer.h"
43
44
namespace rocksdb {
45
46
namespace {
47
48
template <class T>
49
112k
static void DeleteEntry(const Slice& key, void* value) {
50
112k
  T* typed_value = reinterpret_cast<T*>(value);
51
112k
  delete typed_value;
52
112k
}
table_cache.cc:_ZN7rocksdb12_GLOBAL__N_111DeleteEntryINS_11TableReaderEEEvRKN2yb5SliceEPv
Line
Count
Source
49
69.5k
static void DeleteEntry(const Slice& key, void* value) {
50
69.5k
  T* typed_value = reinterpret_cast<T*>(value);
51
69.5k
  delete typed_value;
52
69.5k
}
table_cache.cc:_ZN7rocksdb12_GLOBAL__N_111DeleteEntryINSt3__112basic_stringIcNS2_11char_traitsIcEENS2_9allocatorIcEEEEEEvRKN2yb5SliceEPv
Line
Count
Source
49
42.7k
static void DeleteEntry(const Slice& key, void* value) {
50
42.7k
  T* typed_value = reinterpret_cast<T*>(value);
51
42.7k
  delete typed_value;
52
42.7k
}
53
54
3.75M
static void UnrefEntry(void* arg1, void* arg2) {
55
3.75M
  Cache* cache = reinterpret_cast<Cache*>(arg1);
56
3.75M
  Cache::Handle* h = reinterpret_cast<Cache::Handle*>(arg2);
57
3.75M
  cache->Release(h);
58
3.75M
}
59
60
465
static void DeleteTableReader(void* arg1, void* arg2) {
61
465
  TableReader* table_reader = reinterpret_cast<TableReader*>(arg1);
62
465
  delete table_reader;
63
465
}
64
65
14.8M
static Slice GetSliceForFileNumber(const uint64_t* file_number) {
66
14.8M
  return Slice(reinterpret_cast<const char*>(file_number),
67
14.8M
      sizeof(*file_number));
68
14.8M
}
69
70
#ifndef ROCKSDB_LITE
71
72
103k
void AppendVarint64(IterKey* key, uint64_t v) {
73
103k
  char buf[10];
74
103k
  auto ptr = EncodeVarint64(buf, v);
75
103k
  key->TrimAppend(key->Size(), buf, ptr - buf);
76
103k
}
77
78
#endif  // ROCKSDB_LITE
79
80
}  // namespace
81
82
TableCache::TableCache(const ImmutableCFOptions& ioptions,
83
    const EnvOptions& env_options, Cache* const cache)
84
347k
    : ioptions_(ioptions), env_options_(env_options), cache_(cache) {
85
347k
  if (ioptions_.row_cache) {
86
    // If the same cache is shared by multiple instances, we need to
87
    // disambiguate its entries.
88
332
    PutVarint64(&row_cache_id_, ioptions_.row_cache->NewId());
89
332
  }
90
347k
}
91
92
328k
TableCache::~TableCache() {
93
328k
}
94
95
14.7M
TableReader* TableCache::GetTableReaderFromHandle(Cache::Handle* handle) {
96
14.7M
  return reinterpret_cast<TableReader*>(cache_->Value(handle));
97
14.7M
}
98
99
8.30M
void TableCache::ReleaseHandle(Cache::Handle* handle) {
100
8.30M
  cache_->Release(handle);
101
8.30M
}
102
103
namespace {
104
105
Status NewFileReader(const ImmutableCFOptions& ioptions, const EnvOptions& env_options,
106
    const std::string& fname, bool sequential_mode, bool record_read_stats,
107
140k
    HistogramImpl* file_read_hist, std::unique_ptr<RandomAccessFileReader>* file_reader) {
108
140k
  unique_ptr<RandomAccessFile> file;
109
110
140k
  Status s = ioptions.env->NewRandomAccessFile(fname, &file, env_options);
111
140k
  if (!s.ok()) {
112
521
    return s;
113
521
  }
114
140k
  RecordTick(ioptions.statistics, NO_FILE_OPENS);
115
116
140k
  if (sequential_mode && ioptions.compaction_readahead_size > 0) {
117
922
    file = NewReadaheadRandomAccessFile(std::move(file), ioptions.compaction_readahead_size);
118
922
  }
119
140k
  if (!sequential_mode && ioptions.advise_random_on_open) {
120
139k
    file->Hint(RandomAccessFile::RANDOM);
121
139k
  }
122
123
140k
  file_reader->reset(new RandomAccessFileReader(
124
140k
      std::move(file),
125
140k
      ioptions.env,
126
139k
      record_read_stats ? ioptions.statistics : nullptr,
127
140k
      SST_READ_MICROS,
128
140k
      file_read_hist));
129
140k
  return Status::OK();
130
140k
}
131
132
} // anonymous namespace
133
134
Status TableCache::DoGetTableReader(
135
    const EnvOptions& env_options,
136
    const InternalKeyComparatorPtr& internal_comparator, const FileDescriptor& fd,
137
    bool sequential_mode, bool record_read_stats, HistogramImpl* file_read_hist,
138
71.8k
    unique_ptr<TableReader>* table_reader, bool skip_filters) {
139
71.8k
  const std::string base_fname = TableFileName(ioptions_.db_paths, fd.GetNumber(), fd.GetPathId());
140
141
71.8k
  Status s;
142
71.8k
  {
143
71.8k
    unique_ptr<RandomAccessFileReader> base_file_reader;
144
71.8k
    s = NewFileReader(ioptions_, env_options, base_fname, sequential_mode, record_read_stats,
145
71.8k
        file_read_hist, &base_file_reader);
146
71.8k
    if (!s.ok()) {
147
520
      return s;
148
520
    }
149
71.3k
    s = ioptions_.table_factory->NewTableReader(
150
71.3k
        TableReaderOptions(ioptions_, env_options, internal_comparator, skip_filters),
151
71.3k
        std::move(base_file_reader), fd.GetBaseFileSize(), table_reader);
152
71.3k
    if (!s.ok()) {
153
34
      return s;
154
34
    }
155
71.3k
  }
156
157
71.3k
  if ((*table_reader)->IsSplitSst()) {
158
68.8k
    const std::string data_fname = TableBaseToDataFileName(base_fname);
159
68.8k
    std::unique_ptr<RandomAccessFileReader> data_file_reader;
160
68.8k
    s = NewFileReader(ioptions_, env_options, data_fname, sequential_mode, record_read_stats,
161
68.8k
        file_read_hist, &data_file_reader);
162
68.8k
    if (!s.ok()) {
163
1
      return s;
164
1
    }
165
68.8k
    (*table_reader)->SetDataFileReader(std::move(data_file_reader));
166
68.8k
  }
167
71.3k
  TEST_SYNC_POINT("TableCache::GetTableReader:0");
168
71.3k
  return s;
169
71.3k
}
170
171
Status TableCache::FindTable(const EnvOptions& env_options,
172
                             const InternalKeyComparatorPtr& internal_comparator,
173
                             const FileDescriptor& fd, Cache::Handle** handle,
174
                             const QueryId query_id, const bool no_io, bool record_read_stats,
175
14.7M
                             HistogramImpl* file_read_hist, bool skip_filters) {
176
14.7M
  PERF_TIMER_GUARD(find_table_nanos);
177
14.7M
  Status s;
178
14.7M
  uint64_t number = fd.GetNumber();
179
14.7M
  Slice key = GetSliceForFileNumber(&number);
180
14.7M
  *handle = cache_->Lookup(key, query_id);
181
14.7M
  TEST_SYNC_POINT_CALLBACK("TableCache::FindTable:0",
182
14.7M
      const_cast<bool*>(&no_io));
183
184
14.7M
  if (*handle == nullptr) {
185
95.5k
    if (no_io) {  // Don't do IO and return a not-found status
186
24.1k
      return STATUS(Incomplete, "Table not found in table_cache, no_io is set");
187
24.1k
    }
188
71.4k
    unique_ptr<TableReader> table_reader;
189
71.4k
    s = DoGetTableReader(env_options, internal_comparator, fd,
190
71.4k
        false /* sequential mode */, record_read_stats,
191
71.4k
        file_read_hist, &table_reader, skip_filters);
192
71.4k
    if (!s.ok()) {
193
555
      assert(table_reader == nullptr);
194
555
      RecordTick(ioptions_.statistics, NO_FILE_ERRORS);
195
      // We do not cache error results so that if the error is transient,
196
      // or somebody repairs the file, we recover automatically.
197
70.8k
    } else {
198
70.8k
      s = cache_->Insert(key, query_id, table_reader.get(), 1, &DeleteEntry<TableReader>, handle);
199
70.8k
      if (s.ok()) {
200
        // Release ownership of table reader.
201
70.8k
        table_reader.release();
202
70.8k
      }
203
70.8k
    }
204
71.4k
  }
205
14.7M
  return s;
206
14.7M
}
207
208
TableCache::TableReaderWithHandle::TableReaderWithHandle(TableReaderWithHandle&& rhs)
209
    : table_reader(rhs.table_reader), handle(rhs.handle), cache(rhs.cache),
210
12.9M
      created_new(rhs.created_new) {
211
12.9M
  rhs.Release();
212
12.9M
}
213
214
TableCache::TableReaderWithHandle& TableCache::TableReaderWithHandle::operator=(
215
6.45M
    TableReaderWithHandle&& rhs) {
216
6.45M
  if (&rhs != this) {
217
6.45M
    Reset();
218
6.45M
    table_reader = rhs.table_reader;
219
6.45M
    handle = rhs.handle;
220
6.45M
    cache = rhs.cache;
221
6.45M
    created_new = rhs.created_new;
222
6.45M
    rhs.Release();
223
6.45M
  }
224
6.45M
  return *this;
225
6.45M
}
226
227
23.1M
void TableCache::TableReaderWithHandle::Release() {
228
23.1M
  table_reader = nullptr;
229
23.1M
  handle = nullptr;
230
23.1M
  cache = nullptr;
231
23.1M
  created_new = false;
232
23.1M
}
233
234
32.2M
void TableCache::TableReaderWithHandle::Reset() {
235
  // TODO: can we remove created_new and check !handle instead?
236
32.2M
  if (created_new) {
237
0
    DCHECK(handle == nullptr);
238
0
    delete table_reader;
239
32.2M
  } else if (handle != nullptr) {
240
2.68M
    DCHECK_ONLY_NOTNULL(cache);
241
2.68M
    cache->Release(handle);
242
2.68M
  }
243
32.2M
}
244
245
25.8M
TableCache::TableReaderWithHandle::~TableReaderWithHandle() {
246
25.8M
  Reset();
247
25.8M
}
248
249
Status TableCache::DoGetTableReaderForIterator(
250
    const ReadOptions& options,
251
    const EnvOptions& env_options,
252
    const InternalKeyComparatorPtr& icomparator,
253
    const FileDescriptor& fd, TableReaderWithHandle* trwh,
254
    HistogramImpl* file_read_hist,
255
    bool for_compaction,
256
6.44M
    bool skip_filters) {
257
6.44M
  const bool create_new_table_reader =
258
6.44M
      (for_compaction && ioptions_.new_table_reader_for_compaction_inputs);
259
6.44M
  if (create_new_table_reader) {
260
465
    unique_ptr<TableReader> table_reader_unique_ptr;
261
465
    Status s = DoGetTableReader(
262
465
        env_options, icomparator, fd, /* sequential mode */ true,
263
465
        /* record stats */ false, nullptr, &table_reader_unique_ptr);
264
465
    if (!s.ok()) {
265
0
      return s;
266
0
    }
267
465
    trwh->table_reader = table_reader_unique_ptr.release();
268
6.44M
  } else {
269
6.44M
    *trwh = VERIFY_RESULT(GetTableReader(
270
6.44M
        env_options, icomparator, fd, options.query_id,
271
6.44M
        /* no_io =*/ options.read_tier == kBlockCacheTier, file_read_hist, skip_filters));
272
6.44M
  }
273
6.44M
  trwh->created_new = create_new_table_reader;
274
6.44M
  return Status::OK();
275
6.44M
}
276
277
Status TableCache::GetTableReaderForIterator(
278
    const ReadOptions& options, const EnvOptions& env_options,
279
    const InternalKeyComparatorPtr& icomparator, const FileDescriptor& fd,
280
    TableReaderWithHandle* trwh,
281
6.20M
    HistogramImpl* file_read_hist, bool for_compaction, bool skip_filters) {
282
6.20M
  PERF_TIMER_GUARD(new_table_iterator_nanos);
283
6.20M
  return DoGetTableReaderForIterator(options, env_options, icomparator, fd, trwh, file_read_hist,
284
6.20M
      for_compaction, skip_filters);
285
6.20M
}
286
287
InternalIterator* TableCache::NewIterator(
288
    const ReadOptions& options, TableReaderWithHandle* trwh, const Slice& filter,
289
3.51M
    bool for_compaction, Arena* arena, bool skip_filters) {
290
3.51M
  PERF_TIMER_GUARD(new_table_iterator_nanos);
291
3.51M
  return DoNewIterator(options, trwh, filter, for_compaction, arena, skip_filters);
292
3.51M
}
293
294
InternalIterator* TableCache::DoNewIterator(
295
    const ReadOptions& options, TableReaderWithHandle* trwh, const Slice& filter,
296
3.76M
    bool for_compaction, Arena* arena, bool skip_filters) {
297
3.76M
  RecordTick(ioptions_.statistics, NO_TABLE_CACHE_ITERATORS);
298
299
3.76M
  InternalIterator* result =
300
3.76M
      trwh->table_reader->NewIterator(options, arena, skip_filters);
301
302
3.76M
  if (trwh->created_new) {
303
465
    DCHECK(trwh->handle == nullptr);
304
465
    result->RegisterCleanup(&DeleteTableReader, trwh->table_reader, nullptr);
305
3.76M
  } else if (trwh->handle != nullptr) {
306
3.75M
    result->RegisterCleanup(&UnrefEntry, cache_, trwh->handle);
307
3.75M
  }
308
309
3.76M
  if (for_compaction) {
310
45.5k
    trwh->table_reader->SetupForCompaction();
311
45.5k
  }
312
313
3.76M
  if (ioptions_.iterator_replacer) {
314
3.42M
    result = (*ioptions_.iterator_replacer)(result, arena, filter);
315
3.42M
  }
316
317
3.76M
  trwh->Release();
318
319
3.76M
  return result;
320
3.76M
}
321
322
InternalIterator* TableCache::NewIterator(
323
    const ReadOptions& options, const EnvOptions& env_options,
324
    const InternalKeyComparatorPtr& icomparator, const FileDescriptor& fd, const Slice& filter,
325
    TableReader** table_reader_ptr, HistogramImpl* file_read_hist,
326
250k
    bool for_compaction, Arena* arena, bool skip_filters) {
327
250k
  PERF_TIMER_GUARD(new_table_iterator_nanos);
328
329
250k
  if (table_reader_ptr != nullptr) {
330
16.8k
    *table_reader_ptr = nullptr;
331
16.8k
  }
332
333
250k
  TableReaderWithHandle trwh;
334
250k
  Status s = DoGetTableReaderForIterator(options, env_options, icomparator, fd, &trwh,
335
250k
      file_read_hist, for_compaction, skip_filters);
336
250k
  if (!s.ok()) {
337
33
    return NewErrorInternalIterator(s, arena);
338
33
  }
339
340
250k
  if (table_reader_ptr != nullptr) {
341
16.8k
    *table_reader_ptr = trwh.table_reader;
342
16.8k
  }
343
344
250k
  InternalIterator* result = DoNewIterator(
345
250k
      options, &trwh, filter, for_compaction, arena, skip_filters);
346
347
250k
  return result;
348
250k
}
349
350
Status TableCache::Get(const ReadOptions& options,
351
    const InternalKeyComparatorPtr& internal_comparator,
352
    const FileDescriptor& fd, const Slice& k,
353
    GetContext* get_context, HistogramImpl* file_read_hist,
354
8.29M
    bool skip_filters) {
355
8.29M
  TableReader* t = fd.table_reader;
356
8.29M
  Status s;
357
8.29M
  Cache::Handle* handle = nullptr;
358
8.29M
  std::string* row_cache_entry = nullptr;
359
360
8.29M
#ifndef ROCKSDB_LITE
361
8.29M
  IterKey row_cache_key;
362
8.29M
  std::string row_cache_entry_buffer;
363
364
  // Check row cache if enabled. Since row cache does not currently store
365
  // sequence numbers, we cannot use it if we need to fetch the sequence.
366
8.29M
  if (ioptions_.row_cache && !get_context->NeedToReadSequence()) {
367
51.6k
    uint64_t fd_number = fd.GetNumber();
368
51.6k
    auto user_key = ExtractUserKey(k);
369
    // We use the user key as cache key instead of the internal key,
370
    // otherwise the whole cache would be invalidated every time the
371
    // sequence key increases. However, to support caching snapshot
372
    // reads, we append the sequence number (incremented by 1 to
373
    // distinguish from 0) only in this case.
374
51.6k
    uint64_t seq_no =
375
51.6k
        options.snapshot == nullptr ? 0 : 1 + GetInternalKeySeqno(k);
376
377
    // Compute row cache key.
378
51.6k
    row_cache_key.TrimAppend(row_cache_key.Size(), row_cache_id_.data(),
379
51.6k
        row_cache_id_.size());
380
51.6k
    AppendVarint64(&row_cache_key, fd_number);
381
51.6k
    AppendVarint64(&row_cache_key, seq_no);
382
51.6k
    row_cache_key.TrimAppend(row_cache_key.Size(), user_key.cdata(),
383
51.6k
        user_key.size());
384
385
51.6k
    if (auto row_handle = ioptions_.row_cache->Lookup(row_cache_key.GetKey(), options.query_id)) {
386
6.61k
      auto found_row_cache_entry = static_cast<const std::string*>(
387
6.61k
          ioptions_.row_cache->Value(row_handle));
388
6.61k
      replayGetContextLog(*found_row_cache_entry, user_key, get_context);
389
6.61k
      ioptions_.row_cache->Release(row_handle);
390
6.61k
      RecordTick(ioptions_.statistics, ROW_CACHE_HIT);
391
6.61k
      return Status::OK();
392
6.61k
    }
393
394
    // Not found, setting up the replay log.
395
45.0k
    RecordTick(ioptions_.statistics, ROW_CACHE_MISS);
396
45.0k
    row_cache_entry = &row_cache_entry_buffer;
397
45.0k
  }
398
8.29M
#endif  // ROCKSDB_LITE
399
400
8.29M
  if (!t) {
401
8.23M
    s = FindTable(env_options_, internal_comparator, fd, &handle,
402
8.23M
                  options.query_id, options.read_tier == kBlockCacheTier /* no_io */,
403
8.23M
                  true /* record_read_stats */, file_read_hist, skip_filters);
404
8.23M
    if (s.ok()) {
405
8.23M
      t = GetTableReaderFromHandle(handle);
406
8.23M
    }
407
8.23M
  }
408
8.29M
  if (s.ok()) {
409
8.29M
    get_context->SetReplayLog(row_cache_entry);  // nullptr if no cache.
410
8.29M
    s = t->Get(options, k, get_context, skip_filters);
411
8.29M
    get_context->SetReplayLog(nullptr);
412
8.29M
    if (handle != nullptr) {
413
8.23M
      ReleaseHandle(handle);
414
8.23M
    }
415
575
  } else if (options.read_tier == kBlockCacheTier && s.IsIncomplete()) {
416
    // Couldn't find Table in cache but treat as kFound if no_io set
417
100
    get_context->MarkKeyMayExist();
418
100
    return Status::OK();
419
100
  }
420
421
8.29M
#ifndef ROCKSDB_LITE
422
  // Put the replay log in row cache only if something was found.
423
8.29M
  if (s.ok() && row_cache_entry && !row_cache_entry->empty()) {
424
42.7k
    size_t charge =
425
42.7k
        row_cache_key.Size() + row_cache_entry->size() + sizeof(std::string);
426
42.7k
    void* row_ptr = new std::string(std::move(*row_cache_entry));
427
42.7k
    s = ioptions_.row_cache->Insert(row_cache_key.GetKey(), options.query_id, row_ptr, charge,
428
42.7k
                                    &DeleteEntry<std::string>);
429
42.7k
  }
430
8.29M
#endif  // ROCKSDB_LITE
431
432
8.29M
  return s;
433
8.29M
}
434
435
yb::Result<TableCache::TableReaderWithHandle> TableCache::GetTableReader(
436
    const EnvOptions& env_options, const InternalKeyComparatorPtr& internal_comparator,
437
    const FileDescriptor& fd, const QueryId query_id, const bool no_io,
438
6.45M
    HistogramImpl* file_read_hist, const bool skip_filters) {
439
6.45M
  TableReaderWithHandle trwh;
440
6.45M
  trwh.table_reader = fd.table_reader;
441
6.45M
  if (trwh.table_reader == nullptr) {
442
6.43M
    RETURN_NOT_OK(FindTable(
443
6.43M
        env_options, internal_comparator, fd, &trwh.handle, query_id, no_io,
444
6.43M
        /* record_read_stats =*/ true, file_read_hist, skip_filters));
445
6.43M
    trwh.table_reader = GetTableReaderFromHandle(trwh.handle);
446
6.43M
    trwh.cache = cache_;
447
6.43M
  }
448
6.45M
  return trwh;
449
6.45M
}
450
451
Status TableCache::GetTableProperties(
452
    const EnvOptions& env_options,
453
    const InternalKeyComparatorPtr& internal_comparator, const FileDescriptor& fd,
454
94.1k
    std::shared_ptr<const TableProperties>* properties, bool no_io) {
455
94.1k
  Status s;
456
94.1k
  auto table_reader = fd.table_reader;
457
  // table already been pre-loaded?
458
94.1k
  if (table_reader) {
459
1.18k
    *properties = table_reader->GetTableProperties();
460
461
1.18k
    return s;
462
1.18k
  }
463
464
92.9k
  Cache::Handle* table_handle = nullptr;
465
92.9k
  s = FindTable(env_options, internal_comparator, fd, &table_handle, kDefaultQueryId, no_io);
466
92.9k
  if (!s.ok()) {
467
24.0k
    return s;
468
24.0k
  }
469
68.9k
  assert(table_handle);
470
68.9k
  auto table = GetTableReaderFromHandle(table_handle);
471
68.9k
  *properties = table->GetTableProperties();
472
68.9k
  ReleaseHandle(table_handle);
473
68.9k
  return s;
474
68.9k
}
475
476
size_t TableCache::GetMemoryUsageByTableReader(
477
    const EnvOptions& env_options,
478
    const InternalKeyComparatorPtr& internal_comparator,
479
80
    const FileDescriptor& fd) {
480
80
  Status s;
481
80
  auto table_reader = fd.table_reader;
482
  // table already been pre-loaded?
483
80
  if (table_reader) {
484
0
    return table_reader->ApproximateMemoryUsage();
485
0
  }
486
487
80
  Cache::Handle* table_handle = nullptr;
488
80
  s = FindTable(env_options, internal_comparator, fd, &table_handle, kDefaultQueryId, true);
489
80
  if (!s.ok()) {
490
3
    return 0;
491
3
  }
492
77
  assert(table_handle);
493
77
  auto table = GetTableReaderFromHandle(table_handle);
494
77
  auto ret = table->ApproximateMemoryUsage();
495
77
  ReleaseHandle(table_handle);
496
77
  return ret;
497
77
}
498
499
46.9k
void TableCache::Evict(Cache* cache, uint64_t file_number) {
500
46.9k
  cache->Erase(GetSliceForFileNumber(&file_number));
501
46.9k
}
502
503
}  // namespace rocksdb