-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathLruCache.h
More file actions
327 lines (278 loc) · 8.88 KB
/
LruCache.h
File metadata and controls
327 lines (278 loc) · 8.88 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
#pragma once
#include <cstring>
#include <list>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <vector>
#include <thread>
#include <cmath>
#include "CacheStrategy.h"
namespace MyCache
{
// Forward declaration
template<typename Key, typename Value> class LruCache;
template<typename Key, typename Value>
class LruNode
{
private:
    Key key_;
    Value value_;
    size_t accessCount_;                       // Number of times this entry has been accessed
    std::weak_ptr<LruNode<Key, Value>> prev_;  // weak_ptr breaks the shared_ptr ownership cycle in the doubly linked list
    std::shared_ptr<LruNode<Key, Value>> next_;
public:
    // Construct a node with an initial access count of 1.
    // Parameters are taken by const reference to avoid an extra copy
    // (backward compatible with the previous by-value signature).
    LruNode(const Key& key, const Value& value)
        : key_(key)
        , value_(value)
        , accessCount_(1)
    {}
    // Basic accessors
    Key getKey() const { return key_; }
    Value getValue() const { return value_; }
    void setValue(const Value& value) { value_ = value; }
    size_t getAccessCount() const { return accessCount_; }
    void incrementAccessCount() { ++accessCount_; }
    // LruCache manipulates prev_/next_ directly when relinking nodes.
    friend class LruCache<Key, Value>;
};
template<typename Key, typename Value>
class LruCache : public CacheStrategy<Key, Value>
{
public:
    using LruNodeType = LruNode<Key, Value>;
    using NodePtr = std::shared_ptr<LruNodeType>;
    using NodeMap = std::unordered_map<Key, NodePtr>;

    // Create a cache holding at most `capacity` entries.
    // A non-positive capacity disables insertion (put() becomes a no-op).
    LruCache(int capacity)
        : capacity_(capacity)
    {
        initializeList();
    }

    ~LruCache() override = default;

    // Insert a new entry or update an existing one; either way the entry
    // becomes the most recently used. Evicts the LRU entry when full.
    // Thread-safe: guarded by mutex_.
    void put(Key key, Value value) override
    {
        if (capacity_ <= 0)
            return;
        std::lock_guard<std::mutex> lock(mutex_);
        auto it = nodeMap_.find(key);
        if (it != nodeMap_.end())
        {
            // Key exists: update value and mark as recently accessed
            updateExistingNode(it->second, value);
            return;
        }
        addNewNode(key, value);
    }

    // Look up `key`; on a hit, copy the value into `value`, promote the
    // entry to most-recently-used and return true. Returns false on a miss.
    bool get(Key key, Value& value) override
    {
        std::lock_guard<std::mutex> lock(mutex_);
        auto it = nodeMap_.find(key);
        if (it != nodeMap_.end())
        {
            moveToMostRecent(it->second);
            value = it->second->getValue();
            return true;
        }
        return false;
    }

    // Convenience overload: returns the cached value on a hit, or a
    // value-initialized Value{} on a miss. Callers that must distinguish
    // a miss from a cached default value should use get(key, value).
    Value get(Key key) override
    {
        // Value-initialization is safe for any type (unlike memset, which
        // would corrupt non-trivial types such as std::string).
        Value value{};
        get(key, value);
        return value;
    }

    // Remove a specific element (no-op if the key is absent).
    void remove(Key key)
    {
        std::lock_guard<std::mutex> lock(mutex_);
        auto it = nodeMap_.find(key);
        if (it != nodeMap_.end())
        {
            removeNode(it->second);
            nodeMap_.erase(it);
        }
    }

private:
    void initializeList()
    {
        // Sentinel head and tail nodes mean list operations never have to
        // special-case an empty list.
        dummyHead_ = std::make_shared<LruNodeType>(Key(), Value());
        dummyTail_ = std::make_shared<LruNodeType>(Key(), Value());
        dummyHead_->next_ = dummyTail_;
        dummyTail_->prev_ = dummyHead_;
    }

    void updateExistingNode(const NodePtr& node, const Value& value)
    {
        node->setValue(value);
        moveToMostRecent(node);
    }

    void addNewNode(const Key& key, const Value& value)
    {
        // capacity_ > 0 is guaranteed by put(); the cast avoids a
        // signed/unsigned comparison warning.
        if (nodeMap_.size() >= static_cast<size_t>(capacity_))
        {
            evictLeastRecent();
        }
        NodePtr newNode = std::make_shared<LruNodeType>(key, value);
        insertNode(newNode);
        nodeMap_[key] = newNode;
    }

    // Move the node to the MRU position (just before the tail sentinel).
    void moveToMostRecent(const NodePtr& node)
    {
        removeNode(node);
        insertNode(node);
    }

    // Unlink the node from the list. Safe on an already-unlinked node:
    // both link checks fail and the call becomes a no-op.
    void removeNode(const NodePtr& node)
    {
        if (!node->prev_.expired() && node->next_)
        {
            auto prev = node->prev_.lock(); // promote weak_ptr to shared_ptr
            prev->next_ = node->next_;
            node->next_->prev_ = prev;
            node->next_ = nullptr; // break the node's forward link
        }
    }

    // Insert the node just before the tail sentinel (MRU end).
    void insertNode(const NodePtr& node)
    {
        node->next_ = dummyTail_;
        node->prev_ = dummyTail_->prev_;
        dummyTail_->prev_.lock()->next_ = node; // promote weak_ptr to shared_ptr
        dummyTail_->prev_ = node;
    }

    // Evict the least recently used node (the one right after the head sentinel).
    void evictLeastRecent()
    {
        NodePtr leastRecent = dummyHead_->next_;
        removeNode(leastRecent);
        nodeMap_.erase(leastRecent->getKey());
    }

private:
    int capacity_;       // Maximum number of entries; <= 0 disables the cache
    NodeMap nodeMap_;    // Maps key -> list node for O(1) lookup
    std::mutex mutex_;   // Protects nodeMap_ and the linked list
    NodePtr dummyHead_;  // Sentinel head (LRU side)
    NodePtr dummyTail_;  // Sentinel tail (MRU side)
};
// LRU optimization: LRU-K variant — an entry is promoted into the main
// cache only after it has been accessed k times, filtering out ephemeral
// one-off accesses that would otherwise pollute the cache.
template<typename Key, typename Value>
class LruKCache : public LruCache<Key, Value>
{
public:
    // capacity:        size of the main cache
    // historyCapacity: size of the access-history cache
    // k:               access-count threshold for promotion into the main cache
    // (Initializer list follows declaration order: k_ before historyList_,
    // avoiding a -Wreorder warning.)
    LruKCache(int capacity, int historyCapacity, int k)
        : LruCache<Key, Value>(capacity) // Call base constructor
        , k_(k)
        , historyList_(std::make_unique<LruCache<Key, size_t>>(historyCapacity))
    {}

    Value get(Key key)
    {
        // First try to fetch from the main cache (a hit also promotes the
        // entry to MRU inside the base class).
        Value value{};
        bool inMainCache = LruCache<Key, Value>::get(key, value);

        // History bookkeeping is NOT covered by the base-class mutex, so it
        // needs its own lock to be safe under concurrent get()/put().
        std::lock_guard<std::mutex> lock(historyMutex_);

        // Record this access (a history miss yields 0, so the first access
        // stores a count of 1).
        size_t historyCount = historyList_->get(key);
        ++historyCount;
        historyList_->put(key, historyCount);

        // If present in the main cache, return it
        if (inMainCache)
        {
            return value;
        }

        // Promote to the main cache once the entry has been accessed k_ times
        // (cast avoids a signed/unsigned comparison warning).
        if (historyCount >= static_cast<size_t>(k_))
        {
            // Check whether we stored a value earlier via put()
            auto it = historyValueMap_.find(key);
            if (it != historyValueMap_.end())
            {
                Value storedValue = it->second;
                // Cleanup history
                historyList_->remove(key);
                historyValueMap_.erase(it);
                // Insert into main cache
                LruCache<Key, Value>::put(key, storedValue);
                return storedValue;
            }
            // No stored value available — fall through to return the default
        }

        // Not found or not yet eligible for promotion
        return value;
    }

    void put(Key key, Value value)
    {
        // If already resident in the main cache, just refresh its value.
        Value existingValue{};
        bool inMainCache = LruCache<Key, Value>::get(key, existingValue);
        if (inMainCache)
        {
            LruCache<Key, Value>::put(key, value);
            return;
        }

        // Guard the history structures (see get()).
        std::lock_guard<std::mutex> lock(historyMutex_);

        // Update access history
        size_t historyCount = historyList_->get(key);
        ++historyCount;
        historyList_->put(key, historyCount);

        // Keep the value for potential promotion
        historyValueMap_[key] = value;

        // Promote once the access threshold is reached
        if (historyCount >= static_cast<size_t>(k_))
        {
            historyList_->remove(key);
            historyValueMap_.erase(key);
            LruCache<Key, Value>::put(key, value);
        }
    }

private:
    int k_;                                               // Access threshold to enter main cache
    std::unique_ptr<LruCache<Key, size_t>> historyList_;  // Access history (value = access count)
    std::unordered_map<Key, Value> historyValueMap_;      // Values not yet promoted
    std::mutex historyMutex_;                             // Protects historyList_ and historyValueMap_
    // NOTE(review): entries that never reach k_ accesses linger in
    // historyValueMap_ even after historyList_ evicts their counters —
    // consider purging the map entry when its counter is evicted.
};
// LRU optimization: sharding to improve performance under high concurrency
template<typename Key, typename Value>
class HashLruCaches
{
public:
HashLruCaches(size_t capacity, int sliceNum)
: capacity_(capacity)
, sliceNum_(sliceNum > 0 ? sliceNum : std::thread::hardware_concurrency())
{
size_t sliceSize = std::ceil(capacity / static_cast<double>(sliceNum_)); // Capacity per shard
for (int i = 0; i < sliceNum_; ++i)
{
lruSliceCaches_.emplace_back(new LruCache<Key, Value>(sliceSize));
}
}
void put(Key key, Value value)
{
// Hash the key to find its shard
size_t sliceIndex = Hash(key) % sliceNum_;
lruSliceCaches_[sliceIndex]->put(key, value);
}
bool get(Key key, Value& value)
{
// Hash the key to find its shard
size_t sliceIndex = Hash(key) % sliceNum_;
return lruSliceCaches_[sliceIndex]->get(key, value);
}
Value get(Key key)
{
Value value;
memset(&value, 0, sizeof(value));
get(key, value);
return value;
}
private:
// Hash function for the key
size_t Hash(Key key)
{
std::hash<Key> hashFunc;
return hashFunc(key);
}
private:
size_t capacity_; // Total capacity
int sliceNum_; // Number of shards
std::vector<std::unique_ptr<LruCache<Key, Value>>> lruSliceCaches_; // Sharded LRU caches
};
} // namespace MyCache