1. Problem
Design and implement a data structure that satisfies the constraints of an LRU (Least Recently Used) cache.
Implement the LRUCache class:
LRUCache(int capacity) initializes the LRU cache with a positive integer capacity.
int get(int key) returns the value of key if it exists in the cache, and -1 otherwise.
void put(int key, int value) updates the value of key if it already exists; otherwise it inserts the key-value pair into the cache. If the insertion makes the number of keys exceed capacity, the least recently used key must be evicted.
Both get and put must run in O(1) average time complexity.
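For example, with capacity 2 the cache behaves as follows (an illustrative call sequence, assuming the LRUCache class implemented below; the comments show the cache contents after each call):
```cpp
LRUCache cache(2);
cache.put(1, 1);   // cache: {1=1}
cache.put(2, 2);   // cache: {1=1, 2=2}
cache.get(1);      // returns 1; key 1 is now the most recently used
cache.put(3, 3);   // over capacity, evicts key 2 (least recently used); cache: {1=1, 3=3}
cache.get(2);      // returns -1, key 2 was evicted
```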
2. Solution
A single insertion has to be O(1), and a lookup also has to be O(1). Therefore we need a hash table combined with a doubly linked list to make this work.
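As a minimal sketch of how the two pieces fit together (my illustrative names, not the code below): the hash table maps each key to its node in the list, and the list keeps the nodes ordered from most to least recently used, so lookup, move-to-front, and eviction from the tail are all O(1).
```cpp
#include <unordered_map>

// Illustrative layout only, not the final solution.
struct Node {
    int key, value;
    Node *prev, *next;       // doubly linked: O(1) unlink and relink
};

struct LRUSkeleton {
    std::unordered_map<int, Node*> index;  // key -> its node in the list
    Node *head = nullptr;                  // most recently used end
    Node *tail = nullptr;                  // least recently used end, evicted first
};
```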
2.1 My solution
Just write the doubly linked list by hand. We also need to maintain the head and the tail of the list.
The tricky part is the edge cases, such as an empty list or a list with only a head node.
```cpp
class LRUCache {
public:
struct LRUNode {
LRUNode(int k, int v):key(k),value(v) {
}
int key;
int value;
};
struct LRUList {
LRUList *pre;
LRUList *next;
LRUNode *node;
};
LRUCache(int capacity):max_cap_(capacity){
}
int get(int key) {
// cout << "get " << key << "\n";
if ( hs.count(key) ) {
LRUList *cur = hs[key];
            // Move the accessed node to the head (most recently used).
            if ( cur != head) {
if ( cur == tail)
tail = cur->pre;
cur->pre->next = cur->next;
if (cur->next)
cur->next->pre = cur->pre;
head->pre = cur;
cur->next = head;
cur->pre = NULL;
head = cur;
}
return cur->node->value;
}
return -1;
}
void put(int key, int value) {
// cout << "put: [ " << key <<", " << value << " ]" << "\n";
if ( hs.count(key) ) {
LRUList *cur = hs[key];
cur->node->value = value;
if (cur == head)
return;
if ( cur == tail) {
tail = cur->pre;
}
cur->pre->next = cur->next;
if (cur->next)
cur->next->pre = cur->pre;
head->pre = cur;
cur->next = head;
cur->pre = NULL;
head = cur;
}
else {
LRUList *cur = new LRUList;
cur->pre = cur->next = NULL;
cur->node = new LRUNode(key, value);
++cur_cap_;
hs[key] = cur;
if (head)
head->pre = cur;
if (tail == NULL)
tail = cur;
cur->next = head;
head = cur;
            // Over capacity: evict the least recently used node at the tail.
            if ( cur_cap_ > max_cap_) {
LRUList *del = tail;
// cout << "del " << del->node->key << "\n";
hs.erase(del->node->key);
tail = del->pre;
if (tail)
tail->next = NULL;
del->pre = NULL;
del->next = NULL;
delete del->node;
delete del;
--cur_cap_;
}
}
}
private:
int max_cap_;
int cur_cap_{};
unordered_map<int,LRUList *> hs;
LRUList *head{};
LRUList *tail{};
};
/**
* Your LRUCache object will be instantiated and called as such:
* LRUCache* obj = new LRUCache(capacity);
* int param_1 = obj->get(key);
* obj->put(key,value);
 */
```
2.2 0x3f's solution
Looking at 0x3f's solution, the main ideas are splitting the list operations into small helpers and adding a sentinel (dummy) node, which removes all the special-case checks for the head and tail (a small standalone demo of the sentinel trick follows the code below).
```cpp
class LRUCache {
public:
struct LRUNode {
LRUNode():pre(NULL),next(NULL) {
}
LRUNode(int key, int val):k(key),v(val), pre(NULL), next(NULL){
}
int k;
int v;
LRUNode *pre;
LRUNode *next;
};
private:
int cap_;
unordered_map<int, LRUNode *> key_to_node;
LRUNode *dumNode;
public:
LRUCache(int capacity) : cap_(capacity), dumNode(new LRUNode()) {
dumNode->pre = dumNode;
dumNode->next = dumNode;
}
    // Unlink cur from the list; thanks to the sentinel, pre/next are never null.
    void remove(LRUNode *cur) {
cur->pre->next = cur->next;
cur->next->pre = cur->pre;
cur->pre = cur->next = NULL;
}
    // Insert cur right after the sentinel, i.e. at the most-recently-used end.
    void push_front(LRUNode *cur) {
cur->next = dumNode->next;
cur->pre = dumNode;
dumNode->next->pre =cur;
dumNode->next = cur;
}
int get(int key) {
//cout << "get " << key << "\n";
auto it = key_to_node.find(key);
if ( it != key_to_node.end()) {
LRUNode *cur = it->second;
remove( cur );
push_front( cur );
return cur->v;
}
return -1;
}
void put(int key, int value) {
//cout << "put [ " << key << ", " << value << " ]\n";
auto it = key_to_node.find(key);
if ( it != key_to_node.end()) {
LRUNode *cur = it->second;
cur->v = value;
remove( cur );
push_front( cur );
}
else {
LRUNode *cur = new LRUNode(key, value);
key_to_node[key] = cur;
push_front(cur);
if ( key_to_node.size() > cap_) {
                // The node just before the sentinel is the least recently used one.
                LRUNode *del_node = dumNode->pre;
key_to_node.erase( del_node->k);
remove( del_node);
delete del_node;
}
}
}
};
```
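To see why the sentinel removes the edge cases, here is a tiny self-contained demo (my own names, not part of 0x3f's code): with a circular dummy node, push_front and remove never touch a null pointer, even on an empty list.
```cpp
#include <iostream>

struct N { int v; N *pre, *next; };

int main() {
    N dum{0, nullptr, nullptr};
    dum.pre = dum.next = &dum;          // empty list: sentinel points to itself

    N a{1, nullptr, nullptr}, b{2, nullptr, nullptr};
    auto push_front = [&](N *cur) {     // same shape as the solution's push_front
        cur->next = dum.next;
        cur->pre = &dum;
        dum.next->pre = cur;
        dum.next = cur;
    };
    auto remove = [&](N *cur) {         // same shape as the solution's remove
        cur->pre->next = cur->next;
        cur->next->pre = cur->pre;
    };

    push_front(&a);                     // list: a
    push_front(&b);                     // list: b, a
    remove(&a);                         // list: b
    std::cout << dum.next->v << "\n";   // prints 2, the current front element
    std::cout << (dum.pre == dum.next) << "\n";  // prints 1: one element, front == back
}
```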
Another option is to use the standard library's doubly linked list, std::list, although the standard library API honestly feels awkward to use! (The key call is std::list::splice; a small standalone demo of it follows the solution.)
```cpp
class LRUCache {
public:
struct LRUNode {
LRUNode(int k, int v):key(k),value(v) {
}
int key;
int value;
};
LRUCache(int capacity):max_cap_(capacity){
}
int get(int key) {
// cout << "get " << key << "\n";
auto it = key_to_it.find(key);
if ( it == key_to_it.end()) {
return -1;
}
        // splice moves the node to the front in O(1) and keeps iterators valid,
        // so key_to_it does not need to be updated.
        cached_list.splice( cached_list.begin(), cached_list, it->second);
return cached_list.begin()->value;
}
void put(int key, int value) {
auto it = key_to_it.find( key );
if ( it != key_to_it.end()) {
it->second->value = value;
cached_list.splice( cached_list.begin(), cached_list, it->second);
}
else {
            // Construct the node in place; the original `new` + push_front(*nNode)
            // copied the node into the list and leaked the heap allocation.
            cached_list.emplace_front(key, value);
key_to_it[key] = cached_list.begin();
if ( cached_list.size() > max_cap_) {
key_to_it.erase(cached_list.back().key );
cached_list.pop_back();
}
}
}
private:
int max_cap_;
unordered_map<int, list<LRUNode>::iterator> key_to_it;
list<LRUNode> cached_list;
};
/**
* Your LRUCache object will be instantiated and called as such:
* LRUCache* obj = new LRUCache(capacity);
* int param_1 = obj->get(key);
* obj->put(key,value);
 */
```
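Since std::list::splice does the heavy lifting in this version, here is a tiny standalone demo of what it does: it moves an element to a new position within the same list in O(1), and iterators to the moved element stay valid, which is exactly why key_to_it never needs to be updated. This snippet is only an illustration, not part of the solution:
```cpp
#include <iostream>
#include <iterator>
#include <list>

int main() {
    std::list<int> l{1, 2, 3, 4};
    auto it = std::next(l.begin(), 2);       // iterator to the element 3
    l.splice(l.begin(), l, it);              // move that element to the front, O(1)
    std::cout << *it << "\n";                // `it` is still valid: prints 3
    for (int x : l) std::cout << x << ' ';   // prints 3 1 2 4
    std::cout << "\n";
}
```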