Question

Formatted question description: https://leetcode.ca/all/146.html

146	LRU Cache

Design and implement a data structure for Least Recently Used (LRU) cache.
It should support the following operations: get and put.
    get(key) - Get the value (will always be positive) of the key if the key exists in the cache, otherwise return -1.
    put(key, value) - Set or insert the value if the key is not already present.
                        When the cache reaches its capacity, it should invalidate the least recently used item
                        before inserting a new item.

Example:

LRUCache cache = new LRUCache( 2 ); // capacity

    cache.put(1, 1);
    cache.put(2, 2);
    cache.get(1);       // returns 1
    cache.put(3, 3);    // evicts key 2
    cache.get(2);       // returns -1 (not found)
    cache.put(4, 4);    // evicts key 1
    cache.get(1);       // returns -1 (not found)
    cache.get(3);       // returns 3
    cache.get(4);       // returns 4

Follow up:
Could you do both operations in O(1) time complexity?


@tag-design

Algorithm

Solution

Create a class MyNode, which contains data fields int key, int value, MyNode prev and MyNode next. That is, each node of type MyNode has a key and a value, and has references to its previous node and the next node.

In class LRUCache, data fields include int capacity that stores the capacity of the cache, Map<Integer, MyNode> map that maps each key to its node, and MyNode head and MyNode tail that represent the head node and the tail node respectively. For a Least Recently Used cache, the most recently used node is the head node and the least recently used node is the tail node.

In the constructor, initialize capacity with the given capacity.

In get(key), if key is not in map, then key is not in the cache, so return -1. If key is in map, obtain the node and its value, remove the node and set the node to be the head, and return value.

In put(key, value), if map contains key, then obtain the node and update its value, remove the node, and set the node to be the head. If map does not contain key, then create a new node using key and value, and set the new node to be the head. If the size of map is greater than or equal to capacity, then remove the node tail and remove the corresponding entry in map. Add a new entry of the new node into the map.

Two supplementary methods are needed.

  1. Method remove(MyNode node). Obtain MyNode’s previous node and next node, and update their references to other nodes accordingly. If MyNode is head or tail, then update head or tail accordingly.
  2. Method setHead(MyNode node). Set MyNode to be the new head and set the previous head’s reference accordingly. If tail is null, then update tail as well.

Code

Java

  • class LRUCache {
        // LRU cache: hash map for O(1) lookup + doubly linked list for O(1) recency
        // updates. head = most recently used, tail = least recently used.
        int capacity;
        Map<Integer, MyNode> map = new HashMap<Integer, MyNode>(); // key => Node[key,val]
        MyNode head = null; // most recently used
        MyNode tail = null; // least recently used
    
        public LRUCache(int capacity) {
            this.capacity = capacity;
        }
    
        // Returns the value stored under key, or -1 if absent; a hit moves the
        // node to the head (marks it most recently used).
        public int get(int key) {
            MyNode node = map.get(key);
            if (node == null) {
                return -1;
            } else {
                remove(node);
                setHead(node);
                return node.value;
            }
        }
    
        // Inserts or updates key; evicts the least recently used entry (tail)
        // when the capacity is exceeded.
        public void put(int key, int value) {
            if (map.containsKey(key)) {
                MyNode node = map.get(key);
                node.value = value;
                remove(node);
                setHead(node);
            } else {
                MyNode node = new MyNode(key, value);
                setHead(node);
                map.put(key, node);
                // The new entry is already in the map at this point, so evict only
                // when the size EXCEEDS capacity. (Using >= here would evict one
                // entry too many and shrink the effective capacity to capacity - 1.)
                if (map.size() > capacity) {
                    map.remove(tail.key);
                    remove(tail);
                }
            }
        }
    
        // Unlinks node from the doubly linked list; fixes head/tail when the
        // removed node was at either end.
        public void remove(MyNode node) {
            MyNode prev = node.prev;
            MyNode next = node.next;
    
            // process previous node
            if (prev != null)
                prev.next = next;
            else
                head = next;
    
            // process next node
            if (next != null)
                next.prev = prev;
            else
                tail = prev;
        }
    
        // Links node in front of the current head; initializes tail on the first
        // insert into an empty list.
        public void setHead(MyNode node) {
            node.next = head;
            node.prev = null;
            if (head != null)
                head.prev = node;
            head = node;
            if (tail == null)
                tail = head;
        }
    }
    
    class MyNode {
        // One cache entry, linkable into the doubly linked recency list.
        int key;
        int value;
        MyNode prev; // neighbor toward the head (more recently used)
        MyNode next; // neighbor toward the tail (less recently used)
    
        public MyNode(int key, int value) {
            this.key = key;
            this.value = value;
        }
    }
    /**
     * Your LRUCache object will be instantiated and called as such:
     * LRUCache obj = new LRUCache(capacity);
     * int param_1 = obj.get(key);
     * obj.put(key,value);
     */
    
    
  • // OJ: https://leetcode.com/problems/lru-cache
    // Time:
    //    LRUCache: O(1)
    //    get: O(1)
    //    put: O(1)
    // Space: O(N)
    class LRUCache {
        int cap;
        // Front of `items` = most recently used; back = least recently used.
        list<pair<int, int>> items;                               // (key, value) pairs
        unordered_map<int, list<pair<int, int>>::iterator> index; // key -> node in `items`
        // Mark `key` as most recently used by splicing its node to the front
        // (O(1), no element copy), then refresh the stored iterator.
        void touch(int key) {
            items.splice(items.begin(), items, index[key]);
            index[key] = items.begin();
        }
    public:
        LRUCache(int capacity) : cap(capacity) {}
        // Return the value stored under `key`, or -1 if absent; a hit refreshes
        // the entry's recency.
        int get(int key) {
            auto it = index.find(key);
            if (it == index.end())
                return -1;
            touch(key);
            return items.front().second;
        }
        // Insert or overwrite `key`; evict the least recently used entry (list
        // back) once the capacity is exceeded.
        void put(int key, int value) {
            auto it = index.find(key);
            if (it != index.end()) {
                touch(key);
                items.front().second = value;
                return;
            }
            items.emplace_front(key, value);
            index[key] = items.begin();
            if (items.size() > static_cast<size_t>(cap)) {
                index.erase(items.back().first);
                items.pop_back();
            }
        }
    };
    
  • '''
    example:
    
    >>> od = collections.OrderedDict()
    >>>
    >>> od[1]=1
    >>> od[2]=2
    >>> od[3]=3
    >>>
    >>> od
    OrderedDict([(1, 1), (2, 2), (3, 3)])
    >>>
    >>> od.move_to_end(1)
    >>>
    >>> od
    OrderedDict([(2, 2), (3, 3), (1, 1)])
    >>>
    >>> od.get(1)
    1
    >>>
    >>> od.popitem()
    (1, 1)
    >>>
    >>> od
    OrderedDict([(2, 2), (3, 3)])
    >>>
    >>> od[1]=1
    >>> od
    OrderedDict([(2, 2), (3, 3), (1, 1)])
    >>>
    >>> od.popitem(last=False)
    (2, 2)
    >>> od
    OrderedDict([(3, 3), (1, 1)])
    
    '''
    
    import collections
    
    class LRUCache:
        """LRU cache backed by an OrderedDict; the last entry is the most recent."""
    
        def __init__(self, capacity: 'int'):
            self.cache = collections.OrderedDict()
            self.remain = capacity  # free slots left before eviction kicks in
    
        def get(self, key: 'int') -> 'int':
            """Return the value for key (refreshing its recency) or -1 if missing."""
            if key in self.cache:
                self.cache.move_to_end(key)  # end of the dict = most recently used
                return self.cache[key]
            return -1
    
        def put(self, key: 'int', value: 'int') -> 'None':
            """Insert or update key; evict the LRU entry once capacity is used up."""
            if key in self.cache:
                del self.cache[key]  # drop the old slot so reinsertion lands at the end
            elif self.remain > 0:
                self.remain -= 1
            else:
                self.cache.popitem(last=False)  # front of the dict = least recently used
            self.cache[key] = value  # (re)insert at the end: most recently used
    
    
    ### below solution with no ordered-dict
    class DLinkedNode:
        """A (key, value) entry that can be chained into a doubly linked list."""
    
        def __init__(self, key=0, value=0):
            self.key = key
            self.val = value
            # Link slots; populated once the node is inserted into a list.
            self.prev = self.next = None
    
    
    class DLinkedList:
        """Doubly linked list with dummy sentinels; the head side is most recent."""
    
        def __init__(self):
            # Sentinel nodes: real elements live strictly between head and tail,
            # so insert/remove never have to special-case the ends.
            self.head = DLinkedNode()
            self.tail = DLinkedNode()
            self.head.next = self.tail
            self.tail.prev = self.head
    
        def add_node(self, node):
            """Insert node right after the dummy head (most-recent position)."""
            first = self.head.next
            node.prev = self.head
            node.next = first
            self.head.next = node
            first.prev = node
    
        def remove_node(self, node):
            """Unlink node from the list and return its key."""
            node.prev.next = node.next
            node.next.prev = node.prev
            return node.key
    
        def remove_tail(self):
            """Drop the least-recent node (just before the dummy tail); return its key."""
            return self.remove_node(self.tail.prev)
    
        def move_to_head(self, node):
            """Relink node into the most-recent position."""
            self.remove_node(node)
            self.add_node(node)
    
    
    class LRUCache:
        """LRU cache built on a hash map plus an explicit doubly linked list."""
    
        def __init__(self, capacity: int):
            self.capacity = capacity         # counts remaining free slots
            self.nodes_map = {}              # key -> DLinkedNode
            self.cache_list = DLinkedList()  # head side = most recently used
    
        def get(self, key: int) -> int:
            """Return the cached value (refreshing its recency) or -1 if absent."""
            node = self.nodes_map.get(key)
            if node is None:
                return -1
            self.cache_list.move_to_head(node)
            return node.val
    
        def put(self, key: int, value: int) -> None:
            """Insert or update key; evict the least recently used entry when full."""
            node = self.nodes_map.get(key)
            if node is not None:
                self.cache_list.move_to_head(node)
                node.val = value
                return
            if self.capacity > 0:
                self.capacity -= 1           # still room: consume one free slot
            else:
                evicted_key = self.cache_list.remove_tail()
                self.nodes_map.pop(evicted_key)
            fresh = DLinkedNode(key, value)
            self.nodes_map[key] = fresh
            self.cache_list.add_node(fresh)
    
    

All Problems

All Solutions