
A Brief Look at LRU and LFU Caches (You Might Find Them Useful)

蔚1 · Published 2020-04-08 23:30:56

LRU: evict the least recently used entry first.

LFU: evict the least frequently used entry first; among entries with the same usage frequency, fall back to the LRU policy.
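As a point of reference, a plain single-threaded LRU cache can already be had from the JDK: LinkedHashMap keeps entries in access order when constructed with accessOrder = true. A minimal sketch (the class name SimpleLruCache is mine, purely for illustration):

import java.util.LinkedHashMap;
import java.util.Map;

// Minimal single-threaded LRU built on LinkedHashMap's access-order mode.
class SimpleLruCache<K, V> extends LinkedHashMap<K, V> {
    private final int capacity;

    SimpleLruCache(int capacity) {
        super(capacity, 0.75f, true); // accessOrder = true keeps entries in LRU order
        this.capacity = capacity;
    }

    @Override
    protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        // evict the least recently used entry once we exceed capacity
        return size() > capacity;
    }
}

The rest of this article builds both policies by hand, with an eye on lock-free concurrent access, so the bookkeeping is explicit.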

Following the principle of programming to interfaces and abstractions, we first define an abstract cache class:

public abstract class Cache<K, V> {
    // cache capacity
    protected int capacity;

    protected Cache(int capacity) {
        this.capacity = capacity;
    }

    /**
     * Get a cached value.
     */
    public abstract V get(K key);

    /**
     * Store a value in the cache.
     */
    public abstract void put(K key, V value);
}
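With the abstraction in place, call sites only depend on Cache<K, V> and the eviction policy becomes a construction-time choice. A small usage sketch (CacheClientDemo is an illustrative name; it uses the LRUCache and LFUCache classes defined later in this article):

public class CacheClientDemo {
    public static void main(String[] args) {
        // The caller depends only on the Cache abstraction; the eviction policy
        // (LRU vs. LFU) is an implementation detail chosen at construction time.
        Cache<Integer, String> cache = new LRUCache<>(128);   // or: new LFUCache<>(128)
        cache.put(1, "one");
        System.out.println(cache.get(1));   // "one"
        System.out.println(cache.get(2));   // null (miss)
    }
}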

Let's implement LRU first (evict the least recently used entry). To get as close to O(1) time as possible, this example uses a map as the backing store. The question is how to find the least recently used entry in O(1) when the eviction policy kicks in; a doubly linked list seems to be the only structure that fits. Each list node stores the cache key, and all we have to do is push an entry's node to the tail of the list every time that entry is used. The basic idea is simple; here is the node data structure:

import java.lang.reflect.Field;
import sun.misc.Unsafe;

class Node<E> {
    // holds the cache key
    volatile E val;
    volatile Node<E> next;
    volatile Node<E> prev;

    // CAS plumbing; see the Unsafe documentation or source for how these are used.
    // Compared with locks, CAS avoids the kernel-mode switches caused by suspending threads.
    static final sun.misc.Unsafe unsafe;
    private static final long valOffset;
    private static final long nextOffset;
    private static final long prevOffset;

    static {
        try {
            Field unsafeField = Unsafe.class.getDeclaredFields()[0];
            unsafeField.setAccessible(true);
            Unsafe temp = null;
            try {
                temp = (Unsafe) unsafeField.get(null);
            } catch (IllegalArgumentException | IllegalAccessException e) {
                temp = null;
            }
            unsafe = temp;
            temp = null;
            Class<?> k = Node.class;
            valOffset = unsafe.objectFieldOffset(k.getDeclaredField("val"));
            nextOffset = unsafe.objectFieldOffset(k.getDeclaredField("next"));
            prevOffset = unsafe.objectFieldOffset(k.getDeclaredField("prev"));
        } catch (Exception e) {
            throw new Error(e);
        }
    }

    Node(E val) {
        unsafe.putObject(this, valOffset, val);
    }

    boolean casNext(Node<E> expect, Node<E> next) {
        return unsafe.compareAndSwapObject(this, nextOffset, expect, next);
    }

    boolean casPrev(Node<E> expect, Node<E> prev) {
        return unsafe.compareAndSwapObject(this, prevOffset, expect, prev);
    }

    /**
     * Unlink this node. When an existing cache entry is accessed, its node is removed
     * from the doubly linked list and re-inserted at the tail.
     */
    void poll() {
        Node<E> prev;
        Node<E> next;
        do {
            prev = this.prev;
            next = this.next;
        } while (!prev.casNext(this, next) || !next.casPrev(this, prev));
    }

    /**
     * Insert this node just before the tail; used for nodes that were just unlinked
     * or for newly added cache entries.
     */
    void offer(Node<E> tail) {
        Node<E> prev;
        do {
            prev = tail.prev;
            this.prev = prev;
            this.next = tail;
        } while (!prev.casNext(tail, this) || !tail.casPrev(prev, this));
    }
}
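A side note: sun.misc.Unsafe is an internal API and is increasingly restricted on newer JDKs. The same per-field CAS can be expressed with the public java.util.concurrent.atomic updaters. Below is a minimal sketch of an equivalent node (CasNode is an alternative I am adding for illustration, not the class used in the rest of this article):

import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

class CasNode<E> {
    volatile E val;
    volatile CasNode<E> next;
    volatile CasNode<E> prev;

    // Public, supported replacement for the Unsafe field offsets above.
    private static final AtomicReferenceFieldUpdater<CasNode, CasNode> NEXT =
            AtomicReferenceFieldUpdater.newUpdater(CasNode.class, CasNode.class, "next");
    private static final AtomicReferenceFieldUpdater<CasNode, CasNode> PREV =
            AtomicReferenceFieldUpdater.newUpdater(CasNode.class, CasNode.class, "prev");

    CasNode(E val) {
        this.val = val;
    }

    boolean casNext(CasNode<E> expect, CasNode<E> update) {
        return NEXT.compareAndSet(this, expect, update);
    }

    boolean casPrev(CasNode<E> expect, CasNode<E> update) {
        return PREV.compareAndSet(this, expect, update);
    }
}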

Here is the complete LRUCache class:

import java.util.concurrent.ConcurrentHashMap;

public class LRUCache<K, V> extends Cache<K, V> {

    private static class LRUCacheValue<K, V> {
        // the node's value points back to this entry's key in the map
        private volatile Node<K> node;
        // the actual cached value
        private volatile V val;

        private LRUCacheValue(K key, V value) {
            node = new Node<>(key);
            val = value;
        }
    }

    // map holding the cache entries
    private final ConcurrentHashMap<K, LRUCacheValue<K, V>> cache;
    // head node; its next node is the least recently used entry
    private volatile Node<K> head;
    // tail node; its prev node is the most recently used or newly added entry
    private volatile Node<K> tail;
    // state flag used to serialize insertion of new entries; 0 means inserts are allowed
    volatile int state;

    private static final long headOffset;
    private static final long tailOffset;
    private static final long stateOffset;

    static {
        try {
            Class<?> k = LRUCache.class;
            headOffset = Node.unsafe.objectFieldOffset(k.getDeclaredField("head"));
            tailOffset = Node.unsafe.objectFieldOffset(k.getDeclaredField("tail"));
            stateOffset = Node.unsafe.objectFieldOffset(k.getDeclaredField("state"));
        } catch (Exception e) {
            throw new Error(e);
        }
    }

    private boolean casHead(Node<K> expect, Node<K> head) {
        return Node.unsafe.compareAndSwapObject(this, headOffset, expect, head);
    }

    private boolean casTail(Node<K> expect, Node<K> tail) {
        return Node.unsafe.compareAndSwapObject(this, tailOffset, expect, tail);
    }

    /**
     * Decide whether a new entry may currently be inserted.
     */
    private boolean casState(int expect, int state) {
        return Node.unsafe.compareAndSwapInt(this, stateOffset, expect, state);
    }

    public LRUCache(int capacity) {
        super(capacity);
        cache = capacity < 64 ? new ConcurrentHashMap<>(capacity + 1, 1.0f) : new ConcurrentHashMap<>();
        casHead(null, new Node<>(null));
        casTail(null, new Node<>(null));
        head.casNext(null, tail);
        tail.casPrev(null, head);
    }

    @Override
    public V get(K key) {
        LRUCacheValue<K, V> lruCacheValue = cache.get(key);
        if (lruCacheValue != null) {
            // the entry exists: unlink its node
            poll(lruCacheValue.node);
            // re-insert it before the tail to mark it as recently used
            offer(lruCacheValue.node);
        }
        return lruCacheValue == null ? null : lruCacheValue.val;
    }

    private K poll(Node<K> node) {
        if (node == null || node == head || node == tail)
            return null;
        node.poll();
        return node.val;
    }

    private void offer(Node<K> node) {
        if (node == null)
            return;
        node.offer(tail);
    }

    @Override
    public void put(K key, V value) {
        restartFromHead:
        for (;;) {
            LRUCacheValue<K, V> lruCacheValue = cache.get(key);
            if (lruCacheValue != null) {
                // the entry exists: unlink its node
                poll(lruCacheValue.node);
                // update the cached value
                lruCacheValue.val = value;
                cache.put(key, lruCacheValue);
                // re-insert it before the tail to mark it as recently used
                offer(lruCacheValue.node);
            } else {
                // no such entry yet
                if (!casState(0, 1))
                    // CAS used instead of a lock so only one new value object is created;
                    // depending on the situation a lock would also work
                    continue restartFromHead;
                lruCacheValue = new LRUCacheValue<>(key, value);
                K k = null;
                // if the cache is full, evict head.next (the least recently used entry)
                while (cache.size() >= capacity && (k = poll(head.next)) != null)
                    cache.remove(k);
                // store the new entry
                cache.put(key, lruCacheValue);
                // insert the new node before the tail
                offer(lruCacheValue.node);
                // allow new inserts again
                state = 0;
            }
            break;
        }
    }
}

Let's test it:

public class LRUCacheTest {
    public static void main(String[] args) throws InterruptedException {
        LRUCache<Integer, Integer> lruCache = new LRUCache<>(2);
        lruCache.put(1, 1);
        lruCache.put(2, 2);
        System.out.println(lruCache.get(2));
        System.out.println("ans:2");
        lruCache.put(3, 3);
        System.out.println(lruCache.get(1));
        System.out.println(lruCache.get(2));
        System.out.println(lruCache.get(3));
        System.out.println("ans:null,2,3");
        lruCache.put(4, 4);
        System.out.println(lruCache.get(1));
        System.out.println(lruCache.get(2));
        System.out.println(lruCache.get(3));
        System.out.println(lruCache.get(4));
        System.out.println("ans:null,null,3,4");
    }
}

That completes LRU. Next up is the LFU code.

LFU (evict the least frequently used entry first, falling back to LRU among entries with the same usage frequency) adds one extra condition on top of LRU: usage frequency. The backing store is still the same map and the node is still the same node, but the frequencies have to be kept in order. The idea here is to use a TreeMap keyed by usage count, storing a head node and a tail node for each count, and to add a usage-count field to the map's value.
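To make the bucketing idea concrete before diving into the CAS-based code, here is a minimal single-threaded sketch (the names FreqBuckets, touch, addNew and evictOne are illustrative only): each usage count maps to a queue of keys kept in LRU order, and TreeMap.firstKey() always points at the lowest frequency.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.TreeMap;

// Illustrative only: one LRU-ordered queue of keys per usage count.
class FreqBuckets<K> {
    private final TreeMap<Integer, Deque<K>> buckets = new TreeMap<>();

    // Insert a brand new key at usage count 0.
    void addNew(K key) {
        buckets.computeIfAbsent(0, k -> new ArrayDeque<>()).addLast(key);
    }

    // Record that `key`, currently at `useTimes`, was used once more.
    void touch(K key, int useTimes) {
        Deque<K> old = buckets.get(useTimes);
        if (old != null) {
            old.remove(key);
            if (old.isEmpty())
                buckets.remove(useTimes);
        }
        buckets.computeIfAbsent(useTimes + 1, k -> new ArrayDeque<>()).addLast(key);
    }

    // Evict the least frequently used key; ties are broken by least recently used.
    K evictOne() {
        if (buckets.isEmpty())
            return null;
        Integer lowestCount = buckets.firstKey();
        Deque<K> lowest = buckets.get(lowestCount);
        K victim = lowest.pollFirst();
        if (lowest.isEmpty())
            buckets.remove(lowestCount);
        return victim;
    }
}

The real implementation below follows the same shape, but keeps a head/tail node pair per count so that the linked-list moves can be done with CAS.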

Here is the complete LFUCache class:

import java.util.NoSuchElementException;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;

public class LFUCache<K, V> extends Cache<K, V> {

    private static class LFUCacheValue<K, V> {
        private volatile Node<K> node;
        private volatile V val;
        // one extra field compared with LRU: the usage count
        private volatile int useTimes;

        private static final long useTimesOffset;

        static {
            try {
                Class<?> k = LFUCacheValue.class;
                useTimesOffset = Node.unsafe.objectFieldOffset(k.getDeclaredField("useTimes"));
            } catch (Exception e) {
                throw new Error(e);
            }
        }

        private LFUCacheValue(K key, V value) {
            node = new Node<>(key);
            val = value;
        }

        /**
         * Increment the usage count.
         */
        private boolean casAddUseTimes(int expect) {
            return Node.unsafe.compareAndSwapInt(this, useTimesOffset, expect, expect + 1);
        }
    }

    private static class NodeInfo<K> {
        // head node
        private volatile Node<K> head;
        // tail node
        private volatile Node<K> tail;

        private static final long headOffset;
        private static final long tailOffset;

        static {
            try {
                Class<?> k = NodeInfo.class;
                headOffset = Node.unsafe.objectFieldOffset(k.getDeclaredField("head"));
                tailOffset = Node.unsafe.objectFieldOffset(k.getDeclaredField("tail"));
            } catch (Exception e) {
                throw new Error(e);
            }
        }

        private NodeInfo() {
            casHead(null, new Node<>(null));
            casTail(null, new Node<>(null));
            head.casNext(null, tail);
            tail.casPrev(null, head);
        }

        private boolean casHead(Node<K> expect, Node<K> head) {
            return Node.unsafe.compareAndSwapObject(this, headOffset, expect, head);
        }

        private boolean casTail(Node<K> expect, Node<K> tail) {
            return Node.unsafe.compareAndSwapObject(this, tailOffset, expect, tail);
        }
    }

    private final ConcurrentHashMap<K, LFUCacheValue<K, V>> cache;
    // head/tail node pairs, one pair per usage count
    private final TreeMap<Integer, NodeInfo<K>> times;
    volatile int state;

    private static final long stateOffset;

    static {
        try {
            Class<?> k = LFUCache.class;
            stateOffset = Node.unsafe.objectFieldOffset(k.getDeclaredField("state"));
        } catch (Exception e) {
            throw new Error(e);
        }
    }

    private boolean casState(int expect, int state) {
        return Node.unsafe.compareAndSwapInt(this, stateOffset, expect, state);
    }

    public LFUCache(int capacity) {
        super(capacity);
        cache = capacity < 64 ? new ConcurrentHashMap<>(capacity + 1, 1.0f) : new ConcurrentHashMap<>();
        // the smallest usage count sorts first; within a count, nodes follow the LRU policy
        times = new TreeMap<>((t, u) -> t - u);
    }

    @Override
    public V get(K key) {
        LFUCacheValue<K, V> lfuCacheValue = cache.get(key);
        if (lfuCacheValue != null) {
            // current usage count of this entry
            int useTimes = lfuCacheValue.useTimes;
            // increment the usage count with CAS
            casAddUseTimes(lfuCacheValue);
            // unlink the node from its current count's list
            poll(useTimes, lfuCacheValue.node);
            // insert it before the tail of the (count + 1) list
            offer(useTimes + 1, lfuCacheValue.node);
        }
        return lfuCacheValue == null ? null : lfuCacheValue.val;
    }

    /**
     * Atomically increment the usage count.
     */
    private void casAddUseTimes(LFUCacheValue<K, V> lfuCacheValue) {
        int useTimes;
        do {
            useTimes = lfuCacheValue.useTimes;
        } while (!lfuCacheValue.casAddUseTimes(useTimes));
    }

    /**
     * Find the node list for the given usage count and unlink the node.
     */
    private K poll(int useTimes, Node<K> node) {
        if (node == null)
            return null;
        NodeInfo<K> nodeInfo = times.get(useTimes);
        if (nodeInfo == null)
            return null;
        node.poll();
        if (nodeInfo.head.next == nodeInfo.tail)
            times.remove(useTimes);
        return node.val;
    }

    /**
     * Insert the node at the tail of the list for the given usage count,
     * creating the head/tail pair for that count if it does not exist yet.
     */
    private void offer(int useTimes, Node<K> node) {
        if (node == null)
            return;
        node.offer(times.computeIfAbsent(useTimes, k -> new NodeInfo<>()).tail);
    }

    @Override
    public void put(K key, V value) {
        restartFromHead:
        for (;;) {
            if (capacity <= 0)
                return;
            LFUCacheValue<K, V> lfuCacheValue = cache.get(key);
            if (lfuCacheValue != null) {
                // the entry exists: bump its usage count and move it, as in get()
                int useTimes = lfuCacheValue.useTimes;
                casAddUseTimes(lfuCacheValue);
                poll(useTimes, lfuCacheValue.node);
                // update the cached value
                lfuCacheValue.val = value;
                cache.put(key, lfuCacheValue);
                offer(useTimes + 1, lfuCacheValue.node);
            } else {
                // no such entry yet
                if (!casState(0, 1))
                    continue restartFromHead;
                lfuCacheValue = new LFUCacheValue<>(key, value);
                K k = null;
                // if the cache is full, evict the least frequently used entry
                while (cache.size() >= capacity && (k = poll()) != null)
                    cache.remove(k);
                // store the new entry
                cache.put(key, lfuCacheValue);
                // insert the new node at the tail of its usage-count list
                offer(lfuCacheValue.useTimes, lfuCacheValue.node);
                // allow new inserts again
                state = 0;
            }
            break;
        }
    }

    /**
     * Evict the least frequently used entry.
     */
    private K poll() {
        Integer firstKey = null;
        try {
            // the first key is the smallest usage count, since the TreeMap is ordered by usage count
            firstKey = times.firstKey();
        } catch (NoSuchElementException e) {
            return null;
        }
        // within the same usage count, head.next is the least recently used node
        return poll(firstKey, times.get(firstKey).head.next);
    }
}
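A note on the sorted map: unlike ConcurrentHashMap, TreeMap itself is not thread-safe, so under genuinely concurrent access the calls to computeIfAbsent and remove on times would need extra coordination. A sorted concurrent map such as java.util.concurrent.ConcurrentSkipListMap provides the same operations used above (firstKey, computeIfAbsent, remove) and can be shared between threads. A minimal sketch of those operations (SortedConcurrentMapDemo is an illustrative name, not part of the cache code):

import java.util.NoSuchElementException;
import java.util.concurrent.ConcurrentSkipListMap;

public class SortedConcurrentMapDemo {
    public static void main(String[] args) {
        // Sorted like TreeMap, but safe under concurrent access.
        ConcurrentSkipListMap<Integer, String> times = new ConcurrentSkipListMap<>();
        times.computeIfAbsent(3, k -> "bucket-3");
        times.computeIfAbsent(1, k -> "bucket-1");
        System.out.println(times.firstKey()); // 1: the lowest usage count sorts first
        times.clear();
        try {
            times.firstKey();                 // throws when empty, just like TreeMap
        } catch (NoSuchElementException e) {
            System.out.println("empty");
        }
    }
}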

Now let's test LFU:

public class LFUCacheTest {
    public static void main(String[] args) {
        LFUCache<Integer, Integer> lfuCache = new LFUCache<>(2);
        lfuCache.put(1, 1);
        lfuCache.put(2, 2);
        System.out.println(lfuCache.get(2));
        System.out.println("ans:2");
        lfuCache.put(3, 3);
        System.out.println(lfuCache.get(1));
        System.out.println(lfuCache.get(2));
        System.out.println(lfuCache.get(3));
        System.out.println("ans:null,2,3");
        lfuCache.put(4, 4);
        System.out.println(lfuCache.get(1));
        System.out.println(lfuCache.get(2));
        System.out.println(lfuCache.get(3));
        System.out.println(lfuCache.get(4));
        System.out.println("ans:null,2,null,4");
        lfuCache.put(5, 5);
        System.out.println(lfuCache.get(1));
        System.out.println(lfuCache.get(2));
        System.out.println(lfuCache.get(3));
        System.out.println(lfuCache.get(4));
        System.out.println(lfuCache.get(5));
        System.out.println("ans:null,2,null,null,5");
    }
}

If you spot anything lacking, corrections are welcome; let's learn from each other.

Read the full article: http://gitbook.cn/gitchat/activity/5e8d738cfde1297a1631bb49
