版权声明:本文为博主原创文章,如果转载请给出原文链接:http://doofuu.com/article/4156205.html
LRU 缓存机制 设计和实现一个 LRU(最近最少使用)缓存数据结构,使其支持以下操作:get 和 put。 get(key) - 如果 key 存在于缓存中,则获取 key 的 value(总是正数),否则返回 -1。 put(key,value) - 如果 key 不存在,请设置或插入 value。当缓存达到其容量时,它应该在插入新项目之前使最近最少使用的项目作废。
python版本:
class LRUCache(object):
    """LRU (least-recently-used) cache with O(1) get/put.

    The original kept a separate ``self.keys`` list and called
    ``list.remove`` on every access, making each operation
    O(capacity).  An ``OrderedDict`` stores both the mapping and the
    recency order, so the public interface (``get``/``put`` plus the
    internal ``visit_key``/``elim_key`` helpers) is preserved while
    every operation becomes O(1).
    """

    def __init__(self, capacity):
        """
        :type capacity: int  # maximum number of entries; <= 0 stores nothing
        """
        from collections import OrderedDict
        # key -> value; least recently used entry is first.
        self.cache = OrderedDict()
        self.capacity = capacity

    def visit_key(self, key):
        # Mark `key` as most recently used (no-op when absent).
        # Pop-and-reinsert moves the entry to the end of the order.
        if key in self.cache:
            self.cache[key] = self.cache.pop(key)

    def elim_key(self):
        # Evict the least recently used entry (front of the OrderedDict).
        self.cache.popitem(last=False)

    def get(self, key):
        """
        :type key: int
        :rtype: int  # cached value, or -1 on a miss
        """
        if key not in self.cache:
            return -1
        self.visit_key(key)
        return self.cache[key]

    def put(self, key, value):
        """
        :type key: int
        :type value: int
        :rtype: void
        """
        if self.capacity <= 0:
            # A zero-capacity cache stores nothing.  (The original code
            # crashed here: elim_key() indexed an empty key list.)
            return
        # Evict only when inserting a NEW key into a full cache;
        # updating an existing key never needs eviction.
        if key not in self.cache and len(self.cache) == self.capacity:
            self.elim_key()
        self.cache[key] = value
        self.visit_key(key)
def main():
    """Replay the article's example op sequence against an LRUCache(2)
    and print the list of values returned by the get calls."""
    ops = ["put", "put", "get", "put", "get", "put", "get", "get", "get"]
    args = [[1, 1], [2, 2], [1], [3, 3], [2], [4, 4], [1], [3], [4]]
    cache = LRUCache(2)
    results = []
    for op, arg in zip(ops, args):
        if op == "get":
            results.append(cache.get(arg[0]))
        else:
            cache.put(arg[0], arg[1])
    print(results)
if __name__ == "__main__":
    main()

C++版本:
// C++ version: O(1) LRU cache.  A doubly-linked list holds (key, value)
// pairs ordered most-recently-used first; a hash map gives O(1) access
// to each key's list node.
//
// Fixes vs. the original listing: the members `cap`, `l` and `m` were
// used but never declared, the class was missing its closing semicolon,
// and `m.size() > cap` compared unsigned against signed.
class LRUCache {
public:
    LRUCache(int capacity) {
        cap = capacity;
    }

    // Return the value for `key`, or -1 when absent.
    // A hit moves the node to the front (most recently used).
    int get(int key) {
        auto it = m.find(key);
        if (it == m.end()) return -1;
        // splice relocates the node without invalidating the iterator
        // stored in the map.
        l.splice(l.begin(), l, it->second);
        return it->second->second;
    }

    // Insert or update `key`; when the capacity is exceeded, evict the
    // least-recently-used entry (the back of the list).
    void set(int key, int value) {
        auto it = m.find(key);
        if (it != m.end()) l.erase(it->second);
        l.push_front(std::make_pair(key, value));
        m[key] = l.begin();
        if (static_cast<int>(m.size()) > cap) {
            int k = l.rbegin()->first;
            l.pop_back();
            m.erase(k);
        }
    }

private:
    int cap;                                       // maximum entry count
    std::list<std::pair<int, int>> l;              // MRU-first (key, value) list
    std::unordered_map<int, std::list<std::pair<int, int>>::iterator> m;
};
// The Erlang version follows.
%%----------------------------------------------------
%% LRU cache server: ets tables of {Key, Object, Ts} rows,
%% periodically trimmed to a configured maximum size.
%%----------------------------------------------------
-module(lru_cache).
-behaviour(gen_server).

%% NOTE(review): the module defines start_link/0 (no arguments), but the
%% original export listed start_link/1, which does not exist — fixed.
-export([start_link/0]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
-export([
    insert/2
    ,delete/2
    ,lookup/2
    ,info/0
    ,resize/2
    ,clean/1
    ,member/2
]
).

%% NOTE(review): the ?INFO/?ERR/?DEBUG macros, the #role record and the
%% util module used below must come from project include files / apps not
%% shown in this listing — confirm the -include lines exist upstream.

-record(cache_cfg, {
    %% cache (ets table) name
    name :: atom()
    %% maximum number of entries; 0 means unlimited
    ,max_size = 0 :: non_neg_integer()
    %% position of the primary key inside a cached object
    ,keypos :: pos_integer()
}
).
-record(state, {
    cache_names = [] :: [atom()]
}
).

%% Interval between periodic LRU size checks, in milliseconds.
%% (Macro name keeps the original "INTERVLAL" spelling because other
%% clauses in this module reference it.)
-define(CHECK_LRU_CACHE_INTERVLAL, (30*1000)).

-ifdef(debug).
-define(ROLE_CACHE_MAX_NUM, 1).
-else.
-define(ROLE_CACHE_MAX_NUM, 1000).
-endif.
%% ----------------------------------------------------
%% Public API
%% ----------------------------------------------------
%% @doc Start the cache server, registered locally under the module name.
start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
%% @doc Store `Value` in the named cache, stamped with the current
%% timestamp so the periodic check treats it as most recently used.
insert(CacheName, Value) ->
    Key = get_key(CacheName, Value),
    true = ets:insert(CacheName, {Key, Value, ts()}),
    ok.
%% @doc Remove `Key` from the named cache; always returns ok.
delete(CacheName, Key) ->
    _ = ets:delete(CacheName, Key),
    ok.
%% @doc Fetch a cached value by key.
%% Reading does NOT refresh the entry's LRU timestamp.
-spec lookup(CacheName :: atom(), Key :: term()) -> {ok, term()} | false.
lookup(CacheName, Key) ->
    case ets:lookup(CacheName, Key) of
        [] -> false;
        [{Key, Object, _Stamp}] -> {ok, Object}
    end.
%% @doc Report whether `Key` is present in the cache.
-spec member(CacheName :: atom(), Key :: term()) -> boolean().
member(CacheName, Key) ->
    %% ets:member/2 already returns a boolean; the original wrapped it
    %% in a redundant `case ... of true -> true; false -> false end`.
    ets:member(CacheName, Key).
%% @doc Change a cache's maximum entry count at runtime (asynchronous;
%% handled by the {resize, ...} clause of handle_cast/2).
resize(CacheName, MaxSize) ->
    gen_server:cast(?MODULE, {resize, CacheName, MaxSize}).
%% @doc Drop every object (and its timestamp) from the cache table.
clean(CacheName) ->
    true = ets:delete_all_objects(CacheName),
    ok.
%% @doc Ask the server to log current statistics for every cache
%% (asynchronous; handled by the `info` clause of handle_cast/2).
info() ->
    gen_server:cast(?MODULE, info).
%% ----------------------------------------------------
%% 内部处理
%% ----------------------------------------------------
%% gen_server callback: create the cache tables (do_init/0) and arm the
%% first periodic size-check timer.  trap_exit is set so terminate/2
%% runs on supervisor shutdown.
init([]) ->
    ?INFO("[~w] 正在启动...", [?MODULE]),
    process_flag(trap_exit, true),
    {ok, State} = do_init(),
    erlang:send_after(?CHECK_LRU_CACHE_INTERVLAL, self(), check),
    ?INFO("[~w] 启动完成", [?MODULE]),
    {ok, State}.
%% Reply to unexpected synchronous calls instead of staying silent:
%% the original returned {noreply, State} without ever calling
%% gen_server:reply/2, so any gen_server:call/2 to this server would
%% hang until its timeout.
handle_call(_Request, _From, State) ->
    {reply, ok, State}.
%% {resize, ...}: update the configured max size of one cache; the
%% config lives in this process's dictionary under {cfg, CacheName}.
handle_cast({resize, CacheName, MaxSize}, State) ->
    case get({cfg, CacheName}) of
        Cfg = #cache_cfg{max_size = OldSize} ->
            put({cfg, CacheName}, Cfg#cache_cfg{max_size = MaxSize}),
            ?INFO("LRU缓存[~w]的MaxSize变化: ~w => ~w", [CacheName, OldSize, MaxSize]);
        _ ->
            ?ERR("找不到[~w]的LRU缓存配置", [CacheName])
    end,
    {noreply, State};
%% info: log one summary line per registered cache.
handle_cast(info, State = #state{cache_names = L}) ->
    ?INFO("LRU缓存信息:"),
    lists:foreach(fun(CacheName) ->
        #cache_cfg{keypos = KeyPos, max_size = MaxSize} = get({cfg, CacheName}),
        ?INFO("[~w] Ets Size=~w, keypos=~w, MaxSize=~w", [CacheName, ets:info(CacheName, size), KeyPos, MaxSize])
    end, L),
    {noreply, State};
%% Ignore unknown casts.
handle_cast(_Msg, State) ->
    {noreply, State}.
%% Periodic `check` tick: enforce each cache's size limit, then re-arm
%% the timer for the next interval.
handle_info(check, State = #state{cache_names = L}) ->
    lists:foreach(fun(CacheName) ->
        do_check(CacheName)
    end, L),
    erlang:send_after(?CHECK_LRU_CACHE_INTERVLAL, self(), check),
    {noreply, State};
%% Drain any other message so the mailbox cannot accumulate.
handle_info(_Info, State) ->
    {noreply, State}.
%% Shutdown hook: only logs; the ets tables created in do_init/0 are
%% owned by this process and die with it automatically.
terminate(_Reason, _State) ->
    ?INFO("[~w] 正在关闭...", [?MODULE]),
    ?INFO("[~w] 关闭完成", [?MODULE]),
    ok.
%% Hot-code upgrade hook: the state layout is unchanged across versions.
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
%% ----------------------------------------------------
%% Internal functions
%% ----------------------------------------------------
%% Create every cache table and collect the created names (via the
%% process dictionary, which create/3 appends to) into the state.
%% NOTE(review): the original had a second do_init/0 clause with an
%% identical head returning {ok, #state{}} — it could never match and
%% has been removed.
do_init() ->
    put(cache_names, []),
    %% Role cache (offline roles)
    create(role_cache, [public, named_table, set, compressed, {keypos, get_keypos(role_cache)}, {write_concurrency, true}], ?ROLE_CACHE_MAX_NUM),
    {ok, #state{cache_names = erase(cache_names)}}.
%% Position of the primary key inside each cached object type.
%% NOTE(review): #role.id relies on a record definition from an include
%% file not shown in this listing — confirm the -include exists.
get_keypos(role_cache) -> #role.id;
get_keypos(account_id_index) -> 1.
%% Extract the primary key from a cached object, using the key position
%% configured for that cache.
get_key(CacheName, Value) ->
    erlang:element(get_keypos(CacheName), Value).
%% Create one cache table and register its config in this process's
%% dictionary.  Rows are stored uniformly as {Key, Object, Ts}, so any
%% caller-supplied {keypos, Pos} option is rewritten to {keypos, 1} by
%% deal_opts/2 while the original Pos is remembered in #cache_cfg.keypos.
%% NOTE(review): deal_opts/2 returns `false` when the option list has no
%% keypos entry, which would crash the {ok, _, _} match below — the one
%% visible caller (do_init/0) always passes a keypos option.
create(CacheName, CacheOpts, MaxSize) ->
    {ok, NewOpts, KeyPos} = deal_opts(CacheOpts, []),
    ets:new(CacheName, NewOpts),
    Cfg = #cache_cfg{name = CacheName, max_size = MaxSize, keypos = KeyPos},
    put({cfg, CacheName}, Cfg),
    put(cache_names, [CacheName | get(cache_names)]),
    ok.
%% Rewrite an ets option list for the uniform {Key, Object, Ts} row
%% layout: strip the user's {keypos, Pos} option, force {keypos, 1},
%% and report the original Pos.  Returns false when no keypos option
%% is present.
%% Fixes vs. the original: the accumulator is now reversed before
%% appending (the original scrambled the order of options seen before
%% keypos), and plain ++ replaces lists:concat/1, which is meant for
%% building strings and only appended these lists by accident.
deal_opts([], _Seen) ->
    false;
deal_opts([{keypos, Pos} | Rest], Seen) ->
    {ok, lists:reverse(Seen) ++ Rest ++ [{keypos, 1}], Pos};
deal_opts([Opt | Rest], Seen) ->
    deal_opts(Rest, [Opt | Seen]).
%% Enforce one cache's size limit by evicting the entries with the
%% oldest access timestamps (LRU order).
%% A max_size of 0 means "unlimited" (see #cache_cfg); the original only
%% tested Len > MaxSize, so a 0-sized cache was purged down to empty —
%% such caches are now skipped entirely.
do_check(CacheName) ->
    #cache_cfg{max_size = MaxSize} = get({cfg, CacheName}),
    Keys = util:get_all_ets_key(CacheName),
    Len = length(Keys),
    case MaxSize > 0 andalso Len > MaxSize of
        true ->
            %% Collect {Ts, Key} pairs; entries may vanish concurrently
            %% (the table is public), so a failed lookup is skipped.
            TsKeys = lists:foldl(fun(Key, Acc) ->
                case catch ets:lookup_element(CacheName, Key, 3) of
                    Ts = {_, _, _} ->
                        %% os:timestamp() 3-tuples sort chronologically
                        %% under term order.
                        [{Ts, Key} | Acc];
                    _ ->
                        Acc
                end
            end, [], Keys),
            %% Oldest first; never split past the end of the list if some
            %% lookups failed (the original lists:split could badarg).
            Sorted = lists:keysort(1, TsKeys),
            DropCount = erlang:min(Len - MaxSize, length(Sorted)),
            {Evict, _Keep} = lists:split(DropCount, Sorted),
            lists:foreach(fun({_Ts, Key}) ->
                ets:delete(CacheName, Key)
            end, Evict),
            ?DEBUG("清理缓存[~w], ~w => ~w", [CacheName, Len, ets:info(CacheName, size)]);
        false ->
            ignore
    end,
    ok.
%% Access timestamp used for LRU ordering; os:timestamp() returns a
%% {MegaSecs, Secs, MicroSecs} tuple that compares chronologically.
ts() -> os:timestamp().
共有 0 条评论 - LRU缓存机制设计与实现(Python、C++、Erlang)