lmcache.experimental.storage_backend.evictor package#

Submodules#

lmcache.experimental.storage_backend.evictor.base_evictor module#

class lmcache.experimental.storage_backend.evictor.base_evictor.BaseEvictor[source]#

Interface for cache evictor

abstract update_on_hit(key: CacheEngineKey, cache_dict: OrderedDict) → None[source]#

Update cache_dict when a cache entry is used

Input:

key: a CacheEngineKey; cache_dict: an OrderedDict containing the current cache entries

abstract update_on_put(cache_dict: OrderedDict, cache_size: int) → Tuple[List[CacheEngineKey], PutStatus][source]#

Evict cache entries when a new cache arrives and the storage is full

Input:

cache_dict: an OrderedDict containing the current cache entries; cache_size: the size of the new kv cache to be injected

Returns:

A list of keys to be evicted and a PutStatus indicating whether the put is allowed

class lmcache.experimental.storage_backend.evictor.base_evictor.PutStatus(value, names=<not given>, *values, module=None, qualname=None, type=None, start=1, boundary=None)[source]#

Bases: Enum

ILLEGAL = 2#
LEGAL = 1#

lmcache.experimental.storage_backend.evictor.lru_evictor module#

class lmcache.experimental.storage_backend.evictor.lru_evictor.LRUEvictor(max_cache_size: float = 10.0)[source]#

Bases: BaseEvictor

LRU cache evictor

update_on_hit(key: CacheEngineKey | str, cache_dict: OrderedDict) → None[source]#

Update cache_dict when a cache entry is used

Input:

key: a CacheEngineKey; cache_dict: an OrderedDict containing the current cache entries

update_on_put(cache_dict: OrderedDict, cache_size: int) → Tuple[List[CacheEngineKey], PutStatus][source]#

Evict cache entries when a new cache arrives and the storage is full

Input:

cache_dict: an OrderedDict containing the current cache entries; cache_size: the size of the new kv cache to be injected

Returns:

A list of keys to be evicted and a PutStatus indicating whether the put is allowed

Module contents#

class lmcache.experimental.storage_backend.evictor.LRUEvictor(max_cache_size: float = 10.0)[source]#

Bases: BaseEvictor

LRU cache evictor

update_on_hit(key: CacheEngineKey | str, cache_dict: OrderedDict) → None[source]#

Update cache_dict when a cache entry is used

Input:

key: a CacheEngineKey; cache_dict: an OrderedDict containing the current cache entries

update_on_put(cache_dict: OrderedDict, cache_size: int) → Tuple[List[CacheEngineKey], PutStatus][source]#

Evict cache entries when a new cache arrives and the storage is full

Input:

cache_dict: an OrderedDict containing the current cache entries; cache_size: the size of the new kv cache to be injected

Returns:

A list of keys to be evicted and a PutStatus indicating whether the put is allowed

class lmcache.experimental.storage_backend.evictor.PutStatus(value, names=<not given>, *values, module=None, qualname=None, type=None, start=1, boundary=None)[source]#

Bases: Enum

ILLEGAL = 2#
LEGAL = 1#