serialize the most recently used blitbuffer/koptcontext

to speed up KOReader startup for PDF/DJVU documents,
especially when reflowing
chrox
2014-04-30 23:24:44 +08:00
parent f9302cd17d
commit 775e5ea3b4
9 changed files with 810 additions and 11 deletions


@@ -1,6 +1,9 @@
--[[
A global LRU cache
]]--
require("MD5")
local DEBUG = require("dbg")
local function calcFreeMem()
local meminfo = io.open("/proc/meminfo", "r")
local freemem = 0
@@ -26,6 +29,22 @@ local function calcCacheMemSize()
return math.min(max, math.max(min, calc))
end
local cache_path = lfs.currentdir().."/cache/"
--[[
-- return a snapshot of disk-cached items for subsequent checks
--]]
function getDiskCache()
local cached = {}
for key_md5 in lfs.dir(cache_path) do
local file = cache_path..key_md5
if lfs.attributes(file, "mode") == "file" then
cached[key_md5] = file
end
end
return cached
end
local Cache = {
-- cache configuration:
max_memsize = calcCacheMemSize(),
@@ -34,7 +53,9 @@ local Cache = {
-- associative cache
cache = {},
-- this will hold the LRU order of the cache
cache_order = {},
-- disk Cache snapshot
cached = getDiskCache(),
}
function Cache:new(o)
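
The snapshot built by getDiskCache() maps the MD5 hex digest of a cache key to the file holding the serialized item; the Cache table keeps one in self.cached so lookups can probe the disk without rescanning the directory. A minimal sketch of such a probe, assuming md5() (from the MD5 module required above) returns the hex digest used for cache file names, with a hypothetical key:

-- sketch: probe the disk cache snapshot (the key value is hypothetical)
local snapshot = getDiskCache()
local key = "renderpg|/books/sample.pdf|1"
local file = snapshot[md5(key)]  -- nil when the key was never serialized
if file then
    print("disk cache hit:", file)
end
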
@@ -64,7 +85,11 @@ function Cache:insert(key, object)
self.current_memsize = self.current_memsize + object.size
end
--[[
-- check for a cached item by key
-- if ItemClass is given, the disk cache is also checked.
--]]
function Cache:check(key, ItemClass)
if self.cache[key] then
if self.cache_order[1] ~= key then
-- put key in front of the LRU list
@@ -76,6 +101,18 @@ function Cache:check(key)
table.insert(self.cache_order, 1, key)
end
return self.cache[key]
elseif ItemClass then
local cached = self.cached[md5(key)]
if cached then
local item = ItemClass:new{}
local ok, msg = pcall(item.load, item, cached)
if ok then
self:insert(key, item)
return item
else
DEBUG("discard cache", msg)
end
end
end
end
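
Cache:check() asks little of ItemClass: a new{} constructor and a load(file) method that restores the object from disk and sets its size field, which Cache:insert() uses for memory accounting. Cache:serialize() below additionally expects a dump(file) method returning the number of bytes written. A minimal sketch of a class meeting that contract, with a hypothetical name and on-disk format (the commit's real items wrap blitbuffers and koptcontexts):

-- sketch: an item class usable with Cache:check(key, ItemClass);
-- the class name and file format are hypothetical
local DummyCacheItem = {}

function DummyCacheItem:new(o)
    o = o or {}
    setmetatable(o, self)
    self.__index = self
    return o
end

-- restore state from a disk cache file; Cache:check() calls this via pcall
-- and expects self.size to be set for Cache:insert() accounting
function DummyCacheItem:load(file)
    local f = assert(io.open(file, "rb"))
    self.data = f:read("*a")
    f:close()
    self.size = #self.data
end

-- write state to a disk cache file; Cache:serialize() treats the return
-- value as the on-disk size, so return the number of bytes written
function DummyCacheItem:dump(file)
    local f = assert(io.open(file, "wb"))
    f:write(self.data)
    f:close()
    return #self.data
end
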
@@ -86,6 +123,34 @@ function Cache:willAccept(size)
end
end
function Cache:serialize()
-- calculate disk cache size
local cached_size = 0
local sorted_caches = {}
for _,file in pairs(self.cached) do
table.insert(sorted_caches, {file=file, time=lfs.attributes(file, "access")})
cached_size = cached_size + (lfs.attributes(file, "size") or 0)
end
table.sort(sorted_caches, function(v1,v2) return v1.time > v2.time end)
-- serialize only the most recently used item that supports dumping
local cache_size = 0
for _, key in ipairs(self.cache_order) do
if self.cache[key].dump then
cache_size = self.cache[key]:dump(cache_path..md5(key)) or 0
if cache_size > 0 then break end
end
end
-- hold the disk cache to the same size limit as the memory cache
while cached_size + cache_size - self.max_memsize > 0 do
-- discard the least recently used cache
local discarded = table.remove(sorted_caches)
cached_size = cached_size - lfs.attributes(discarded.file, "size")
os.remove(discarded.file)
end
-- the disk cache may have changed, so refresh the snapshot
self.cached = getDiskCache()
end
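-- sketch of a likely call site, not part of this commit: run serialize()
-- once when the document is closed or the app suspends, so the next
-- startup can load the reflowed page from disk instead of re-rendering.
-- the module path below is hypothetical:
--   local Cache = require("cache")
--   Cache:serialize()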
-- blank the cache
function Cache:clear()
for k, _ in pairs(self.cache) do