
Remove cast(data as blob) workaround

Upstream merged the fix. Good performance boost.
Author: Matt Joiner
Date:   2021-01-19 17:54:17 +11:00
commit 9d6bf7a4f0
parent c424a2510e

2 changed files with 6 additions and 6 deletions
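
For context, a minimal sketch of what the substitution means at the query level. The blob table and data column come from the schema in the diff below; the constant names are hypothetical, not from the commit. Per the schema comment, sqlite *seems* to answer length(data) faster than length(cast(data as blob)), which lines up with the performance boost noted in the message:

// Illustrative only: the shape of the change. Names are hypothetical,
// not from the commit.
const (
	queryBefore = `select length(cast(data as blob)) from blob where name=?`
	queryAfter  = `select length(data) from blob where name=?`
)

The diff applies the same substitution to a schema comment, both eviction queries, WriteConsecutiveChunks, and ReadAt.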

go.mod

@@ -2,7 +2,7 @@ module github.com/anacrolix/torrent
 require (
 	bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512
-	crawshaw.io/sqlite v0.3.2
+	crawshaw.io/sqlite v0.3.3-0.20201116044518-95be3f88ee0f
 	github.com/RoaringBitmap/roaring v0.5.5 // indirect
 	github.com/alangpierce/go-forceexport v0.0.0-20160317203124-8f1d6941cd75 // indirect
 	github.com/alexflint/go-arg v1.3.0

storage/sqlite/sqlite-storage.go

@@ -77,7 +77,7 @@ func InitSchema(conn conn, pageSize int, triggers bool) error {
 create index if not exists blob_last_used on blob(last_used);
 -- While sqlite *seems* to be faster to get sum(length(data)) instead of
--- sum(length(cast(data as blob))), it may still require a large table scan at start-up or with a
+-- sum(length(data)), it may still require a large table scan at start-up or with a
 -- cold-cache. With this we can be assured that it doesn't.
 insert or ignore into blob_meta values ('size', 0);
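
The blob_meta 'size' row keeps a running total so that capacity accounting never needs the table scan the comment warns about. InitSchema's triggers parameter suggests the total is maintained by triggers; here is a hedged sketch of how that can work (hypothetical SQL, not the repository's actual statements; updates to existing rows are omitted for brevity):

// Hypothetical triggers keeping blob_meta('size') current on insert and
// delete, sketched from InitSchema's triggers parameter; not copied from
// the repository.
const sizeTriggers = `
create trigger if not exists blob_size_insert after insert on blob begin
	update blob_meta set value=value+length(new.data) where key='size';
end;
create trigger if not exists blob_size_delete after delete on blob begin
	update blob_meta set value=value-length(old.data) where key='size';
end;
`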
@@ -99,7 +99,7 @@ func InitSchema(conn conn, pageSize int, triggers bool) error {
 (select value from blob_meta where key='size') as usage_with,
 last_used,
 rowid,
-length(cast(data as blob))
+length(data)
 from blob order by last_used, rowid limit 1
 )
 where usage_with > (select value from setting where name='capacity')
@@ -108,7 +108,7 @@ func InitSchema(conn conn, pageSize int, triggers bool) error {
 usage_with-data_length as new_usage_with,
 blob.last_used,
 blob.rowid,
-length(cast(data as blob))
+length(data)
 from excess join blob
 on blob.rowid=(select rowid from blob where (last_used, rowid) > (excess.last_used, blob_rowid))
 where new_usage_with > (select value from setting where name='capacity')
@@ -361,7 +361,7 @@ func (p *provider) WriteConsecutiveChunks(prefix string, w io.Writer) (written i
 err = io.EOF
 err = sqlitex.Exec(conn, `
 select
-	cast(data as blob),
+	data,
 	cast(substr(name, ?+1) as integer) as offset
 from blob
 where name like ?||'%'
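
The hunk shows only the query text; below is a self-contained sketch of how such a read can be driven with crawshaw.io/sqlite's sqlitex.Exec. The function name, ordering clause, and error handling are assumptions, not the commit's code:

package sqlitestorage // hypothetical package for these sketches

import (
	"io"

	"crawshaw.io/sqlite"
	"crawshaw.io/sqlite/sqlitex"
)

// writeConsecutiveChunks streams blobs named prefix+offset to w in offset
// order, copying each row's data column out via ColumnLen/ColumnBytes.
func writeConsecutiveChunks(conn *sqlite.Conn, prefix string, w io.Writer) (written int64, err error) {
	err = sqlitex.Exec(conn, `
		select data, cast(substr(name, ?+1) as integer) as offset
		from blob
		where name like ?||'%'
		order by offset`,
		func(stmt *sqlite.Stmt) error {
			buf := make([]byte, stmt.ColumnLen(0))
			stmt.ColumnBytes(0, buf)
			n, err := w.Write(buf)
			written += int64(n)
			return err
		},
		len(prefix), prefix)
	return
}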
@@ -712,7 +712,7 @@ func (i instance) ReadAt(p []byte, off int64) (n int, err error) {
 gotRow := false
 err = sqlitex.Exec(
 	conn,
-	"select substr(cast(data as blob), ?, ?) from blob where name=?",
+	"select substr(data, ?, ?) from blob where name=?",
 	func(stmt *sqlite.Stmt) error {
 		if gotRow {
 			panic("found multiple matching blobs")
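
And the read path, completed into a compilable sketch under the same assumptions (same imports as the sketch above; the real ReadAt also distinguishes a missing row from a short read):

// readAt copies up to len(p) bytes of the named blob starting at off.
// sqlite's substr() is 1-indexed, hence off+1. The not-found error choice
// is hypothetical, not the repository's.
func readAt(conn *sqlite.Conn, name string, p []byte, off int64) (n int, err error) {
	gotRow := false
	err = sqlitex.Exec(
		conn,
		"select substr(data, ?, ?) from blob where name=?",
		func(stmt *sqlite.Stmt) error {
			if gotRow {
				panic("found multiple matching blobs")
			}
			gotRow = true
			n = stmt.ColumnBytes(0, p)
			return nil
		},
		off+1, len(p), name,
	)
	if err == nil && !gotRow {
		err = io.ErrUnexpectedEOF
	}
	return
}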