From 7b63a4eddef568a3fc0eb7cc083992f6a8fb7ae0 Mon Sep 17 00:00:00 2001 From: Stephen Lombardo Date: Thu, 22 Jul 2010 09:16:22 -0400 Subject: [PATCH] track 3.7.0 --- Makefile.in | 351 +++-- Makefile.vxworks | 3 +- VERSION | 2 +- configure | 21 +- ext/async/sqlite3async.c | 17 +- ext/fts3/fts3.c | 343 +++-- ext/fts3/fts3Int.h | 14 + ext/fts3/fts3_snippet.c | 2 +- ext/rtree/rtree.c | 1 + ext/rtree/rtree1.test | 17 + main.mk | 72 +- manifest | 465 ++++--- manifest.uuid | 2 +- publish.sh | 27 +- src/alter.c | 29 +- src/analyze.c | 16 +- src/attach.c | 7 +- src/backup.c | 50 +- src/btree.c | 235 ++-- src/btree.h | 3 + src/btreeInt.h | 3 + src/build.c | 4 +- src/callback.c | 7 +- src/ctime.c | 17 +- src/date.c | 17 +- src/delete.c | 4 +- src/expr.c | 109 +- src/func.c | 22 +- src/global.c | 11 + src/insert.c | 4 +- src/journal.c | 6 +- src/main.c | 177 ++- src/malloc.c | 40 +- src/mem2.c | 45 +- src/memjournal.c | 19 +- src/mutex.c | 31 +- src/mutex_noop.c | 86 +- src/mutex_os2.c | 4 +- src/mutex_unix.c | 36 +- src/mutex_w32.c | 20 +- src/notify.c | 1 + src/os.c | 40 +- src/os.h | 12 +- src/os_common.h | 18 +- src/os_os2.c | 108 +- src/os_unix.c | 1962 +++++++++++++++++----------- src/os_win.c | 945 ++++++++++++-- src/pager.c | 1865 ++++++++++++++++++-------- src/pager.h | 20 +- src/parse.y | 45 +- src/pcache.c | 17 +- src/pcache1.c | 8 +- src/pragma.c | 159 ++- src/prepare.c | 17 +- src/resolve.c | 5 +- src/select.c | 31 +- src/shell.c | 8 +- src/sqlite.h.in | 341 +++-- src/sqliteInt.h | 79 +- src/sqliteLimit.h | 8 + src/status.c | 20 + src/tclsqlite.c | 138 +- src/test1.c | 71 +- src/test2.c | 3 + src/test6.c | 38 +- src/test_async.c | 3 +- src/test_config.c | 20 +- src/test_demovfs.c | 671 ++++++++++ src/test_devsym.c | 53 +- src/test_journal.c | 20 +- src/test_malloc.c | 21 + src/test_onefile.c | 15 +- src/test_osinst.c | 1628 ++++++++++++----------- src/test_stat.c | 609 +++++++++ src/test_thread.c | 4 + src/test_vfs.c | 1408 ++++++++++++++++++++ src/trigger.c | 2 + src/update.c | 6 +- src/vacuum.c | 19 +- src/vdbe.c | 238 ++-- src/vdbeInt.h | 2 +- src/vdbeapi.c | 52 +- src/vdbeaux.c | 40 +- src/vdbeblob.c | 4 + src/vdbemem.c | 13 +- src/vtab.c | 1 + src/wal.c | 2661 ++++++++++++++++++++++++++++++++++++++ src/wal.h | 107 ++ src/where.c | 493 ++++++- test/all.test | 147 +-- test/alter.test | 31 +- test/alter2.test | 24 +- test/analyze.test | 2 +- test/analyze2.test | 5 + test/async.test | 33 +- test/async4.test | 6 + test/autoindex1.test | 139 ++ test/autovacuum.test | 14 +- test/avtrans.test | 13 +- test/backup.test | 6 + test/backup2.test | 2 + test/backup_malloc.test | 3 +- test/bigfile.test | 8 + test/cache.test | 8 +- test/capi2.test | 8 +- test/capi3.test | 13 +- test/capi3b.test | 2 +- test/capi3c.test | 13 +- test/collate4.test | 3 +- test/corrupt.test | 5 + test/corrupt2.test | 85 +- test/corrupt3.test | 2 + test/corrupt4.test | 5 + test/corrupt6.test | 5 + test/corrupt7.test | 5 + test/corrupt8.test | 5 + test/corrupt9.test | 5 + test/corruptA.test | 4 +- test/corruptB.test | 7 +- test/corruptC.test | 18 +- test/corruptE.test | 12 + test/crash8.test | 3 +- test/ctime.test | 7 +- test/date.test | 55 +- test/dbstatus.test | 45 + test/descidx1.test | 5 + test/descidx2.test | 6 + test/descidx3.test | 5 + test/e_expr.test | 332 +++++ test/e_fts3.test | 1 + test/exclusive.test | 2 +- test/exclusive2.test | 2 +- test/filectrl.test | 2 +- test/filefmt.test | 6 + test/fkey2.test | 165 ++- test/fts2.test | 7 +- test/fts3.test | 59 +- test/fts3_common.tcl | 152 --- test/fts3an.test | 36 +- 
test/fts3query.test | 29 + test/fts3rnd.test | 1 + test/fuzz.test | 4 +- test/fuzz_malloc.test | 4 +- test/hook.test | 27 + test/incrblob.test | 4 +- test/incrvacuum.test | 22 +- test/index3.test | 6 +- test/init.test | 5 +- test/io.test | 17 +- test/journal2.test | 234 ++++ test/journal3.test | 60 + test/jrnlmode.test | 36 +- test/jrnlmode2.test | 34 +- test/lock2.test | 77 +- test/lock4.test | 2 + test/lock5.test | 2 +- test/lock6.test | 3 +- test/lock_common.tcl | 166 +++ test/lookaside.test | 10 + test/main.test | 20 +- test/malloc.test | 1 + test/mallocAll.test | 5 +- test/mallocC.test | 1 - test/mallocI.test | 1 + test/malloc_common.tcl | 491 +++++++ test/memleak.test | 5 +- test/memsubsys1.test | 9 + test/minmax3.test | 5 + test/misc1.test | 8 +- test/misc5.test | 24 +- test/nan.test | 29 +- test/notify3.test | 146 +++ test/pager1.test | 2246 ++++++++++++++++++++++++++++++++ test/pager2.test | 119 ++ test/pagerfault.test | 1048 +++++++++++++++ test/pagerfault2.test | 99 ++ test/pageropt.test | 2 +- test/pcache.test | 4 + test/permutations.test | 658 +++++----- test/pragma.test | 22 +- test/quick.test | 147 +-- test/rdonly.test | 12 +- test/rollback.test | 8 +- test/rowhash.test | 5 +- test/rtree.test | 5 +- test/savepoint.test | 542 +++++--- test/savepoint2.test | 6 + test/savepoint6.test | 15 +- test/schema3.test | 97 ++ test/select2.test | 4 +- test/select9.test | 4 +- test/selectC.test | 44 + test/shared3.test | 2 +- test/soak.test | 8 +- test/softheap1.test | 3 +- test/stat.test | 154 +++ test/stmt.test | 23 +- test/table.test | 1 + test/tclsqlite.test | 4 +- test/tempdb.test | 6 +- test/tester.tcl | 589 ++++++--- test/thread2.test | 116 -- test/thread_common.tcl | 12 +- test/tkt-02a8e81d44.test | 31 + test/tkt-26ff0c2d1e.test | 33 + test/tkt-80e031a00f.test | 206 +++ test/tkt-9d68c883.test | 53 + test/tkt-cbd054fa6b.test | 87 ++ test/tkt-d11f09d36e.test | 62 + test/tkt-f973c7ac31.test | 87 ++ test/tkt-fc62af4523.test | 84 ++ test/tkt3472.test | 39 - test/trans.test | 20 +- test/trigger7.test | 6 +- test/triggerA.test | 2 +- test/triggerC.test | 62 + test/vacuum.test | 13 + test/veryquick.test | 8 +- test/wal.test | 1446 +++++++++++++++++++++ test/wal2.test | 1151 +++++++++++++++++ test/wal3.test | 738 +++++++++++ test/wal4.test | 64 + test/wal_common.tcl | 91 ++ test/walbak.test | 279 ++++ test/walbig.test | 73 ++ test/walcksum.test | 393 ++++++ test/walcrash.test | 296 +++++ test/walcrash2.test | 99 ++ test/walfault.test | 450 +++++++ test/walhook.test | 109 ++ test/walmode.test | 380 ++++++ test/walslow.test | 73 ++ test/walthread.test | 527 ++++++++ test/where2.test | 31 + test/where3.test | 4 +- test/where7.test | 10 +- test/where8.test | 3 +- tool/lemon.c | 28 +- tool/mksqlite3c.tcl | 2 + tool/shell1.test | 17 +- tool/shell2.test | 2 + tool/shell3.test | 1 + tool/showdb.c | 267 +++- tool/showwal.c | 355 +++++ tool/spaceanal.tcl | 417 ++---- 245 files changed, 28070 insertions(+), 5438 deletions(-) create mode 100644 src/test_demovfs.c create mode 100644 src/test_stat.c create mode 100644 src/test_vfs.c create mode 100644 src/wal.c create mode 100644 src/wal.h create mode 100644 test/autoindex1.test create mode 100644 test/dbstatus.test create mode 100644 test/e_expr.test create mode 100644 test/journal2.test create mode 100644 test/journal3.test create mode 100644 test/lock_common.tcl create mode 100644 test/notify3.test create mode 100644 test/pager1.test create mode 100644 test/pager2.test create mode 100644 test/pagerfault.test create mode 100644 test/pagerfault2.test 
create mode 100644 test/schema3.test create mode 100644 test/stat.test create mode 100644 test/tkt-02a8e81d44.test create mode 100644 test/tkt-26ff0c2d1e.test create mode 100644 test/tkt-80e031a00f.test create mode 100644 test/tkt-9d68c883.test create mode 100644 test/tkt-cbd054fa6b.test create mode 100644 test/tkt-d11f09d36e.test create mode 100644 test/tkt-f973c7ac31.test create mode 100644 test/tkt-fc62af4523.test delete mode 100644 test/tkt3472.test create mode 100644 test/wal.test create mode 100644 test/wal2.test create mode 100644 test/wal3.test create mode 100644 test/wal4.test create mode 100644 test/wal_common.tcl create mode 100644 test/walbak.test create mode 100644 test/walbig.test create mode 100644 test/walcksum.test create mode 100644 test/walcrash.test create mode 100644 test/walcrash2.test create mode 100644 test/walfault.test create mode 100644 test/walhook.test create mode 100644 test/walmode.test create mode 100644 test/walslow.test create mode 100644 test/walthread.test create mode 100644 tool/showwal.c diff --git a/Makefile.in b/Makefile.in index e954ccb..4974194 100644 --- a/Makefile.in +++ b/Makefile.in @@ -161,31 +161,35 @@ NAWK = @AWK@ # You should not have to change anything below this line ############################################################################### +USE_AMALGAMATION = @USE_AMALGAMATION@ + # Object files for the SQLite library (non-amalgamation). # -OBJS0 = alter.lo analyze.lo attach.lo auth.lo backup.lo bitvec.lo btmutex.lo \ - btree.lo build.lo callback.lo complete.lo ctime.lo date.lo \ - delete.lo expr.lo fault.lo fkey.lo func.lo global.lo \ - hash.lo journal.lo insert.lo legacy.lo loadext.lo \ - main.lo malloc.lo mem0.lo mem1.lo mem2.lo mem3.lo mem5.lo \ - memjournal.lo \ - mutex.lo mutex_noop.lo mutex_os2.lo mutex_unix.lo mutex_w32.lo \ - notify.lo opcodes.lo os.lo os_unix.lo os_win.lo os_os2.lo \ - pager.lo parse.lo pcache.lo pcache1.lo pragma.lo prepare.lo printf.lo \ - random.lo resolve.lo rowset.lo select.lo status.lo \ - table.lo tokenize.lo trigger.lo update.lo \ - util.lo vacuum.lo \ - vdbe.lo vdbeapi.lo vdbeaux.lo vdbeblob.lo vdbemem.lo vdbetrace.lo \ - walker.lo where.lo utf.lo vtab.lo +LIBOBJS0 = alter.lo analyze.lo attach.lo auth.lo \ + backup.lo bitvec.lo btmutex.lo btree.lo build.lo \ + callback.lo complete.lo ctime.lo date.lo delete.lo expr.lo fault.lo fkey.lo \ + fts3.lo fts3_expr.lo fts3_hash.lo fts3_icu.lo fts3_porter.lo \ + fts3_snippet.lo fts3_tokenizer.lo fts3_tokenizer1.lo fts3_write.lo \ + func.lo global.lo hash.lo \ + icu.lo insert.lo journal.lo legacy.lo loadext.lo \ + main.lo malloc.lo mem0.lo mem1.lo mem2.lo mem3.lo mem5.lo \ + memjournal.lo \ + mutex.lo mutex_noop.lo mutex_os2.lo mutex_unix.lo mutex_w32.lo \ + notify.lo opcodes.lo os.lo os_os2.lo os_unix.lo os_win.lo \ + pager.lo parse.lo pcache.lo pcache1.lo pragma.lo prepare.lo printf.lo \ + random.lo resolve.lo rowset.lo rtree.lo select.lo status.lo \ + table.lo tokenize.lo trigger.lo \ + update.lo util.lo vacuum.lo \ + vdbe.lo vdbeapi.lo vdbeaux.lo vdbeblob.lo vdbemem.lo vdbetrace.lo \ + wal.lo walker.lo where.lo utf.o vtab.lo # Object files for the amalgamation. # -OBJS1 = sqlite3.lo +LIBOBJS1 = sqlite3.lo # Determine the real value of LIBOBJ based on the 'configure' script # -USE_AMALGAMATION = @USE_AMALGAMATION@ -LIBOBJ = $(OBJS$(USE_AMALGAMATION)) +LIBOBJ = $(LIBOBJS$(USE_AMALGAMATION)) # All of the source code files. 
@@ -276,20 +280,11 @@ SRC = \ $(TOP)/src/vdbetrace.c \ $(TOP)/src/vdbeInt.h \ $(TOP)/src/vtab.c \ + $(TOP)/src/wal.c \ + $(TOP)/src/wal.h \ $(TOP)/src/walker.c \ $(TOP)/src/where.c -# Generated source code files -# -SRC += \ - keywordhash.h \ - opcodes.c \ - opcodes.h \ - parse.c \ - parse.h \ - config.h \ - sqlite3.h - # Source code for extensions # SRC += \ @@ -331,44 +326,19 @@ SRC += \ $(TOP)/ext/rtree/rtree.h \ $(TOP)/ext/rtree/rtree.c -# Source code to the library files needed by the test fixture -# -TESTSRC2 = \ - $(TOP)/src/attach.c \ - $(TOP)/src/backup.c \ - $(TOP)/src/bitvec.c \ - $(TOP)/src/btree.c \ - $(TOP)/src/build.c \ - $(TOP)/src/date.c \ - $(TOP)/src/ctime.c \ - $(TOP)/src/expr.c \ - $(TOP)/src/func.c \ - $(TOP)/src/insert.c \ - $(TOP)/src/mem5.c \ - $(TOP)/src/os.c \ - $(TOP)/src/os_os2.c \ - $(TOP)/src/os_unix.c \ - $(TOP)/src/os_win.c \ - $(TOP)/src/pager.c \ - $(TOP)/src/pcache.c \ - $(TOP)/src/pcache1.c \ - $(TOP)/src/pragma.c \ - $(TOP)/src/prepare.c \ - $(TOP)/src/printf.c \ - $(TOP)/src/random.c \ - $(TOP)/src/select.c \ - $(TOP)/src/tokenize.c \ - $(TOP)/src/utf.c \ - $(TOP)/src/util.c \ - $(TOP)/src/vdbe.c \ - $(TOP)/src/vdbeapi.c \ - $(TOP)/src/vdbeaux.c \ - $(TOP)/src/vdbemem.c \ - $(TOP)/src/vdbetrace.c \ - $(TOP)/src/where.c \ - parse.c -# Source code to the actual test files. +# Generated source code files +# +SRC += \ + keywordhash.h \ + opcodes.c \ + opcodes.h \ + parse.c \ + parse.h \ + config.h \ + sqlite3.h + +# Source code to the test files. # TESTSRC = \ $(TOP)/src/test1.c \ @@ -385,6 +355,7 @@ TESTSRC = \ $(TOP)/src/test_backup.c \ $(TOP)/src/test_btree.c \ $(TOP)/src/test_config.c \ + $(TOP)/src/test_demovfs.c \ $(TOP)/src/test_devsym.c \ $(TOP)/src/test_func.c \ $(TOP)/src/test_hexio.c \ @@ -398,60 +369,98 @@ TESTSRC = \ $(TOP)/src/test_pcache.c \ $(TOP)/src/test_schema.c \ $(TOP)/src/test_server.c \ + $(TOP)/src/test_stat.c \ $(TOP)/src/test_tclvar.c \ $(TOP)/src/test_thread.c \ + $(TOP)/src/test_vfs.c \ $(TOP)/src/test_wsd.c +# Source code to the library files needed by the test fixture +# +TESTSRC2 = \ + $(TOP)/src/attach.c \ + $(TOP)/src/backup.c \ + $(TOP)/src/bitvec.c \ + $(TOP)/src/btree.c \ + $(TOP)/src/build.c \ + $(TOP)/src/ctime.c \ + $(TOP)/src/date.c \ + $(TOP)/src/expr.c \ + $(TOP)/src/func.c \ + $(TOP)/src/insert.c \ + $(TOP)/src/wal.c \ + $(TOP)/src/mem5.c \ + $(TOP)/src/os.c \ + $(TOP)/src/os_os2.c \ + $(TOP)/src/os_unix.c \ + $(TOP)/src/os_win.c \ + $(TOP)/src/pager.c \ + $(TOP)/src/pragma.c \ + $(TOP)/src/prepare.c \ + $(TOP)/src/printf.c \ + $(TOP)/src/random.c \ + $(TOP)/src/pcache.c \ + $(TOP)/src/pcache1.c \ + $(TOP)/src/select.c \ + $(TOP)/src/tokenize.c \ + $(TOP)/src/utf.c \ + $(TOP)/src/util.c \ + $(TOP)/src/vdbeapi.c \ + $(TOP)/src/vdbeaux.c \ + $(TOP)/src/vdbe.c \ + $(TOP)/src/vdbemem.c \ + $(TOP)/src/vdbetrace.c \ + $(TOP)/src/where.c \ + parse.c \ + $(TOP)/ext/fts3/fts3.c \ + $(TOP)/ext/fts3/fts3_expr.c \ + $(TOP)/ext/fts3/fts3_tokenizer.c \ + $(TOP)/ext/fts3/fts3_write.c \ + $(TOP)/ext/async/sqlite3async.c + # Header files used by all library source files. 
# HDR = \ - sqlite3.h \ $(TOP)/src/btree.h \ $(TOP)/src/btreeInt.h \ $(TOP)/src/hash.h \ $(TOP)/src/hwtime.h \ - $(TOP)/src/sqliteLimit.h \ + keywordhash.h \ $(TOP)/src/mutex.h \ opcodes.h \ $(TOP)/src/os.h \ $(TOP)/src/os_common.h \ + $(TOP)/src/pager.h \ + $(TOP)/src/pcache.h \ + parse.h \ + sqlite3.h \ $(TOP)/src/sqlite3ext.h \ $(TOP)/src/sqliteInt.h \ + $(TOP)/src/sqliteLimit.h \ $(TOP)/src/vdbe.h \ $(TOP)/src/vdbeInt.h \ - parse.h \ config.h # Header files used by extensions # -HDR += \ +EXTHDR += \ $(TOP)/ext/fts1/fts1.h \ $(TOP)/ext/fts1/fts1_hash.h \ $(TOP)/ext/fts1/fts1_tokenizer.h -HDR += \ +EXTHDR += \ $(TOP)/ext/fts2/fts2.h \ $(TOP)/ext/fts2/fts2_hash.h \ $(TOP)/ext/fts2/fts2_tokenizer.h -HDR += \ +EXTHDR += \ $(TOP)/ext/fts3/fts3.h \ $(TOP)/ext/fts3/fts3Int.h \ $(TOP)/ext/fts3/fts3_hash.h \ $(TOP)/ext/fts3/fts3_tokenizer.h -HDR += \ +EXTHDR += \ $(TOP)/ext/rtree/rtree.h -HDR += \ +EXTHDR += \ $(TOP)/ext/icu/sqliteicu.h -# If using the amalgamation, use sqlite3.c directly to build the test -# fixture. Otherwise link against libsqlite3.la. (This distinction is -# necessary because the test fixture requires non-API symbols which are -# hidden when the library is built via the amalgamation). -# -TESTFIXTURE_SRC0 = $(TESTSRC2) libsqlite3.la -TESTFIXTURE_SRC1 = sqlite3.c -TESTFIXTURE_SRC = $(TESTSRC) $(TOP)/src/tclsqlite.c $(TESTFIXTURE_SRC$(USE_AMALGAMATION)) - - # This is the default Makefile target. The objects listed here # are what get build when you type just "make" with no arguments. # @@ -485,10 +494,10 @@ sqlite3$(TEXE): $(TOP)/src/shell.c libsqlite3.la sqlite3.h # files are automatically generated. This target takes care of # all that automatic generation. # -.target_source: $(SRC) +.target_source: $(SRC) $(TOP)/tool/vdbe-compress.tcl rm -rf tsrc - mkdir -p tsrc - cp $(SRC) tsrc + mkdir tsrc + cp -f $(SRC) tsrc rm tsrc/sqlite.h.in tsrc/parse.y $(TCLSH_CMD) $(TOP)/tool/vdbe-compress.tcl vdbe.new mv vdbe.new tsrc/vdbe.c @@ -497,19 +506,30 @@ sqlite3$(TEXE): $(TOP)/src/shell.c libsqlite3.la sqlite3.h sqlite3.c: .target_source $(TOP)/tool/mksqlite3c.tcl $(TCLSH_CMD) $(TOP)/tool/mksqlite3c.tcl +# Rule to build the amalgamation +# +sqlite3.lo: sqlite3.c + $(LTCOMPILE) $(TEMP_STORE) -c sqlite3.c + # Rules to build the LEMON compiler generator # lemon$(BEXE): $(TOP)/tool/lemon.c $(TOP)/src/lempar.c $(BCC) -o $@ $(TOP)/tool/lemon.c cp $(TOP)/src/lempar.c . - -# Rule to build the amalgamation +# Rules to build individual *.o files from generated *.c files. This +# applies to: # -sqlite3.lo: sqlite3.c - $(LTCOMPILE) $(TEMP_STORE) -c sqlite3.c +# parse.o +# opcodes.o +# +parse.lo: parse.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c parse.c -# Rules to build individual files +opcodes.lo: opcodes.c + $(LTCOMPILE) $(TEMP_STORE) -c opcodes.c + +# Rules to build individual *.o files from files in the src directory. 
# alter.lo: $(TOP)/src/alter.c $(HDR) $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/alter.c @@ -634,15 +654,6 @@ pcache.lo: $(TOP)/src/pcache.c $(HDR) $(TOP)/src/pcache.h pcache1.lo: $(TOP)/src/pcache1.c $(HDR) $(TOP)/src/pcache.h $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/pcache1.c -opcodes.lo: opcodes.c - $(LTCOMPILE) $(TEMP_STORE) -c opcodes.c - -opcodes.c: opcodes.h $(TOP)/mkopcodec.awk - sort -n -b -k 3 opcodes.h | $(NAWK) -f $(TOP)/mkopcodec.awk >opcodes.c - -opcodes.h: parse.h $(TOP)/src/vdbe.c $(TOP)/mkopcodeh.awk - cat parse.h $(TOP)/src/vdbe.c | $(NAWK) -f $(TOP)/mkopcodeh.awk >opcodes.h - os.lo: $(TOP)/src/os.c $(HDR) $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/os.c @@ -655,17 +666,6 @@ os_win.lo: $(TOP)/src/os_win.c $(HDR) os_os2.lo: $(TOP)/src/os_os2.c $(HDR) $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/os_os2.c -parse.lo: parse.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c parse.c - -parse.h: parse.c - -parse.c: $(TOP)/src/parse.y lemon$(BEXE) $(TOP)/addopcodes.awk - cp $(TOP)/src/parse.y . - ./lemon$(BEXE) $(OPT_FEATURE_FLAGS) $(OPTS) parse.y - mv parse.h parse.h.temp - $(NAWK) -f $(TOP)/addopcodes.awk parse.h.temp >parse.h - pragma.lo: $(TOP)/src/pragma.c $(HDR) $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/pragma.c @@ -690,22 +690,12 @@ select.lo: $(TOP)/src/select.c $(HDR) status.lo: $(TOP)/src/status.c $(HDR) $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/status.c -sqlite3.h: $(TOP)/src/sqlite.h.in $(TOP)/manifest.uuid $(TOP)/VERSION - tclsh $(TOP)/tool/mksqlite3h.tcl $(TOP) >sqlite3.h - table.lo: $(TOP)/src/table.c $(HDR) $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/table.c -tclsqlite.lo: $(TOP)/src/tclsqlite.c $(HDR) - $(LTCOMPILE) -DUSE_TCL_STUBS=1 -c $(TOP)/src/tclsqlite.c - tokenize.lo: $(TOP)/src/tokenize.c keywordhash.h $(HDR) $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/tokenize.c -keywordhash.h: $(TOP)/tool/mkkeywordhash.c - $(BCC) -o mkkeywordhash$(BEXE) $(OPT_FEATURE_FLAGS) $(OPTS) $(TOP)/tool/mkkeywordhash.c - ./mkkeywordhash$(BEXE) >keywordhash.h - trigger.lo: $(TOP)/src/trigger.c $(HDR) $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/trigger.c @@ -742,12 +732,18 @@ vdbetrace.lo: $(TOP)/src/vdbetrace.c $(HDR) vtab.lo: $(TOP)/src/vtab.c $(HDR) $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/vtab.c +wal.lo: $(TOP)/src/wal.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/wal.c + walker.lo: $(TOP)/src/walker.c $(HDR) $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/walker.c where.lo: $(TOP)/src/where.c $(HDR) $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/where.c +tclsqlite.lo: $(TOP)/src/tclsqlite.c $(HDR) + $(LTCOMPILE) -DUSE_TCL_STUBS=1 -c $(TOP)/src/tclsqlite.c + tclsqlite-shell.lo: $(TOP)/src/tclsqlite.c $(HDR) $(LTCOMPILE) -DTCLSH=1 -o $@ -c $(TOP)/src/tclsqlite.c @@ -758,16 +754,113 @@ tclsqlite3$(TEXE): tclsqlite-shell.lo libsqlite3.la $(LTLINK) -o $@ tclsqlite-shell.lo \ libsqlite3.la $(LIBTCL) +# Rules to build opcodes.c and opcodes.h +# +opcodes.c: opcodes.h $(TOP)/mkopcodec.awk + sort -n -b -k 3 opcodes.h | $(NAWK) -f $(TOP)/mkopcodec.awk >opcodes.c + +opcodes.h: parse.h $(TOP)/src/vdbe.c $(TOP)/mkopcodeh.awk + cat parse.h $(TOP)/src/vdbe.c | $(NAWK) -f $(TOP)/mkopcodeh.awk >opcodes.h + +# Rules to build parse.c and parse.h - the outputs of lemon. +# +parse.h: parse.c + +parse.c: $(TOP)/src/parse.y lemon$(BEXE) $(TOP)/addopcodes.awk + cp $(TOP)/src/parse.y . 
+ rm -f parse.h + ./lemon$(BEXE) $(OPT_FEATURE_FLAGS) $(OPTS) parse.y + mv parse.h parse.h.temp + $(NAWK) -f $(TOP)/addopcodes.awk parse.h.temp >parse.h + +sqlite3.h: $(TOP)/src/sqlite.h.in $(TOP)/manifest.uuid $(TOP)/VERSION + tclsh $(TOP)/tool/mksqlite3h.tcl $(TOP) >sqlite3.h + +keywordhash.h: $(TOP)/tool/mkkeywordhash.c + $(BCC) -o mkkeywordhash$(BEXE) $(OPT_FEATURE_FLAGS) $(OPTS) $(TOP)/tool/mkkeywordhash.c + ./mkkeywordhash$(BEXE) >keywordhash.h + + + +# Rules to build the extension objects. +# +icu.lo: $(TOP)/ext/icu/icu.c $(HDR) $(EXTHDR) + $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/icu/icu.c + +fts2.lo: $(TOP)/ext/fts2/fts2.c $(HDR) $(EXTHDR) + $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2.c + +fts2_hash.lo: $(TOP)/ext/fts2/fts2_hash.c $(HDR) $(EXTHDR) + $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2_hash.c + +fts2_icu.lo: $(TOP)/ext/fts2/fts2_icu.c $(HDR) $(EXTHDR) + $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2_icu.c + +fts2_porter.lo: $(TOP)/ext/fts2/fts2_porter.c $(HDR) $(EXTHDR) + $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2_porter.c + +fts2_tokenizer.lo: $(TOP)/ext/fts2/fts2_tokenizer.c $(HDR) $(EXTHDR) + $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2_tokenizer.c + +fts2_tokenizer1.lo: $(TOP)/ext/fts2/fts2_tokenizer1.c $(HDR) $(EXTHDR) + $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2_tokenizer1.c + +fts3.lo: $(TOP)/ext/fts3/fts3.c $(HDR) $(EXTHDR) + $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3.c + +fts3_expr.lo: $(TOP)/ext/fts3/fts3_expr.c $(HDR) $(EXTHDR) + $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_expr.c + +fts3_hash.lo: $(TOP)/ext/fts3/fts3_hash.c $(HDR) $(EXTHDR) + $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_hash.c + +fts3_icu.lo: $(TOP)/ext/fts3/fts3_icu.c $(HDR) $(EXTHDR) + $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_icu.c + +fts3_snippet.lo: $(TOP)/ext/fts3/fts3_snippet.c $(HDR) $(EXTHDR) + $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_snippet.c + +fts3_porter.lo: $(TOP)/ext/fts3/fts3_porter.c $(HDR) $(EXTHDR) + $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_porter.c + +fts3_tokenizer.lo: $(TOP)/ext/fts3/fts3_tokenizer.c $(HDR) $(EXTHDR) + $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_tokenizer.c + +fts3_tokenizer1.lo: $(TOP)/ext/fts3/fts3_tokenizer1.c $(HDR) $(EXTHDR) + $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_tokenizer1.c + +fts3_write.lo: $(TOP)/ext/fts3/fts3_write.c $(HDR) $(EXTHDR) + $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_write.c + +rtree.lo: $(TOP)/ext/rtree/rtree.c $(HDR) $(EXTHDR) + $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/rtree/rtree.c + + +# Rules to build the 'testfixture' application. +# +# If using the amalgamation, use sqlite3.c directly to build the test +# fixture. Otherwise link against libsqlite3.la. (This distinction is +# necessary because the test fixture requires non-API symbols which are +# hidden when the library is built via the amalgamation). 
+# +TESTFIXTURE_FLAGS = -DTCLSH=1 -DSQLITE_TEST=1 -DSQLITE_CRASH_TEST=1 +TESTFIXTURE_FLAGS += -DSQLITE_SERVER=1 -DSQLITE_PRIVATE="" -DSQLITE_CORE + +TESTFIXTURE_SRC0 = $(TESTSRC2) libsqlite3.la +TESTFIXTURE_SRC1 = sqlite3.c +TESTFIXTURE_SRC = $(TESTSRC) $(TOP)/src/tclsqlite.c $(TESTFIXTURE_SRC$(USE_AMALGAMATION)) + testfixture$(TEXE): $(TESTFIXTURE_SRC) - $(LTLINK) -DTCLSH=1 -DSQLITE_TEST=1 -DSQLITE_NO_SYNC=1\ - -DSQLITE_CRASH_TEST=1 \ - -DSQLITE_SERVER=1 -DSQLITE_PRIVATE="" -DSQLITE_CORE $(TEMP_STORE) \ + $(LTLINK) -DSQLITE_NO_SYNC=1 $(TEMP_STORE) $(TESTFIXTURE_FLAGS) \ -o $@ $(TESTFIXTURE_SRC) $(LIBTCL) $(TLIBS) fulltest: testfixture$(TEXE) sqlite3$(TEXE) ./testfixture$(TEXE) $(TOP)/test/all.test +soaktest: testfixture$(TEXE) sqlite3$(TEXE) + ./testfixture$(TEXE) $(TOP)/test/all.test -soak=1 + test: testfixture$(TEXE) sqlite3$(TEXE) ./testfixture$(TEXE) $(TOP)/test/veryquick.test @@ -781,9 +874,11 @@ sqlite3_analyzer$(TEXE): $(TESTFIXTURE_SRC) $(TOP)/tool/spaceanal.tcl $(TOP)/tool/spaceanal.tcl >spaceanal_tcl.h $(LTLINK) -DTCLSH=2 -DSQLITE_TEST=1 -DSQLITE_CRASH_TEST=1 \ -DSQLITE_SERVER=1 -DSQLITE_PRIVATE="" -DSQLITE_CORE \ - $(TEMP_STORE) -o $@ $(TESTFIXTURE_SRC) $(LIBTCL) + $(TEMP_STORE) -o $@ $(TESTFIXTURE_SRC) $(LIBTCL) $(TLIBS) +# Standard install and cleanup targets +# lib_install: libsqlite3.la $(INSTALL) -d $(DESTDIR)$(libdir) $(LTINSTALL) libsqlite3.la $(DESTDIR)$(libdir) @@ -808,15 +903,15 @@ tcl_install: lib_install libtclsqlite3.la pkgIndex.tcl clean: rm -f *.lo *.la *.o sqlite3$(TEXE) libsqlite3.la rm -f sqlite3.h opcodes.* - rm -rf .libs .deps tsrc + rm -rf .libs .deps rm -f lemon$(BEXE) lempar.c parse.* sqlite*.tar.gz rm -f mkkeywordhash$(BEXE) keywordhash.h rm -f $(PUBLISH) rm -f *.da *.bb *.bbg gmon.out + rm -rf tsrc .target_source rm -f testfixture$(TEXE) test.db - rm -f common.tcl rm -f sqlite3.dll sqlite3.lib sqlite3.def - rm -f sqlite3.c .target_source + rm -f sqlite3.c distclean: clean rm -f config.log config.status libtool Makefile sqlite3.pc diff --git a/Makefile.vxworks b/Makefile.vxworks index 273ce13..b3f648e 100644 --- a/Makefile.vxworks +++ b/Makefile.vxworks @@ -386,6 +386,7 @@ TESTSRC = \ $(TOP)/src/test_server.c \ $(TOP)/src/test_tclvar.c \ $(TOP)/src/test_thread.c \ + $(TOP)/src/test_vfs.c \ $(TOP)/src/test_wsd.c \ #TESTSRC += $(TOP)/ext/fts2/fts2_tokenizer.c @@ -624,7 +625,7 @@ fulltest: testfixture$(EXE) sqlite3$(EXE) ./testfixture$(EXE) $(TOP)/test/all.test soaktest: testfixture$(EXE) sqlite3$(EXE) - ./testfixture$(EXE) $(TOP)/test/all.test -soak 1 + ./testfixture$(EXE) $(TOP)/test/all.test -soak=1 test: testfixture$(EXE) sqlite3$(EXE) ./testfixture$(EXE) $(TOP)/test/veryquick.test diff --git a/VERSION b/VERSION index cc690cc..7c69a55 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.6.23.1 +3.7.0 diff --git a/configure b/configure index 6b9b772..49d7216 100755 --- a/configure +++ b/configure @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.62 for sqlite 3.6.23.1. +# Generated by GNU Autoconf 2.62 for sqlite 3.7.0. # # Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, # 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. @@ -743,8 +743,8 @@ SHELL=${CONFIG_SHELL-/bin/sh} # Identity of this package. PACKAGE_NAME='sqlite' PACKAGE_TARNAME='sqlite' -PACKAGE_VERSION='3.6.23.1' -PACKAGE_STRING='sqlite 3.6.23.1' +PACKAGE_VERSION='3.7.0' +PACKAGE_STRING='sqlite 3.7.0' PACKAGE_BUGREPORT='' # Factoring default headers for most tests. 
@@ -1487,7 +1487,7 @@ if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures sqlite 3.6.23.1 to adapt to many kinds of systems. +\`configure' configures sqlite 3.7.0 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1552,7 +1552,7 @@ fi if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of sqlite 3.6.23.1:";; + short | recursive ) echo "Configuration of sqlite 3.7.0:";; esac cat <<\_ACEOF @@ -1670,7 +1670,7 @@ fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -sqlite configure 3.6.23.1 +sqlite configure 3.7.0 generated by GNU Autoconf 2.62 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, @@ -1684,7 +1684,7 @@ cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by sqlite $as_me 3.6.23.1, which was +It was created by sqlite $as_me 3.7.0, which was generated by GNU Autoconf 2.62. Invocation command line was $ $0 $@ @@ -13972,7 +13972,7 @@ exec 6>&1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by sqlite $as_me 3.6.23.1, which was +This file was extended by sqlite $as_me 3.7.0, which was generated by GNU Autoconf 2.62. Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -14025,7 +14025,7 @@ Report bugs to ." _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_version="\\ -sqlite config.status 3.6.23.1 +sqlite config.status 3.7.0 configured by $0, generated by GNU Autoconf 2.62, with options \\"`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`\\" @@ -14458,7 +14458,8 @@ $debug || if test -n "$CONFIG_FILES"; then -ac_cr=' ' +ac_cr=' +' ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then ac_cs_awk_cr='\\r' diff --git a/ext/async/sqlite3async.c b/ext/async/sqlite3async.c index 127942b..a351eaa 100644 --- a/ext/async/sqlite3async.c +++ b/ext/async/sqlite3async.c @@ -667,7 +667,7 @@ static int asyncRead( ){ AsyncFileData *p = ((AsyncFile *)pFile)->pData; int rc = SQLITE_OK; - sqlite3_int64 filesize; + sqlite3_int64 filesize = 0; sqlite3_file *pBase = p->pBaseRead; sqlite3_int64 iAmt64 = (sqlite3_int64)iAmt; @@ -690,7 +690,7 @@ static int asyncRead( } nRead = MIN(filesize - iOffset, iAmt64); if( nRead>0 ){ - rc = pBase->pMethods->xRead(pBase, zOut, nRead, iOffset); + rc = pBase->pMethods->xRead(pBase, zOut, (int)nRead, iOffset); ASYNC_TRACE(("READ %s %d bytes at %d\n", p->zName, nRead, iOffset)); } } @@ -717,9 +717,11 @@ static int asyncRead( if( iBeginIn<0 ) iBeginIn = 0; if( iBeginOut<0 ) iBeginOut = 0; + filesize = MAX(filesize, pWrite->iOffset+nByte64); + nCopy = MIN(nByte64-iBeginIn, iAmt64-iBeginOut); if( nCopy>0 ){ - memcpy(&((char *)zOut)[iBeginOut], &pWrite->zBuf[iBeginIn], nCopy); + memcpy(&((char *)zOut)[iBeginOut], &pWrite->zBuf[iBeginIn], (size_t)nCopy); ASYNC_TRACE(("OVERREAD %d bytes at %d\n", nCopy, iBeginOut+iOffset)); } } @@ -728,6 +730,9 @@ static int asyncRead( asyncread_out: async_mutex_leave(ASYNC_MUTEX_QUEUE); + if( rc==SQLITE_OK && filesize<(iOffset+iAmt) ){ + rc = SQLITE_IOERR_SHORT_READ; + } return rc; } @@ -1138,7 +1143,6 @@ static int asyncOpen( async_mutex_leave(ASYNC_MUTEX_LOCK); if( rc==SQLITE_OK ){ - 
incrOpenFileCount(); pData->pLock = pLock; } @@ -1155,7 +1159,10 @@ static int asyncOpen( } if( rc!=SQLITE_OK ){ p->pMethod = 0; + }else{ + incrOpenFileCount(); } + return rc; } @@ -1232,7 +1239,7 @@ static int asyncFullPathname( if( rc==SQLITE_OK ){ int i, j; char *z = zPathOut; - int n = strlen(z); + int n = (int)strlen(z); while( n>1 && z[n-1]=='/' ){ n--; } for(i=j=0; ibHasDocsize boolean is true (indicating that this is an +** FTS4 table, not an FTS3 table) then also create the %_docsize and +** %_stat tables required by FTS4. */ static int fts3CreateTables(Fts3Table *p){ int rc = SQLITE_OK; /* Return code */ @@ -604,6 +607,9 @@ static int fts3CreateTables(Fts3Table *p){ ** An sqlite3_exec() callback for fts3TableExists. */ static int fts3TableExistsCallback(void *pArg, int n, char **pp1, char **pp2){ + UNUSED_PARAMETER(n); + UNUSED_PARAMETER(pp1); + UNUSED_PARAMETER(pp2); *(int*)pArg = 1; return 1; } @@ -629,7 +635,7 @@ static void fts3TableExists( ); rc = sqlite3_exec(db, zSql, fts3TableExistsCallback, &res, 0); sqlite3_free(zSql); - *pResult = res & 0xff; + *pResult = (u8)(res & 0xff); if( rc!=SQLITE_ABORT ) *pRc = rc; } @@ -639,7 +645,7 @@ static void fts3TableExists( ** ** The argv[] array contains the following: ** -** argv[0] -> module name +** argv[0] -> module name ("fts3" or "fts4") ** argv[1] -> database name ** argv[2] -> table name ** argv[...] -> "column name" and other module argument fields. @@ -658,12 +664,12 @@ static int fts3InitVtab( int rc; /* Return code */ int i; /* Iterator variable */ int nByte; /* Size of allocation used for *p */ - int iCol; - int nString = 0; - int nCol = 0; - char *zCsr; - int nDb; - int nName; + int iCol; /* Column index */ + int nString = 0; /* Bytes required to hold all column names */ + int nCol = 0; /* Number of columns in the FTS table */ + char *zCsr; /* Space for holding column names */ + int nDb; /* Bytes required to hold database name */ + int nName; /* Bytes required to hold table name */ const char *zTokenizer = 0; /* Name of tokenizer to use */ sqlite3_tokenizer *pTokenizer = 0; /* Tokenizer for this table */ @@ -893,6 +899,11 @@ static int fulltextClose(sqlite3_vtab_cursor *pCursor){ return SQLITE_OK; } +/* +** Position the pCsr->pStmt statement so that it is on the row +** of the %_content table that contains the last match. Return +** SQLITE_OK on success. +*/ static int fts3CursorSeek(sqlite3_context *pContext, Fts3Cursor *pCsr){ if( pCsr->isRequireSeek ){ pCsr->isRequireSeek = 0; @@ -919,6 +930,17 @@ static int fts3CursorSeek(sqlite3_context *pContext, Fts3Cursor *pCsr){ } } +/* +** Advance the cursor to the next row in the %_content table that +** matches the search criteria. For a MATCH search, this will be +** the next row that matches. For a full-table scan, this will be +** simply the next row in the %_content table. For a docid lookup, +** this routine simply sets the EOF flag. +** +** Return SQLITE_OK if nothing goes wrong. SQLITE_OK is returned +** even if we reach end-of-file. The fts3EofMethod() will be called +** subsequently to determine whether or not an EOF was hit. +*/ static int fts3NextMethod(sqlite3_vtab_cursor *pCursor){ int rc = SQLITE_OK; /* Return code */ Fts3Cursor *pCsr = (Fts3Cursor *)pCursor; @@ -1055,6 +1077,11 @@ static void fts3PutDeltaVarint( ** start of a position-list. After it returns, *ppPoslist points to the ** first byte after the position-list. ** +** A position list is list of positions (delta encoded) and columns for +** a single document record of a doclist. 
So, in other words, this +** routine advances *ppPoslist so that it points to the next docid in +** the doclist, or to the first byte past the end of the doclist. +** ** If pp is not NULL, then the contents of the position list are copied ** to *pp. *pp is set to point to the first byte past the last byte copied ** before this function returns. @@ -1064,17 +1091,20 @@ static void fts3PoslistCopy(char **pp, char **ppPoslist){ char c = 0; /* The end of a position list is marked by a zero encoded as an FTS3 - ** varint. A single 0x00 byte. Except, if the 0x00 byte is preceded by + ** varint. A single POS_END (0) byte. Except, if the 0 byte is preceded by ** a byte with the 0x80 bit set, then it is not a varint 0, but the tail ** of some other, multi-byte, value. ** - ** The following block moves pEnd to point to the first byte that is not + ** The following while-loop moves pEnd to point to the first byte that is not ** immediately preceded by a byte with the 0x80 bit set. Then increments ** pEnd once more so that it points to the byte immediately following the ** last byte in the position-list. */ - while( *pEnd | c ) c = *pEnd++ & 0x80; - pEnd++; + while( *pEnd | c ){ + c = *pEnd++ & 0x80; + testcase( c!=0 && (*pEnd)==0 ); + } + pEnd++; /* Advance past the POS_END terminator byte */ if( pp ){ int n = (int)(pEnd - *ppPoslist); @@ -1086,12 +1116,34 @@ static void fts3PoslistCopy(char **pp, char **ppPoslist){ *ppPoslist = pEnd; } +/* +** When this function is called, *ppPoslist is assumed to point to the +** start of a column-list. After it returns, *ppPoslist points to the +** to the terminator (POS_COLUMN or POS_END) byte of the column-list. +** +** A column-list is list of delta-encoded positions for a single column +** within a single document within a doclist. +** +** The column-list is terminated either by a POS_COLUMN varint (1) or +** a POS_END varint (0). This routine leaves *ppPoslist pointing to +** the POS_COLUMN or POS_END that terminates the column-list. +** +** If pp is not NULL, then the contents of the column-list are copied +** to *pp. *pp is set to point to the first byte past the last byte copied +** before this function returns. The POS_COLUMN or POS_END terminator +** is not copied into *pp. +*/ static void fts3ColumnlistCopy(char **pp, char **ppPoslist){ char *pEnd = *ppPoslist; char c = 0; - /* A column-list is terminated by either a 0x01 or 0x00. */ - while( 0xFE & (*pEnd | c) ) c = *pEnd++ & 0x80; + /* A column-list is terminated by either a 0x01 or 0x00 byte that is + ** not part of a multi-byte varint. + */ + while( 0xFE & (*pEnd | c) ){ + c = *pEnd++ & 0x80; + testcase( c!=0 && ((*pEnd)&0xfe)==0 ); + } if( pp ){ int n = (int)(pEnd - *ppPoslist); char *p = *pp; @@ -1103,37 +1155,45 @@ static void fts3ColumnlistCopy(char **pp, char **ppPoslist){ } /* -** Value used to signify the end of an offset-list. This is safe because +** Value used to signify the end of an position-list. This is safe because ** it is not possible to have a document with 2^31 terms. */ -#define OFFSET_LIST_END 0x7fffffff +#define POSITION_LIST_END 0x7fffffff /* -** This function is used to help parse offset-lists. When this function is -** called, *pp may point to the start of the next varint in the offset-list -** being parsed, or it may point to 1 byte past the end of the offset-list -** (in which case **pp will be 0x00 or 0x01). +** This function is used to help parse position-lists. 
When this function is +** called, *pp may point to the start of the next varint in the position-list +** being parsed, or it may point to 1 byte past the end of the position-list +** (in which case **pp will be a terminator bytes POS_END (0) or +** (1)). ** -** If *pp points past the end of the current offset list, set *pi to -** OFFSET_LIST_END and return. Otherwise, read the next varint from *pp, +** If *pp points past the end of the current position-list, set *pi to +** POSITION_LIST_END and return. Otherwise, read the next varint from *pp, ** increment the current value of *pi by the value read, and set *pp to ** point to the next value before returning. +** +** Before calling this routine *pi must be initialized to the value of +** the previous position, or zero if we are reading the first position +** in the position-list. Because positions are delta-encoded, the value +** of the previous position is needed in order to compute the value of +** the next position. */ static void fts3ReadNextPos( - char **pp, /* IN/OUT: Pointer into offset-list buffer */ - sqlite3_int64 *pi /* IN/OUT: Value read from offset-list */ + char **pp, /* IN/OUT: Pointer into position-list buffer */ + sqlite3_int64 *pi /* IN/OUT: Value read from position-list */ ){ - if( **pp&0xFE ){ + if( (**pp)&0xFE ){ fts3GetDeltaVarint(pp, pi); *pi -= 2; }else{ - *pi = OFFSET_LIST_END; + *pi = POSITION_LIST_END; } } /* -** If parameter iCol is not 0, write an 0x01 byte followed by the value of -** iCol encoded as a varint to *pp. +** If parameter iCol is not 0, write an POS_COLUMN (1) byte followed by +** the value of iCol encoded as a varint to *pp. This will start a new +** column list. ** ** Set *pp to point to the byte just after the last byte written before ** returning (do not modify it if iCol==0). Return the total number of bytes @@ -1151,7 +1211,11 @@ static int fts3PutColNumber(char **pp, int iCol){ } /* -** +** Compute the union of two position lists. The output written +** into *pp contains all positions of both *pp1 and *pp2 in sorted +** order and with any duplicates removed. All pointers are +** updated appropriately. The caller is responsible for insuring +** that there is enough space in *pp to hold the complete output. */ static void fts3PoslistMerge( char **pp, /* Output buffer */ @@ -1163,32 +1227,33 @@ static void fts3PoslistMerge( char *p2 = *pp2; while( *p1 || *p2 ){ - int iCol1; - int iCol2; + int iCol1; /* The current column index in pp1 */ + int iCol2; /* The current column index in pp2 */ - if( *p1==0x01 ) sqlite3Fts3GetVarint32(&p1[1], &iCol1); - else if( *p1==0x00 ) iCol1 = OFFSET_LIST_END; + if( *p1==POS_COLUMN ) sqlite3Fts3GetVarint32(&p1[1], &iCol1); + else if( *p1==POS_END ) iCol1 = POSITION_LIST_END; else iCol1 = 0; - if( *p2==0x01 ) sqlite3Fts3GetVarint32(&p2[1], &iCol2); - else if( *p2==0x00 ) iCol2 = OFFSET_LIST_END; + if( *p2==POS_COLUMN ) sqlite3Fts3GetVarint32(&p2[1], &iCol2); + else if( *p2==POS_END ) iCol2 = POSITION_LIST_END; else iCol2 = 0; if( iCol1==iCol2 ){ - sqlite3_int64 i1 = 0; - sqlite3_int64 i2 = 0; + sqlite3_int64 i1 = 0; /* Last position from pp1 */ + sqlite3_int64 i2 = 0; /* Last position from pp2 */ sqlite3_int64 iPrev = 0; int n = fts3PutColNumber(&p, iCol1); p1 += n; p2 += n; - /* At this point, both p1 and p2 point to the start of offset-lists. - ** An offset-list is a list of non-negative delta-encoded varints, each - ** incremented by 2 before being stored. Each list is terminated by a 0 - ** or 1 value (0x00 or 0x01). 
The following block merges the two lists + /* At this point, both p1 and p2 point to the start of column-lists + ** for the same column (the column with index iCol1 and iCol2). + ** A column-list is a list of non-negative delta-encoded varints, each + ** incremented by 2 before being stored. Each list is terminated by a + ** POS_END (0) or POS_COLUMN (1). The following block merges the two lists ** and writes the results to buffer p. p is left pointing to the byte - ** after the list written. No terminator (0x00 or 0x01) is written to - ** the output. + ** after the list written. No terminator (POS_END or POS_COLUMN) is + ** written to the output. */ fts3GetDeltaVarint(&p1, &i1); fts3GetDeltaVarint(&p2, &i2); @@ -1203,7 +1268,7 @@ static void fts3PoslistMerge( }else{ fts3ReadNextPos(&p2, &i2); } - }while( i1!=OFFSET_LIST_END || i2!=OFFSET_LIST_END ); + }while( i1!=POSITION_LIST_END || i2!=POSITION_LIST_END ); }else if( iCol1isReqPos ? MERGE_POS_OR : MERGE_OR); + char *aOut = 0; + int nOut = 0; + int i; + + /* Loop through the doclists in the aaOutput[] array. Merge them all + ** into a single doclist. + */ + for(i=0; iaaOutput); i++){ + if( pTS->aaOutput[i] ){ + if( !aOut ){ + aOut = pTS->aaOutput[i]; + nOut = pTS->anOutput[i]; + pTS->aaOutput[0] = 0; + }else{ + int nNew = nOut + pTS->anOutput[i]; + char *aNew = sqlite3_malloc(nNew); + if( !aNew ){ + sqlite3_free(aOut); + return SQLITE_NOMEM; + } + fts3DoclistMerge(mergetype, 0, 0, + aNew, &nNew, pTS->aaOutput[i], pTS->anOutput[i], aOut, nOut + ); + sqlite3_free(pTS->aaOutput[i]); + sqlite3_free(aOut); + pTS->aaOutput[i] = 0; + aOut = aNew; + nOut = nNew; + } + } + } + + pTS->aaOutput[0] = aOut; + pTS->anOutput[0] = nOut; + return SQLITE_OK; +} + /* ** This function is used as the sqlite3Fts3SegReaderIterate() callback when ** querying the full-text index for a doclist associated with a term or @@ -1572,38 +1687,63 @@ static int fts3TermSelectCb( int nDoclist ){ TermSelect *pTS = (TermSelect *)pContext; - int nNew = pTS->nOutput + nDoclist; - char *aNew = sqlite3_malloc(nNew); UNUSED_PARAMETER(p); UNUSED_PARAMETER(zTerm); UNUSED_PARAMETER(nTerm); - if( !aNew ){ - return SQLITE_NOMEM; - } - - if( pTS->nOutput==0 ){ + if( pTS->aaOutput[0]==0 ){ /* If this is the first term selected, copy the doclist to the output ** buffer using memcpy(). TODO: Add a way to transfer control of the ** aDoclist buffer from the caller so as to avoid the memcpy(). */ - memcpy(aNew, aDoclist, nDoclist); + pTS->aaOutput[0] = sqlite3_malloc(nDoclist); + pTS->anOutput[0] = nDoclist; + if( pTS->aaOutput[0] ){ + memcpy(pTS->aaOutput[0], aDoclist, nDoclist); + }else{ + return SQLITE_NOMEM; + } }else{ - /* The output buffer is not empty. Merge doclist aDoclist with the - ** existing output. This can only happen with prefix-searches (as - ** searches for exact terms return exactly one doclist). - */ int mergetype = (pTS->isReqPos ? 
MERGE_POS_OR : MERGE_OR); - fts3DoclistMerge(mergetype, 0, 0, - aNew, &nNew, pTS->aOutput, pTS->nOutput, aDoclist, nDoclist - ); + char *aMerge = aDoclist; + int nMerge = nDoclist; + int iOut; + + for(iOut=0; iOutaaOutput); iOut++){ + char *aNew; + int nNew; + if( pTS->aaOutput[iOut]==0 ){ + assert( iOut>0 ); + pTS->aaOutput[iOut] = aMerge; + pTS->anOutput[iOut] = nMerge; + break; + } + + nNew = nMerge + pTS->anOutput[iOut]; + aNew = sqlite3_malloc(nNew); + if( !aNew ){ + if( aMerge!=aDoclist ){ + sqlite3_free(aMerge); + } + return SQLITE_NOMEM; + } + fts3DoclistMerge(mergetype, 0, 0, + aNew, &nNew, pTS->aaOutput[iOut], pTS->anOutput[iOut], aMerge, nMerge + ); + + if( iOut>0 ) sqlite3_free(aMerge); + sqlite3_free(pTS->aaOutput[iOut]); + pTS->aaOutput[iOut] = 0; + + aMerge = aNew; + nMerge = nNew; + if( (iOut+1)==SizeofArray(pTS->aaOutput) ){ + pTS->aaOutput[iOut] = aMerge; + pTS->anOutput[iOut] = nMerge; + } + } } - - sqlite3_free(pTS->aOutput); - pTS->aOutput = aNew; - pTS->nOutput = nNew; - return SQLITE_OK; } @@ -1613,9 +1753,9 @@ static int fts3TermSelectCb( ** ** The returned doclist may be in one of two formats, depending on the ** value of parameter isReqPos. If isReqPos is zero, then the doclist is -** a sorted list of delta-compressed docids. If isReqPos is non-zero, -** then the returned list is in the same format as is stored in the -** database without the found length specifier at the start of on-disk +** a sorted list of delta-compressed docids (a bare doclist). If isReqPos +** is non-zero, then the returned list is in the same format as is stored +** in the database without the found length specifier at the start of on-disk ** doclists. */ static int fts3TermSelect( @@ -1727,12 +1867,17 @@ static int fts3TermSelect( rc = sqlite3Fts3SegReaderIterate(p, apSegment, nSegment, &filter, fts3TermSelectCb, (void *)&tsc ); + if( rc==SQLITE_OK ){ + rc = fts3TermSelectMerge(&tsc); + } if( rc==SQLITE_OK ){ - *ppOut = tsc.aOutput; - *pnOut = tsc.nOutput; + *ppOut = tsc.aaOutput[0]; + *pnOut = tsc.anOutput[0]; }else{ - sqlite3_free(tsc.aOutput); + for(i=0; ipReadRowid); } + *pRowid = cell.iRowid; if( rc==SQLITE_OK ){ rc = ChooseLeaf(pRtree, &cell, 0, &pLeaf); diff --git a/ext/rtree/rtree1.test b/ext/rtree/rtree1.test index 9d9a2b6..f27cb75 100644 --- a/ext/rtree/rtree1.test +++ b/ext/rtree/rtree1.test @@ -399,4 +399,21 @@ do_test rtree-10.1 { catchsql { CREATE VIRTUAL TABLE t7 USING rtree(index, x1, y1, x2, y2) } } {1 {near "index": syntax error}} +#------------------------------------------------------------------------- +# Test last_insert_rowid(). 
+# +do_test rtree-11.1 { + execsql { + CREATE VIRTUAL TABLE t8 USING rtree(idx, x1, x2, y1, y2); + INSERT INTO t8 VALUES(1, 1.0, 1.0, 2.0, 2.0); + SELECT last_insert_rowid(); + } +} {1} +do_test rtree-11.2 { + execsql { + INSERT INTO t8 VALUES(NULL, 1.0, 1.0, 2.0, 2.0); + SELECT last_insert_rowid(); + } +} {2} + finish_test diff --git a/main.mk b/main.mk index 455b457..83228c3 100644 --- a/main.mk +++ b/main.mk @@ -66,7 +66,7 @@ LIBOBJ+= alter.o analyze.o attach.o auth.o \ table.o tokenize.o trigger.o \ update.o util.o vacuum.o \ vdbe.o vdbeapi.o vdbeaux.o vdbeblob.o vdbemem.o vdbetrace.o \ - walker.o where.o utf.o vtab.o + wal.o walker.o where.o utf.o vtab.o @@ -158,6 +158,8 @@ SRC = \ $(TOP)/src/vdbetrace.c \ $(TOP)/src/vdbeInt.h \ $(TOP)/src/vtab.c \ + $(TOP)/src/wal.c \ + $(TOP)/src/wal.h \ $(TOP)/src/walker.c \ $(TOP)/src/where.c @@ -231,6 +233,7 @@ TESTSRC = \ $(TOP)/src/test_backup.c \ $(TOP)/src/test_btree.c \ $(TOP)/src/test_config.c \ + $(TOP)/src/test_demovfs.c \ $(TOP)/src/test_devsym.c \ $(TOP)/src/test_func.c \ $(TOP)/src/test_hexio.c \ @@ -244,26 +247,51 @@ TESTSRC = \ $(TOP)/src/test_pcache.c \ $(TOP)/src/test_schema.c \ $(TOP)/src/test_server.c \ + $(TOP)/src/test_stat.c \ $(TOP)/src/test_tclvar.c \ $(TOP)/src/test_thread.c \ + $(TOP)/src/test_vfs.c \ $(TOP)/src/test_wsd.c #TESTSRC += $(TOP)/ext/fts2/fts2_tokenizer.c #TESTSRC += $(TOP)/ext/fts3/fts3_tokenizer.c TESTSRC2 = \ - $(TOP)/src/attach.c $(TOP)/src/backup.c $(TOP)/src/btree.c \ - $(TOP)/src/build.c $(TOP)/src/date.c \ - $(TOP)/src/expr.c $(TOP)/src/func.c $(TOP)/src/insert.c $(TOP)/src/mem5.c \ - $(TOP)/src/os.c \ - $(TOP)/src/os_os2.c $(TOP)/src/os_unix.c $(TOP)/src/os_win.c \ - $(TOP)/src/pager.c $(TOP)/src/pragma.c $(TOP)/src/prepare.c \ - $(TOP)/src/printf.c $(TOP)/src/random.c $(TOP)/src/pcache.c \ - $(TOP)/src/pcache1.c $(TOP)/src/select.c $(TOP)/src/tokenize.c \ - $(TOP)/src/utf.c $(TOP)/src/util.c $(TOP)/src/vdbeapi.c $(TOP)/src/vdbeaux.c \ - $(TOP)/src/vdbe.c $(TOP)/src/vdbemem.c $(TOP)/src/where.c parse.c \ - $(TOP)/ext/fts3/fts3.c $(TOP)/ext/fts3/fts3_expr.c \ - $(TOP)/ext/fts3/fts3_tokenizer.c $(TOP)/ext/fts3/fts3_write.c \ + $(TOP)/src/attach.c \ + $(TOP)/src/backup.c \ + $(TOP)/src/btree.c \ + $(TOP)/src/build.c \ + $(TOP)/src/date.c \ + $(TOP)/src/expr.c \ + $(TOP)/src/func.c \ + $(TOP)/src/insert.c \ + $(TOP)/src/wal.c \ + $(TOP)/src/mem5.c \ + $(TOP)/src/os.c \ + $(TOP)/src/os_os2.c \ + $(TOP)/src/os_unix.c \ + $(TOP)/src/os_win.c \ + $(TOP)/src/pager.c \ + $(TOP)/src/pragma.c \ + $(TOP)/src/prepare.c \ + $(TOP)/src/printf.c \ + $(TOP)/src/random.c \ + $(TOP)/src/pcache.c \ + $(TOP)/src/pcache1.c \ + $(TOP)/src/select.c \ + $(TOP)/src/tokenize.c \ + $(TOP)/src/utf.c \ + $(TOP)/src/util.c \ + $(TOP)/src/vdbeapi.c \ + $(TOP)/src/vdbeaux.c \ + $(TOP)/src/vdbe.c \ + $(TOP)/src/vdbemem.c \ + $(TOP)/src/where.c \ + parse.c \ + $(TOP)/ext/fts3/fts3.c \ + $(TOP)/ext/fts3/fts3_expr.c \ + $(TOP)/ext/fts3/fts3_tokenizer.c \ + $(TOP)/ext/fts3/fts3_write.c \ $(TOP)/ext/async/sqlite3async.c # Header files used by all library source files. @@ -322,8 +350,6 @@ sqlite3$(EXE): $(TOP)/src/shell.c libsqlite3.a sqlite3.h $(TOP)/src/shell.c \ libsqlite3.a $(LIBREADLINE) $(TLIBS) $(THREADLIB) -objects: $(LIBOBJ_ORIG) - # This target creates a directory named "tsrc" and fills it with # copies of all of the C source code and header files needed to # build on the target system. 
Some of the C source code and header @@ -393,7 +419,7 @@ parse.c: $(TOP)/src/parse.y lemon $(TOP)/addopcodes.awk rm -f parse.h ./lemon $(OPTS) parse.y mv parse.h parse.h.temp - awk -f $(TOP)/addopcodes.awk parse.h.temp >parse.h + $(NAWK) -f $(TOP)/addopcodes.awk parse.h.temp >parse.h sqlite3.h: $(TOP)/src/sqlite.h.in $(TOP)/manifest.uuid $(TOP)/VERSION tclsh $(TOP)/tool/mksqlite3h.tcl $(TOP) >sqlite3.h @@ -467,21 +493,21 @@ tclsqlite3: $(TOP)/src/tclsqlite.c libsqlite3.a # Rules to build the 'testfixture' application. # -TESTFIXTURE_FLAGS = -DTCLSH=1 -DSQLITE_TEST=1 -DSQLITE_CRASH_TEST=1 +TESTFIXTURE_FLAGS = -DSQLITE_TEST=1 -DSQLITE_CRASH_TEST=1 TESTFIXTURE_FLAGS += -DSQLITE_SERVER=1 -DSQLITE_PRIVATE="" -DSQLITE_CORE testfixture$(EXE): $(TESTSRC2) libsqlite3.a $(TESTSRC) $(TOP)/src/tclsqlite.c - $(TCCX) $(TCL_FLAGS) $(TESTFIXTURE_FLAGS) \ + $(TCCX) $(TCL_FLAGS) -DTCLSH=1 $(TESTFIXTURE_FLAGS) \ $(TESTSRC) $(TESTSRC2) $(TOP)/src/tclsqlite.c \ -o testfixture$(EXE) $(LIBTCL) $(THREADLIB) libsqlite3.a amalgamation-testfixture$(EXE): sqlite3.c $(TESTSRC) $(TOP)/src/tclsqlite.c - $(TCCX) $(TCL_FLAGS) $(TESTFIXTURE_FLAGS) \ + $(TCCX) $(TCL_FLAGS) -DTCLSH=1 $(TESTFIXTURE_FLAGS) \ $(TESTSRC) $(TOP)/src/tclsqlite.c sqlite3.c \ -o testfixture$(EXE) $(LIBTCL) $(THREADLIB) fts3-testfixture$(EXE): sqlite3.c fts3amal.c $(TESTSRC) $(TOP)/src/tclsqlite.c - $(TCCX) $(TCL_FLAGS) $(TESTFIXTURE_FLAGS) \ + $(TCCX) $(TCL_FLAGS) -DTCLSH=1 $(TESTFIXTURE_FLAGS) \ -DSQLITE_ENABLE_FTS3=1 \ $(TESTSRC) $(TOP)/src/tclsqlite.c sqlite3.c fts3amal.c \ -o testfixture$(EXE) $(LIBTCL) $(THREADLIB) @@ -490,7 +516,7 @@ fulltest: testfixture$(EXE) sqlite3$(EXE) ./testfixture$(EXE) $(TOP)/test/all.test soaktest: testfixture$(EXE) sqlite3$(EXE) - ./testfixture$(EXE) $(TOP)/test/all.test -soak 1 + ./testfixture$(EXE) $(TOP)/test/all.test -soak=1 test: testfixture$(EXE) sqlite3$(EXE) ./testfixture$(EXE) $(TOP)/test/veryquick.test @@ -504,8 +530,8 @@ sqlite3_analyzer$(EXE): $(TOP)/src/tclsqlite.c sqlite3.c $(TESTSRC) \ -e 's,^,",' \ -e 's,$$,\\n",' \ $(TOP)/tool/spaceanal.tcl >spaceanal_tcl.h - $(TCCX) $(TCL_FLAGS) $(TESTFIXTURE_FLAGS) \ - -DTCLSH=2 -DSQLITE_TEST=1 -DSQLITE_DEBUG=1 -DSQLITE_PRIVATE="" \ + $(TCCX) $(TCL_FLAGS) -DTCLSH=2 $(TESTFIXTURE_FLAGS) \ + -DSQLITE_TEST=1 -DSQLITE_PRIVATE="" \ $(TESTSRC) $(TOP)/src/tclsqlite.c sqlite3.c \ -o sqlite3_analyzer$(EXE) \ $(LIBTCL) $(THREADLIB) diff --git a/manifest b/manifest index 95e7a43..ebf522e 100644 --- a/manifest +++ b/manifest @@ -1,14 +1,14 @@ -----BEGIN PGP SIGNED MESSAGE----- Hash: SHA1 -C Update\sthe\sversion\snumber\sto\s3.6.23.1. -D 2010-03-26T22:28:06 +C Fix\sa\scomment\stypo.\s\sThis\scheck-in\sis\s3.7.0\srelease\scandidate\s2. 
+D 2010-07-21T16:16:28 F Makefile.arm-wince-mingw32ce-gcc fcd5e9cd67fe88836360bb4f9ef4cb7f8e2fb5a0 -F Makefile.in 4f2f967b7e58a35bb74fb7ec8ae90e0f4ca7868b +F Makefile.in ec08dc838fd8110fe24c92e5130bcd91cbb1ff2e F Makefile.linux-gcc d53183f4aa6a9192d249731c90dbdffbd2c68654 -F Makefile.vxworks ab005d301296c40e021ccd0133ce49ca811e319f +F Makefile.vxworks 4314cde20a1d9460ec5083526ea975442306ae7e F README cd04a36fbc7ea56932a4052d7d0b7f09f27c33d6 -F VERSION 09d2dfb6a4a47d07b3b2091e349eedef78fb0f77 +F VERSION 4dce4379514b12d6bc5c30f7d1f64582ccb4f412 F aclocal.m4 a5c22d164aff7ed549d53a90fa56d56955281f50 F addopcodes.awk 17dc593f791f874d2c23a0f9360850ded0286531 F art/2005osaward.gif 0d1851b2a7c1c9d0ccce545f3e14bca42d7fd248 @@ -18,17 +18,21 @@ F art/SQLiteLogo3.tiff b9e6bf022ae939bc986cddb8ab99583ca1b02cb3 F art/SQLite_big.gif 2b8e4603b91ba2a2c7062a82ff570d945034bb30 F art/nocopy.gif 716aa07d4bb7250d4e75756073bf8ef9f56bec8f F art/powered_by_sqlite.gif 7fbcd7d3675391fd3d21672c14c05f5999eb60d1 +F art/sqlite370.eps aa97a671332b432a54e1d74ff5e8775be34200c2 +F art/sqlite370.jpg d512473dae7e378a67e28ff96a34da7cb331def2 F art/src_logo.gif 9341ef09f0e53cd44c0c9b6fc3c16f7f3d6c2ad9 F config.guess 226d9a188c6196f3033ffc651cbc9dcee1a42977 F config.h.in 868fdb48c028421a203470e15c69ada15b9ba673 F config.sub 9ebe4c3b3dab6431ece34f16828b594fb420da55 -F configure 17dee87ba9b797ea22940dc0fb5b08147bfb246a +F configure 009ceb10a7bd768b6460b7b8782eb639063c8899 x F configure.ac 14740970ddb674d92a9f5da89083dff1179014ff F contrib/sqlitecon.tcl 210a913ad63f9f991070821e599d600bd913e0ad F doc/lemon.html f0f682f50210928c07e562621c3b7e8ab912a538 +F doc/pager-invariants.txt 870107036470d7c419e93768676fae2f8749cf9e +F doc/vfs-shm.txt e101f27ea02a8387ce46a05be2b1a902a021d37a F ext/README.txt 913a7bd3f4837ab14d7e063304181787658b14e1 F ext/async/README.txt 0c541f418b14b415212264cbaaf51c924ec62e5b -F ext/async/sqlite3async.c 676066c2a111a8b3107aeb59bdbbbf335c348f4a +F ext/async/sqlite3async.c a7c6078c82c0bac3b7bea95bc52d5ce7ed58083a F ext/async/sqlite3async.h a21e1252deb14a2c211f0e165c4b9122a8f1f344 F ext/fts1/README.txt 20ac73b006a70bcfd80069bdaf59214b6cf1db5e F ext/fts1/ft_hash.c 3927bd880e65329bdc6f506555b228b28924921b @@ -59,15 +63,15 @@ F ext/fts2/mkfts2amal.tcl 974d5d438cb3f7c4a652639262f82418c1e4cff0 F ext/fts3/README.syntax a19711dc5458c20734b8e485e75fb1981ec2427a F ext/fts3/README.tokenizers 998756696647400de63d5ba60e9655036cb966e9 F ext/fts3/README.txt 8c18f41574404623b76917b9da66fcb0ab38328d -F ext/fts3/fts3.c 2bb2045d1412184e9eea71eb151b159168be5131 +F ext/fts3/fts3.c 9dec342fa1cf0914da679a3b7c0d4b53a27883ba F ext/fts3/fts3.h 3a10a0af180d502cecc50df77b1b22df142817fe -F ext/fts3/fts3Int.h df812ef35f1b47a44ec68a44ec0c2a769c973d85 +F ext/fts3/fts3Int.h 70528ba8c33991699f96ecc64112122833cdbdb5 F ext/fts3/fts3_expr.c f4ff02ebe854e97ac03ff00b38b728a9ab57fd4b F ext/fts3/fts3_hash.c 3c8f6387a4a7f5305588b203fa7c887d753e1f1c F ext/fts3/fts3_hash.h 8331fb2206c609f9fc4c4735b9ab5ad6137c88ec F ext/fts3/fts3_icu.c ac494aed69835008185299315403044664bda295 F ext/fts3/fts3_porter.c 7546e4503e286a67fd4f2a82159620e3e9c7a1bc -F ext/fts3/fts3_snippet.c 538bd27a76e465cb4ef6bfcb5479d897e4d5a536 +F ext/fts3/fts3_snippet.c bc582c38e194b48818da862f9e6f293cc44e29ee F ext/fts3/fts3_tokenizer.c 1a49ee3d79cbf0b9386250370d9cbfe4bb89c8ff F ext/fts3/fts3_tokenizer.h 13ffd9fcb397fec32a05ef5cd9e0fa659bf3dbd3 F ext/fts3/fts3_tokenizer1.c b6d86d1d750787db5c168c73da4e87670ed890a1 @@ -77,9 +81,9 @@ F ext/icu/README.txt 3b130aa66e7a681136f6add198b076a2f90d1e33 F 
ext/icu/icu.c 850e9a36567bbcce6bd85a4b68243cad8e3c2de2 F ext/icu/sqliteicu.h 728867a802baa5a96de7495e9689a8e01715ef37 F ext/rtree/README 6315c0d73ebf0ec40dedb5aa0e942bc8b54e3761 -F ext/rtree/rtree.c a354f6be11a91706680936fdf77b4588f0b34dbe +F ext/rtree/rtree.c c7a18311f2d6ae9a42838e9c04b9e670483b4feb F ext/rtree/rtree.h 834dbcb82dc85b2481cde6a07cdadfddc99e9b9e -F ext/rtree/rtree1.test f72885ed80a329d6bd7991043016d74b51edf2c5 +F ext/rtree/rtree1.test 51bb0cd0405970501e63258401ae5ad235a4f468 F ext/rtree/rtree2.test 7b665c44d25e51b3098068d983a39902b2e2d7a1 F ext/rtree/rtree3.test dece988c363368af8c11862995c762071894918f F ext/rtree/rtree4.test 94fdd570ab5bc47244d87d4590023be43ac786bd @@ -90,156 +94,161 @@ F ext/rtree/rtree_perf.tcl 6c18c1f23cd48e0f948930c98dfdd37dfccb5195 F ext/rtree/rtree_util.tcl 06aab2ed5b826545bf215fff90ecb9255a8647ea F ext/rtree/tkt3363.test 2bf324f7908084a5f463de3109db9c6e607feb1b F ext/rtree/viewrtree.tcl eea6224b3553599ae665b239bd827e182b466024 -F install-sh 9d4de14ab9fb0facae2f48780b874848cbf2f895 +F install-sh 9d4de14ab9fb0facae2f48780b874848cbf2f895 x F ltmain.sh 3ff0879076df340d2e23ae905484d8c15d5fdea8 -F main.mk a36a05a481afcc00388c4d6d4db0e12cacb546e3 +F main.mk 26ad86cf0689940f19b3d608bbfdb3956b2fb9a7 F mkdll.sh 7d09b23c05d56532e9d44a50868eb4b12ff4f74a F mkextu.sh 416f9b7089d80e5590a29692c9d9280a10dbad9f F mkextw.sh 4123480947681d9b434a5e7b1ee08135abe409ac F mkopcodec.awk 3fb9bf077053c968451f4dd03d11661ac373f9d1 F mkopcodeh.awk 29b84656502eee5f444c3147f331ee686956ab0e F mkso.sh fd21c06b063bb16a5d25deea1752c2da6ac3ed83 -F publish.sh c74b6c2b6b63435aa1b4b43b1396dfebfae84095 +F publish.sh 313c5b2425f2cf5e547db7549a9796acc4508f22 F publish_osx.sh 2ad2ee7d50632dff99949edc9c162dbb052f7534 F spec.template 86a4a43b99ebb3e75e6b9a735d5fd293a24e90ca F sqlite.pc.in 42b7bf0d02e08b9e77734a47798d1a55a9e0716b F sqlite3.1 6be1ad09113570e1fc8dcaff84c9b0b337db5ffc F sqlite3.pc.in ae6f59a76e862f5c561eb32a380228a02afc3cad -F src/alter.c e6f4d11b1c0b23642fc46bac9abe0753c4294e05 -F src/analyze.c 92a65a5a402898a52b03695c7f0cd383724d711f -F src/attach.c 7abe1607c2054585377cdba3c219e8572f84ca5e +F src/alter.c a9ff6f14b3935502537e90194b66c7bc79bed317 +F src/analyze.c 3457a2af126eb78f20ad239c225a2c8ed61b78b6 +F src/attach.c 17bec1f18254d9341369f20f90ba24ce35d20d10 F src/auth.c 523da7fb4979469955d822ff9298352d6b31de34 -F src/backup.c b293534bc2df23c57668a585b17ee7faaaef0939 +F src/backup.c 51d83300fe0baee39405c416ceb19a58ed30a8ed F src/bitvec.c 06ad2c36a9c3819c0b9cbffec7b15f58d5d834e0 F src/btmutex.c 96a12f50f7a17475155971a241d85ec5171573ff -F src/btree.c 0d6e44d664b1775b269ea7e6f66fdffcfc32ceb3 -F src/btree.h 0e193b7e90f1d78b79c79474040e3d66a553a4fa -F src/btreeInt.h 71ed5e7f009caf17b7dc304350b3cb64b5970135 -F src/build.c 11100b66fb97638d2d874c1d34d8db90650bb1d7 -F src/callback.c 908f3e0172c3d4058f4ca0acd42c637c52e9669f +F src/btree.c 9a214e6141555b183216b73ace058c7a499cdbe2 +F src/btree.h dd83041eda10c17daf023257c1fc883b5f71f85a +F src/btreeInt.h b0c87f6725b06a0aa194a6d25d54b16ce9d6e291 +F src/build.c 559d38b48f79bc92370c082f4606eefe1e8f94ac +F src/callback.c 01843bdf4b0420fd28046525d150fcd9802931a9 F src/complete.c dc1d136c0feee03c2f7550bafc0d29075e36deac -F src/ctime.c ceb247eb31620bba66a94c3f697db489a1652353 -F src/date.c 485a4409a384310e6d93fd1104a9d0a8658becd9 -F src/delete.c 610dc008e88a9599f905f5cbe9577ac9c36e0581 -F src/expr.c 6baed2a0448d494233d9c0a610eea018ab386a32 +F src/ctime.c 4f3aadad62c6c9f0d4e5a96718516ac4e3c598df +F src/date.c 5dd8448a0bfea8d31fb14cff487d0c06ff8c8b20 +F 
src/delete.c 41cb4f78557810eecc167b7e2317de7e12d20929 +F src/expr.c b2b053429575bf964c64bdf5459c5cbbe5bf93b8 F src/fault.c 160a0c015b6c2629d3899ed2daf63d75754a32bb F src/fkey.c e2116672a6bd610dc888e27df292ebc7999c9bb0 -F src/func.c 5dca069d98eca0ff70c9a8fb8ab9e1d6467187b5 -F src/global.c 5a9c1e3c93213ca574786ac1caa976ce8f709105 +F src/func.c 0c28599430856631216b6c0131c51c89bf516026 +F src/global.c 02335177cf6946fe5525c6f0755cf181140debf3 F src/hash.c 458488dcc159c301b8e7686280ab209f1fb915af F src/hash.h 2894c932d84d9f892d4b4023a75e501f83050970 F src/hwtime.h d32741c8f4df852c7d959236615444e2b1063b08 -F src/insert.c 76d6b44a9f9050134fd81205f4b792cbdac7c925 -F src/journal.c b0ea6b70b532961118ab70301c00a33089f9315c +F src/insert.c d9476f23f85a20eea3cc25a4b9f9cbae77a33bf2 +F src/journal.c 552839e54d1bf76fb8f7abe51868b66acacf6a0e F src/legacy.c a199d7683d60cef73089e892409113e69c23a99f F src/lempar.c 7f026423f4d71d989e719a743f98a1cbd4e6d99e F src/loadext.c 1c7a61ce1281041f437333f366a96aa0d29bb581 -F src/main.c 7d89bb6dcc6993a8d32f4f22dae3e57c50a41399 -F src/malloc.c 5fa175797f982b178eaf38afba9c588a866be729 +F src/main.c a487fe90aecaccb142e4a6b738c7e26e99145bcd +F src/malloc.c 09c3777bf733a387bec6aa344e455eb4e8ecf47e F src/mem0.c 6a55ebe57c46ca1a7d98da93aaa07f99f1059645 F src/mem1.c 89d4ea8d5cdd55635cbaa48ad53132af6294cbb2 -F src/mem2.c ee752297650632935218dcf3b20c5ed5899cb4b5 +F src/mem2.c 2ee7bdacda8299b5a91cff9f7ee3e46573195c38 F src/mem3.c 9b237d911ba9904142a804be727cc6664873f8a3 F src/mem5.c eb7a5cb98915dd7a086fa415ce3a5a0f20d0acff -F src/memjournal.c 5bfc2f33c914946e2f77ed3f882aff14dfc9355d -F src/mutex.c 581a272e09098040ca3ef543cb5f3d643eff7d50 +F src/memjournal.c 4a93a25ad9f76c40afa070ffd7187eb3a5fd7aee +F src/mutex.c 6949180803ff05a7d0e2b9334a95b4fb5a00e23f F src/mutex.h 6fde601e55fa6c3fae768783c439797ab84c87c6 -F src/mutex_noop.c 5f58eaa31f2d742cb8957a747f7887ae98f16053 -F src/mutex_os2.c 20477db50cf3817c2f1cd3eb61e5c177e50231db -F src/mutex_unix.c 04a25238abce7e3d06b358dcf706e26624270809 -F src/mutex_w32.c 4cc201c1bfd11d1562810554ff5500e735559d7e -F src/notify.c f799bbda67ab6619b36b0a24153b49518874a203 -F src/os.c 8bc63cf91e9802e2b807198e54e50227fa889306 -F src/os.h 534b082c3cb349ad05fa6fa0b06087e022af282c -F src/os_common.h 240c88b163b02c21a9f21f87d49678a0aa21ff30 -F src/os_os2.c 75a8c7b9a00a2cf1a65f9fa4afbc27d46634bb2f -F src/os_unix.c 148d2f625db3727250c0b880481ae7630b6d0eb0 -F src/os_win.c 1c7453c2df4dab26d90ff6f91272aea18bcf7053 -F src/pager.c 1915e3ec1a2157d0c29086b7fc0c936a2d97029e -F src/pager.h 1b32faf2e578ac3e7bcf9c9d11217128261c5c54 -F src/parse.y ace5c7a125d9f2a410e431ee3209034105045f7e -F src/pcache.c 4956b41d6ba913f7a8a56fbf32be78caed0e45c2 +F src/mutex_noop.c d5cfbca87168c661a0b118cd8e329a908e453151 +F src/mutex_os2.c 6a62583e374ba3ac1a3fcc0da2bfdac7d3942689 +F src/mutex_unix.c cf84466b4fdd2baa0d5a10bb19f08b2abc1ce42e +F src/mutex_w32.c 1fe0e735897be20e09dd6f53c3fb516c6b48c0eb +F src/notify.c cbfa66a836da3a51567209636e6a94059c137930 +F src/os.c 60178f518c4d6c0dcb59f7292232281d7bea2dcf +F src/os.h 9dbed8c2b9c1f2f2ebabc09e49829d4777c26bf9 +F src/os_common.h a8f95b81eca8a1ab8593d23e94f8a35f35d4078f +F src/os_os2.c 665876d5eec7585226b0a1cf5e18098de2b2da19 +F src/os_unix.c 3109e0e5a0d5551bab2e8c7322b20a3b8b171248 +F src/os_win.c 1f8b0a1a5bcf6289e7754d0d3c16cec16d4c93ab +F src/pager.c 78ca1e1f3315c8227431c403c04d791dccf242fb +F src/pager.h 879fdde5a102d2f21a3135d6f647530b21c2796c +F src/parse.y 220a11ac72e2c9dffbf4cbe5fe462f328bd8d884 +F src/pcache.c 
1e9aa2dbc0845b52e1b51cc39753b6d1e041cb07 F src/pcache.h c683390d50f856d4cd8e24342ae62027d1bb6050 -F src/pcache1.c 2bb2261190b42a348038f5b1c285c8cef415fcc8 -F src/pragma.c 56d95f76154a5f873c32eae485bb625f3c70be46 -F src/prepare.c 18292e5f365655cd5c5693e09508e90668f7d547 +F src/pcache1.c 3a7c28f46a61b43ff0b5c087a7983c154f4b264c +F src/pragma.c 4a79269ea6f86150fb8e44688c753989fc7238dd +F src/prepare.c f045aeff869d6409a2eae2fe08f7dc2df9528195 F src/printf.c 5f5b65a83e63f2096a541a340722a509fa0240a7 F src/random.c cd4a67b3953b88019f8cd4ccd81394a8ddfaba50 -F src/resolve.c a1648d98e869937b29f4f697461fe4d60f220a7b +F src/resolve.c 1c0f32b64f8e3f555fe1f732f9d6f501a7f05706 F src/rowset.c 69afa95a97c524ba6faf3805e717b5b7ae85a697 -F src/select.c 4113ef360430ed4e7533690ef46d06c20204adce -F src/shell.c c40427c7245535a04a9cb4a417b6cc05c022e6a4 -F src/sqlite.h.in 08a2d9a278ff0dfd65055a7ec9c599f7ae1a3c18 +F src/select.c 4903ff1bbd08b55cbce00ea43c645530de41b362 +F src/shell.c fd4ccdb37c3b68de0623eb938a649e0990710714 +F src/sqlite.h.in 8b05aef506d9bc7fc7da1572744e3174cb16ed59 F src/sqlite3ext.h 69dfb8116af51b84a029cddb3b35062354270c89 -F src/sqliteInt.h 6873f7f4c24fcdceece8777f2a1cbec049df77a0 -F src/sqliteLimit.h 3afab2291762b5d09ae20c18feb8e9fa935a60a6 -F src/status.c d329385a2cba3ea49d9d68af0ad84b22d46b4f40 +F src/sqliteInt.h d9e42f2029d4c526f9ba960bda1980ef17429c30 +F src/sqliteLimit.h 196e2f83c3b444c4548fc1874f52f84fdbda40f3 +F src/status.c 4df6fe7dce2d256130b905847c6c60055882bdbe F src/table.c 2cd62736f845d82200acfa1287e33feb3c15d62e -F src/tclsqlite.c bad6570a005b234ea670b9f7b48256da19a032d3 -F src/test1.c aa9b1e10e834330e7759afb639420117e2422ded -F src/test2.c b6b43413d495addd039a88b87d65c839f86b18cb +F src/tclsqlite.c ae1e4fb653c91ddad7e2534d209711a12604ccc4 +F src/test1.c ff3b4533fc4d78d1bff2ef831a5791db55096ed3 +F src/test2.c e3f564ab1e9fd0b47b0c9e23e7054e38bf0836cf F src/test3.c 4c21700c73a890a47fc685c1097bfb661346ac94 F src/test4.c ad03bb987ddedce928f4258c1e7fa4109a73497d F src/test5.c cc55900118fa4add8ec9cf69fc4225a4662f76b1 -F src/test6.c a8ece4284d0e34477f349ac05655db73c48e0926 +F src/test6.c c7256cc21d2409486d094277d5b017e8eced44ba F src/test7.c 3f2d63e4ccf97f8c2cf1a7fa0a3c8e2e2a354e6e F src/test8.c f959db9a22d882013b64c92753fa793b2ce3bdea F src/test9.c bea1e8cf52aa93695487badedd6e1886c321ea60 -F src/test_async.c c1656facbaf43cb2e71b62621e5b9eb080e2621c +F src/test_async.c 0612a752896fad42d55c3999a5122af10dcf22ad F src/test_autoext.c 30e7bd98ab6d70a62bb9ba572e4c7df347fe645e F src/test_backup.c c129c91127e9b46e335715ae2e75756e25ba27de F src/test_btree.c 47cd771250f09cdc6e12dda5bc71bc0b3abc96e2 -F src/test_config.c 5844274bf6cec4af3e6461fb3e2d349082635e81 -F src/test_devsym.c de3c9af2bb9a8b1e44525c449e4ec3f88e3d4110 +F src/test_config.c 5a11c51af2156e2d07186930b36f2b8239a4393f +F src/test_demovfs.c da81a5f7785bb352bda7911c332a983ec4f17f27 +F src/test_devsym.c e7498904e72ba7491d142d5c83b476c4e76993bc F src/test_func.c 13b582345fb1185a93e46c53310fae8547dcce20 F src/test_hexio.c 1237f000ec7a491009b1233f5c626ea71bce1ea2 F src/test_init.c 5d624ffd0409d424cf9adbfe1f056b200270077c F src/test_intarray.c d879bbf8e4ce085ab966d1f3c896a7c8b4f5fc99 F src/test_intarray.h 489edb9068bb926583445cb02589344961054207 -F src/test_journal.c adc0ce3840ed19b49feb1d583b2212f560ef7866 +F src/test_journal.c 424a334cdfdc8a6f975abe3641440147bded3185 F src/test_loadext.c df586c27176e3c2cb2e099c78da67bf14379a56e -F src/test_malloc.c f777d15df756bea0e98271932464ac5d882e66fe +F src/test_malloc.c 
4ab85f2b8ae3a237f4e6557b0a641181a19ffab1 F src/test_mutex.c ce06b59aca168cd8c520b77159a24352a7469bd3 -F src/test_onefile.c 06da7e085dce42924cf062b91763dd4bb84c6101 -F src/test_osinst.c 90fb03d396f39956897dfb4bd0e62c6711db1cca +F src/test_onefile.c 40cf9e212a377a6511469384a64b01e6e34b2eec +F src/test_osinst.c f408c6a181f2fb04c56273afd5c3e1e82f60392c F src/test_pcache.c 7bf828972ac0d2403f5cfa4cd14da41f8ebe73d8 F src/test_schema.c 8c06ef9ddb240c7a0fcd31bc221a6a2aade58bf0 F src/test_server.c bbba05c144b5fc4b52ff650a4328027b3fa5fcc6 +F src/test_stat.c 6ebaf2a86d01ccda24e49c148f1d33e8badda06e F src/test_tclvar.c f4dc67d5f780707210d6bb0eb6016a431c04c7fa -F src/test_thread.c 00fed80690ae7f1525483a35861511c48bc579f2 +F src/test_thread.c bedd05cad673dba53326f3aa468cc803038896c0 +F src/test_vfs.c 7e291f85256516ebde6633bc381ff7eedfa30234 F src/test_wsd.c 41cadfd9d97fe8e3e4e44f61a4a8ccd6f7ca8fe9 F src/tokenize.c 25ceb0f0a746ea1d0f9553787f3f0a56853cfaeb -F src/trigger.c 340c9eca0fb24b1197468d96ba059f867c9834c7 -F src/update.c c0dc6b75ad28b76b619042d934f337b02acee208 +F src/trigger.c 67e95c76d625b92d43409ace771c8e0d02a09ac2 +F src/update.c 19c899c23cd29fd102c9068e0b0ff5b087204beb F src/utf.c 1baeeac91707a4df97ccc6141ec0f808278af685 F src/util.c 32aebf04c10e51ad3977a928b7416bed671b620b -F src/vacuum.c b1d542c8919d4d11119f78069e1906a1ad07e0ee -F src/vdbe.c 8acca6dab2505e9650f6f014ada6ef30570cba99 +F src/vacuum.c 241a8386727c1497eba4955933356dfba6ff8c9f +F src/vdbe.c 6294de3327e09d14e9c06ecfd10e57c2d8e85307 F src/vdbe.h 471f6a3dcec4817ca33596fe7f6654d56c0e75f3 -F src/vdbeInt.h ae1e6ba0dd3fb4a886898d2829d748be701b01f8 -F src/vdbeapi.c 74c25680046a116b24b95393914d3669c23305dc -F src/vdbeaux.c 0f352f63be78138bd94275aa3c8361e760ecc639 -F src/vdbeblob.c 5327132a42a91e8b7acfb60b9d2c3b1c5c863e0e -F src/vdbemem.c 2a82f455f6ca6f78b59fb312f96054c04ae0ead1 +F src/vdbeInt.h 19ebc8c2a2e938340051ee65af3f377fb99102d1 +F src/vdbeapi.c dc3138f10afbc95ed3c21dd25abb154504b1db9d +F src/vdbeaux.c 7f99c1f00e4b31e8b28d8a87ecc2322bb46ae99c +F src/vdbeblob.c 258a6010ba7a82b72b327fb24c55790655689256 +F src/vdbemem.c 5e579abf6532001dfbee0e640dc34eae897a9807 F src/vdbetrace.c 864cef96919323482ebd9986f2132435115e9cc2 -F src/vtab.c 606adf51cd6d4ba51a8c6dccede06a6f7b0dd72d +F src/vtab.c a0f8a40274e4261696ef57aa806de2776ab72cda +F src/wal.c 0925601f3299c2941a67c9cfff41ee710f70ca82 +F src/wal.h 906c85760598b18584921fe08008435aa4eeeeb2 F src/walker.c 3112bb3afe1d85dc52317cb1d752055e9a781f8f -F src/where.c 399ea4c090284c9d16f76d685b9b44e8b9b4442b +F src/where.c 903a7828a0a7de03b5d0f1b5eff222d8d5b138f1 F test/aggerror.test a867e273ef9e3d7919f03ef4f0e8c0d2767944f2 F test/alias.test 4529fbc152f190268a15f9384a5651bbbabc9d87 -F test/all.test 14165b3e32715b700b5f0cbf8f6e3833dda0be45 -F test/alter.test 645b2e8d23c9936f9494af9d2fa7f8351a248c6e -F test/alter2.test d0133bfa7a0a24aa84c034051410b95217d24a35 +F test/all.test 6745008c144bd2956d58864d21f7b304689c1cce +F test/alter.test 15f9224868b290d6bf7a63f31437f31aee070636 +F test/alter2.test 52096b711afe5f219e575c6db7a70f7a35df4f63 F test/alter3.test 25b95a136708f22b87184fa6a4309eea03d65153 F test/alter4.test 9386ffd1e9c7245f43eca412b2058d747509cc1f F test/altermalloc.test e81ac9657ed25c6c5bb09bebfa5a047cd8e4acfc -F test/analyze.test ad5329098fe4de4a96852231d53e3e9e6283ad4b -F test/analyze2.test a2ad7b0a4e13801ee3968fe70f22aff52326569c +F test/analyze.test bf692e7db414f268a136bade16c03a1bdbb9240c +F test/analyze2.test 59dac6c399c0c5d1a90a11ee7cc606743fb6db93 F test/analyze3.test 
506203875258ffd8ffa879b9c3c5432022d2b6d8 -F test/async.test 8c75d31b8330f8b70cf2571b014d4476a063efdb +F test/async.test ad4ba51b77cd118911a3fe1356b0809da9c108c3 F test/async2.test bf5e2ca2c96763b4cba3d016249ad7259a5603b6 F test/async3.test 93edaa9122f498e56ea98c36c72abc407f4fb11e -F test/async4.test aafa6328c559d3e4bb587de770cbdecfca06f0da +F test/async4.test 1787e3952128aa10238bf39945126de7ca23685a F test/async5.test f3592d79c84d6e83a5f50d3fd500445f7d97dfdf F test/attach.test ce9660e51768fab93cf129787be886c5d6c4fd81 F test/attach2.test a295d2d7061adcee5884ef4a93c7c96a82765437 @@ -249,16 +258,17 @@ F test/auth.test 8f21c160a4562f54f27618e85bac869efcecbcaf F test/auth2.test 270baddc8b9c273682760cffba6739d907bd2882 F test/auth3.test a4755e6a2a2fea547ffe63c874eb569e60a28eb5 F test/autoinc.test 85ef3180a737e6580086a018c09c6f1a52759b46 -F test/autovacuum.test 25f891bc343a8bf5d9229e2e9ddab9f31a9ab5ec +F test/autoindex1.test ffb06a246e2c1f89cfbe3d93eca513c9e78d4063 +F test/autovacuum.test bb7c0885e6f8f1d633045de48f2b66082162766d F test/autovacuum_ioerr2.test 598b0663074d3673a9c1bc9a16e80971313bafe6 -F test/avtrans.test 1e901d8102706b63534dbd2bdd4d8f16c4082650 -F test/backup.test 3549ea8f541a08205c0eb813b21e81ea8301f6ed -F test/backup2.test 159419073d9769fdb1780ed7e5b391a046f898d5 +F test/avtrans.test 0252654f4295ddda3b2cce0e894812259e655a85 +F test/backup.test 200e64bd91244b73ca8094bc1e03dfc83cc94c2e +F test/backup2.test b7c69f937c912e85ac8a5dbd1e1cf290302b2d49 F test/backup_ioerr.test 1f012e692f42c0442ae652443258f70e9f20fa38 -F test/backup_malloc.test 1e063c6d75143d0d6e0ae77971dd690070369387 +F test/backup_malloc.test 7162d604ec2b4683c4b3799a48657fb8b5e2d450 F test/badutf.test d5360fc31f643d37a973ab0d8b4fb85799c3169f F test/between.test 16b1776c6323faadb097a52d673e8e3d8be7d070 -F test/bigfile.test b746a34ce0e2039994b45fea8b7fbfa78f594cdf +F test/bigfile.test a8ec8073a20207456dab01a29ad9cde42b0dd103 F test/bigrow.test f0aeb7573dcb8caaafea76454be3ade29b7fc747 F test/bind.test 3c7b320969000c441a70952b0b15938fbb66237c F test/bindxfer.test efecd12c580c14df5f4ad3b3e83c667744a4f7e0 @@ -273,11 +283,11 @@ F test/boundary3.test 56ef82096b4329aca2be74fa1e2b0f762ea0eb45 F test/boundary4.tcl 0bb4b1a94f4fc5ae59b79b9a2b7a140c405e2983 F test/boundary4.test 89e02fa66397b8a325d5eb102b5806f961f8ec4b F test/busy.test 76b4887f8b9160ba903c1ac22e8ff406ad6ae2f0 -F test/cache.test 3ff445c445742a7b6b9ba6e1d62a25263f9424b9 -F test/capi2.test 172c717ed101e78e0798dd21b9896a22366f35b4 -F test/capi3.test 168e2cd66c58c510955b0f299750e4de73b8d952 -F test/capi3b.test 664eb55318132f292f2c436f90906f578cad6b97 -F test/capi3c.test 493385107dcedfaf4f2b1c3738c8c1fa00362006 +F test/cache.test c4288607b54f2702858492fc4b92828336a1812f +F test/capi2.test 00032d7504b9c14f1b36331670c5e7b0f73e3c5d +F test/capi3.test 1945a2ba75e3f4c49d5beb8fc092115b6292d471 +F test/capi3b.test efb2b9cfd127efa84433cd7a2d72ce0454ae0dc4 +F test/capi3c.test bea67403a5e37a4b33230ee4723e315a2ffb31e7 F test/capi3d.test 57d83b690d7364bde02cddbf8339a4b50d80ce23 F test/cast.test 166951664a0b0a2e0f8fb5997a152490c6363932 F test/check.test db2b29d557544347d28e25b8406f5d5ecc3d1bc3 @@ -285,7 +295,7 @@ F test/coalesce.test cee0dccb9fbd2d494b77234bccf9dc6c6786eb91 F test/collate1.test e3eaa48c21e150814be1a7b852d2a8af24458d04 F test/collate2.test 04cebe4a033be319d6ddbb3bbc69464e01700b49 F test/collate3.test d28d2cfab2c3a3d4628ae4b2b7afc9965daa3b4c -F test/collate4.test 4545554388daaa604e5b3def3aa2f7ed6d56e8da +F test/collate4.test 3d3f123f83fd8ccda6f48d617e44e661b9870c7d F 
test/collate5.test fe0f43c4740d7b71b959cac668d19e42f2e06e4d F test/collate6.test 8be65a182abaac8011a622131486dafb8076e907 F test/collate7.test fac8db7aac3978466c04ae892cc74dcf2bc031aa @@ -295,20 +305,20 @@ F test/collateA.test b8218ab90d1fa5c59dcf156efabb1b2599c580d6 F test/colmeta.test 087c42997754b8c648819832241daf724f813322 F test/colname.test 08948a4809d22817e0e5de89c7c0a8bd90cb551b F test/conflict.test 0ed68b11f22721052d880ee80bd528a0e0828236 -F test/corrupt.test 0d346c9fe064ca71281685a8a732fcc83461bb99 -F test/corrupt2.test a571e30ea4e82318f319a24b6cc55935ce862079 -F test/corrupt3.test 263e8bb04e2728df832fddf6973cf54c91db0c32 -F test/corrupt4.test acdb01afaedf529004b70e55de1a6f5a05ae7fff +F test/corrupt.test 1a5bef8b2d178859af69814ecedcd37219a89968 +F test/corrupt2.test 808a28d0ca3b97e9aa8c91cd2b485ea2700b76d1 +F test/corrupt3.test a399dacccb91c732f6b071c913e70d195af8c058 +F test/corrupt4.test b963f9e01e0f92d15c76fb0747876fd4b96dc30a F test/corrupt5.test c23da7bfb20917cc7fdbb13ee25c7cc4e9fffeff -F test/corrupt6.test e69b877d478224deab7b66844566258cecacd25e -F test/corrupt7.test 1eb2214f29474fa6b155aa3da8a7d46bf52089e1 -F test/corrupt8.test 9992ef7f67cefc576b92373f6bf5ab8775280f51 -F test/corrupt9.test 4aa1cb1ef091cb0e13e89a819c72911631b5176a -F test/corruptA.test 99e95620b980161cb3e79f06a884a4bb8ae265ff -F test/corruptB.test 66b4544104dd03d0f33ea69ddac3fa4a682cd3c2 -F test/corruptC.test 691ed070baef5e1345939caadf270a52837a5064 +F test/corrupt6.test 4e4161aef1f30b9f34582bb4142334b7f47eacae +F test/corrupt7.test a90caf89c7d7cb7893ea4d92529bd0c129317ee4 +F test/corrupt8.test 48eb37ffb9a03bceada62219e2bd4c92f4b0cb75 +F test/corrupt9.test fad0bc26a5c972580a8d763c62f24094f4e8ef25 +F test/corruptA.test 38b4f81c16099f6d8fa8b37e188fde76b8243994 +F test/corruptB.test 44133515cf46c4d7bba691e3bcfa478080413af0 +F test/corruptC.test 483aa35dadfd96bdf549e38d75ffc2942576477e F test/corruptD.test 3ae6e2dc6e2226c6935a8a40d4b5ee3eba75f8c0 -F test/corruptE.test dbf66cae4c0e977ca9625a9114cdd01df8967bef +F test/corruptE.test 7290b61145d954be549340e462ca84826d8a31a3 F test/count.test 454e1ce985c94d13efeac405ce54439f49336163 F test/crash.test 1b6ac8410689ff78028887f445062dc897c9ac89 F test/crash2.test 5b14d4eb58b880e231361d3b609b216acda86651 @@ -317,35 +327,37 @@ F test/crash4.test 02ff4f15c149ca1e88a5c299b4896c84d9450c3b F test/crash5.test 80a2f7073381837fc082435c97df52a830abcd80 F test/crash6.test 9c730cf06335003cb1f5cfceddacd044155336e0 F test/crash7.test e20a7b9ee1d16eaef7c94a4cb7ed2191b4d05970 -F test/crash8.test 3af0fc90c3e593b85e810b8d6c50fc7d0df30008 +F test/crash8.test 76b95451933fe172ce8e26bff22d5c663c8ae473 F test/crashtest1.c 09c1c7d728ccf4feb9e481671e29dda5669bbcc2 F test/createtab.test 199cf68f44e5d9e87a0b8afc7130fdeb4def3272 F test/cse.test 277350a26264495e86b1785f34d2d0c8600e021c -F test/ctime.test f5040beef89c1b2bdb6a9edb7358a519213ff80c -F test/date.test 0b8473ed9ab6fd4283b4a01f035e1067762ba734 +F test/ctime.test 7bd009071e242aac4f18521581536b652b789a47 +F test/date.test 6354b883f922c38046a8efbad187cc95df6da023 +F test/dbstatus.test 838447a0ecca1232675b025c0a518a9ef0f8057e F test/default.test 6faf23ccb300114924353007795aa9a8ec0aa9dc F test/delete.test f7629d9eb245dfca170169cc5c7a735dec34aeb4 F test/delete2.test 3a03f2cca1f9a67ec469915cb8babd6485db43fa F test/delete3.test 555e84a00a99230b7d049d477a324a631126a6ab -F test/descidx1.test a13d443571e045b61b1b2b759df8dcffa092c968 -F test/descidx2.test 1310ed1326cdfed4ea2c55169631579f082d174f -F test/descidx3.test 
3394ad4d089335cac743c36a14129d6d931c316f +F test/descidx1.test b1353c1a15cfbee97b13a1dcedaf0fe78163ba6a +F test/descidx2.test 9f1a0c83fd57f8667c82310ca21b30a350888b5d +F test/descidx3.test fe720e8b37d59f4cef808b0bf4e1b391c2e56b6f F test/diskfull.test 0cede7ef9d8f415d9d3944005c76be7589bb5ebb F test/distinctagg.test 1a6ef9c87a58669438fc771450d7a72577417376 +F test/e_expr.test 141e53fea525bce4f5403fcb0067b88e64fec5eb F test/e_fkey.test 6721a741c6499b3ab7e5385923233343c8f1ad05 -F test/e_fts3.test 5adb033fae6e07002d11f4a7c8f8e8ff9f31e8ec +F test/e_fts3.test 75bb0aee26384ef586165e21018a17f7cd843469 F test/enc.test e54531cd6bf941ee6760be041dff19a104c7acea F test/enc2.test 6d91a5286f59add0cfcbb2d0da913b76f2242398 F test/enc3.test 5c550d59ff31dccdba5d1a02ae11c7047d77c041 F test/eval.test bc269c365ba877554948441e91ad5373f9f91be3 -F test/exclusive.test 4d8a112d6c5bf52014e9383c25ff193cc4f67185 -F test/exclusive2.test 6bdf254770a843c2933b54bee9ed239934f0a183 +F test/exclusive.test 5fe18e10a159342dd52ca14b1554e33f98734267 +F test/exclusive2.test fcbb1c9ca9739292a0a22a3763243ad6d868086b F test/exec.test e949714dc127eaa5ecc7d723efec1ec27118fdd7 F test/expr.test 9f521ae22f00e074959f72ce2e55d46b9ed23f68 -F test/filectrl.test 8923a6dc7630f31c8a9dd3d3d740aa0922df7bf8 -F test/filefmt.test 84e3d0fe9f12d0d2ac852465c6f8450aea0d6f43 +F test/filectrl.test 97003734290887566e01dded09dc9e99cb937e9e +F test/filefmt.test 5d271bf467e6557fe7499dcc8203069c9dc5825e F test/fkey1.test 01c7de578e11747e720c2d9aeef27f239853c4da -F test/fkey2.test e71f5baf9bb42cdba4700d73cba6f4d82fd6b925 +F test/fkey2.test 098c06c139a79f690301a43511cd1f6420ae5433 F test/fkey3.test 42f88d6048d8dc079e2a8cf7baad1cc1483a7620 F test/fkey_malloc.test a5ede29bd2f6e56dea78c3d43fb86dd696c068c8 F test/format4.test 1f0cac8ff3895e9359ed87e41aaabee982a812eb @@ -363,7 +375,7 @@ F test/fts1m.test 2d9ca67b095d49f037a914087cc0a61e89da4f0c F test/fts1n.test a2317dcd27b1d087ee3878b30e0a59c593c98b7a F test/fts1o.test 382b8b07a2d6de5610814d9477117c4430464b9c F test/fts1porter.test d86e9c3e0c7f8ff95add6582b4b585fb4e02b96d -F test/fts2.test 2fcc0cfcda440f1eb23b5d7897a8ec7b55a02239 +F test/fts2.test e3fb95f96a650411574efc136f3fb10eef479ed7 F test/fts2a.test 473a5c8b473a4e21a8e3fddaed1e59666e0c6ab7 F test/fts2b.test 964abc0236c849c07ca1ae496bb25c268ae94816 F test/fts2c.test ffb5a35230ac72c4354535c547965ce6824537c0 @@ -383,8 +395,8 @@ F test/fts2p.test 4b48c35c91e6a7dbf5ac8d1e5691823cc999aafb F test/fts2q.test b2fbbe038b7a31a52a6079b215e71226d8c6a682 F test/fts2r.test b154c30b63061d8725e320fba1a39e2201cadd5e F test/fts2token.test d8070b241a15ff13592a9ae4a8b7c171af6f445a -F test/fts3.test ae0433b09b12def08105640e57693726c4949338 -F test/fts3_common.tcl 1d887ded06dac9b993cfb175618df7f70c796de2 +F test/fts3.test 672a040ea57036fb4b6fdc09027c18d7d24ab654 +F test/fts3_common.tcl 4d8eec9db565fed9098f45c378f28e1657802011 F test/fts3aa.test 5327d4c1d9b6c61021696746cc9a6cdc5bf159c0 F test/fts3ab.test 09aeaa162aee6513d9ff336b6932211008b9d1f9 F test/fts3ac.test 636ed7486043055d4f126a0e385f2d5a82ebbf63 @@ -398,7 +410,7 @@ F test/fts3aj.test 584facbc9ac4381a7ec624bfde677340ffc2a5a4 F test/fts3ak.test bd14deafe9d1586e8e9bf032411026ac4f8c925d F test/fts3al.test 07d64326e79bbdbab20ee87fc3328fbf01641c9f F test/fts3am.test 218aa6ba0dfc50c7c16b2022aac5c6be593d08d8 -F test/fts3an.test 931fa21bd80641ca594bfa32e105250a8a07918b +F test/fts3an.test a49ccadc07a2f7d646ec1b81bc09da2d85a85b18 F test/fts3ao.test 0aa29dd4fc1c8d46b1f7cfe5926f7ac97551bea9 F test/fts3atoken.test 
25c2070e1e8755d414bf9c8200427b277a9f99fa F test/fts3b.test e93bbb653e52afde110ad53bbd793f14fe7a8984 @@ -410,34 +422,34 @@ F test/fts3expr.test 5e745b2b6348499d9ef8d59015de3182072c564c F test/fts3expr2.test 18da930352e5693eaa163a3eacf96233b7290d1a F test/fts3malloc.test 059592c4f37ccd30138bbf8e3e5b7982cb5c8f2e F test/fts3near.test 2e318ee434d32babd27c167142e2b94ddbab4844 -F test/fts3query.test 154fe4b015fd61af523ee083570a134f508f5be7 -F test/fts3rnd.test 2f5761db9dd92f6fe09d08976ac658ef521846ed +F test/fts3query.test 2468caf7938dbc3be2e049524320ce4faf2227b3 +F test/fts3rnd.test 707533ce943f490443ce5e696236bb1675a37635 F test/fts3snippet.test 9f9a4a7e396c5d8ce2898be65ebabc429555430f F test/fts4aa.test eadf85621c0a113d4c7ad3ccbf8441130e007b8f F test/func.test 6c5ce11e3a0021ca3c0649234e2d4454c89110ca F test/func2.test 772d66227e4e6684b86053302e2d74a2500e1e0f -F test/fuzz.test a4174c3009a3e2c2e14b31b364ebf7ddb49de2c9 +F test/fuzz.test 77fd50afc12847af50fcf1941679d90adebadde6 F test/fuzz2.test 207d0f9d06db3eaf47a6b7bfc835b8e2fc397167 F test/fuzz3.test aec64345184d1662bd30e6a17851ff659d596dc5 F test/fuzz_common.tcl a87dfbb88c2a6b08a38e9a070dabd129e617b45b -F test/fuzz_malloc.test 4eca9d345f06d5b0b0105f7a2ef9e7f22658827b -F test/hook.test c9c992f2914977072a71e98df3bfcad1f47737c9 +F test/fuzz_malloc.test dd7001ac86d09c154a7dff064f4739c60e2b312c +F test/hook.test f04c3412463f8ec117c1c704c74ca0f627ce733a F test/icu.test 1fc0ff9a3bafc80abf679b11afc0f8a3ce995abd F test/in.test d49419c6df515852f477fa513f3317181d46bc92 F test/in2.test 5d4c61d17493c832f7d2d32bef785119e87bde75 F test/in3.test 3cbf58c87f4052cee3a58b37b6389777505aa0c0 F test/in4.test 64f3cc1acde1b9161ccdd8e5bde3daefdb5b2617 -F test/incrblob.test 54ac96eacab29215f1e1513f3b6843ebd0242eac +F test/incrblob.test e557f262cd2cc088e6bb4d154575a1bbe242edcd F test/incrblob2.test edc3a96e557bd61fb39acc8d2edd43371fbbaa19 F test/incrblob_err.test c577c91d4ed9e8336cdb188b15d6ee2a6fe9604e -F test/incrvacuum.test d0fb6ef6d747ef5c5ebe878aafa72dd3e178856b +F test/incrvacuum.test 453d1e490d8f5ad2c9b3a54282a0690d6ae56462 F test/incrvacuum2.test 9e22a794899c91b7d8c8e12eaacac8df249faafe F test/incrvacuum_ioerr.test 57d2f5777ab13fa03b87b262a4ea1bad5cfc0291 F test/index.test cbf301cdb2da43e4eac636c3400c2439af1834ad F test/index2.test ee83c6b5e3173a3d7137140d945d9a5d4fdfb9d6 -F test/index3.test 727d55dceb9a4ec36675057bb5becfc265e28ca6 +F test/index3.test 423a25c789fc8cc51aaf2a4370bbdde2d9e9eed7 F test/indexedby.test 946ca2628a521f4ced0520421a0788345abaf3dc -F test/init.test 3f9e97948cf2335c08a5e3edc3df3a26cdaa76f2 +F test/init.test 15c823093fdabbf7b531fe22cf037134d09587a7 F test/insert.test aef273dd1cee84cc92407469e6bd1b3cdcb76908 F test/insert2.test 4f3a04d168c728ed5ec2c88842e772606c7ce435 F test/insert3.test 1b7db95a03ad9c5013fdf7d6722b6cd66ee55e30 @@ -446,7 +458,7 @@ F test/insert5.test 1f93cbe9742110119133d7e8e3ccfe6d7c249766 F test/intarray.test 066b7d7ac38d25bf96f87f1b017bfc687551cdd4 F test/interrupt.test 42e7cf98646fd9cb4a3b131a93ed3c50b9e149f1 F test/intpkey.test 537669fd535f62632ca64828e435b9e54e8d677f -F test/io.test e7bd58edb4e2131a8ecd81b4b00af3ee5c79d464 +F test/io.test 1b895d6774491895cbc75659969f07ca01860c88 F test/ioerr.test 390785ec65f10aa58a82b048ee12e9052d783fa8 F test/ioerr2.test 1b56cb80d5b0726ee3ba325ca175734541e32955 F test/ioerr3.test d3cec5e1a11ad6d27527d0d38573fbff14c71bdd @@ -459,8 +471,10 @@ F test/join4.test 1a352e4e267114444c29266ce79e941af5885916 F test/join5.test 86675fc2919269aa923c84dd00ee4249b97990fe F test/join6.test 
bf82cf3f979e9eade83ad0d056a66c5ed71d1901 F test/journal1.test 36f2d1bb9bf03f790f43fbdb439e44c0657fab19 -F test/jrnlmode.test a765844f22b3f6d72d78a68d5decd26c64bb859c -F test/jrnlmode2.test fe79ea1f0375c926b8de0362ddf94f34a64135fd +F test/journal2.test 50a3604768494d4a337f194f0a9480e7c57dcb72 +F test/journal3.test ff175219be1b02d2f7e54297ad7e491b7533edb6 +F test/jrnlmode.test 2d5a8b6d68de8512f522532731d90ca96912f3b7 +F test/jrnlmode2.test a19e28de1a6ec898067e46a122f1b71c9323bf00 F test/jrnlmode3.test cfcdb12b90e640a23b92785a002d96c0624c8710 F test/keyword1.test a2400977a2e4fde43bf33754c2929fda34dbca05 F test/lastinsert.test 474d519c68cb79d07ecae56a763aa7f322c72f51 @@ -471,16 +485,17 @@ F test/limit.test 2db7b3b34fb925b8e847d583d2eb67531d0ce67e F test/loadext.test 0393ce12d9616aa87597dd0ec88181de181f6db0 F test/loadext2.test 0bcaeb4d81cd5b6e883fdfea3c1bdbe1f173cbca F test/lock.test 842e80b6be816c79525a20b098cca066989feed7 -F test/lock2.test 7bb642551df59b3de135291d62ee82409420181e +F test/lock2.test 5242d8ac4e2d59c403aebff606af449b455aceff F test/lock3.test f271375930711ae044080f4fe6d6eda930870d00 -F test/lock4.test f4f36271aa5ae1da449646bf43c7341f6b2b4c4e -F test/lock5.test 6b1f78f09ad1522843dad571b76b321e6f439bf7 -F test/lock6.test 862aa71e97b288d6b3f92ba3313f51bd0b003776 +F test/lock4.test c82268c031d39345d05efa672f80b025481b3ae5 +F test/lock5.test b2abb5e711bc59b0eae00f6c97a36ec9f458fada +F test/lock6.test 8df56060f396151777390982422c800d026e1722 F test/lock7.test 64006c84c1c616657e237c7ad6532b765611cf64 -F test/lookaside.test 1dd350dc6dff015c47c07fcc5a727a72fc5bae02 -F test/main.test 2be2352ac77ac5b238c6337a5469aeeef57677e6 +F test/lock_common.tcl e7013c6208f5fa818735c324eb0249b4c0f317cf +F test/lookaside.test 382e7bc2fab23d902c8eafb1b9ed7ababfff75a6 +F test/main.test 9d7bbfcc1b52c88ba7b2ba6554068ecf9939f252 F test/make-where7.tcl 05c16b5d4f5d6512881dfec560cb793915932ef9 -F test/malloc.test d23580e15c33ee0353717129421b077541e910dc +F test/malloc.test 927e6c8668a1d48c23aa6189bda02aff5a1b83de F test/malloc3.test 4bc57f850b212f706f3e1b37c4eced1d5a727cd1 F test/malloc4.test 957337613002b7058a85116493a262f679f3a261 F test/malloc5.test 4d16d1bb26d2deddd7c4f480deec341f9b2d0e22 @@ -489,87 +504,93 @@ F test/malloc7.test 7c68a32942858bc715284856c5507446bba88c3a F test/malloc8.test 9b7a3f8cb9cf0b12fff566e80a980b1767bd961d F test/malloc9.test 2307c6ee3703b0a21391f3ea92388b4b73f9105e F test/mallocA.test 4b650c745aab289079454f4d1c02abe5c97ab6b3 -F test/mallocAll.test 2a2222a5e447be6c6579055a9a26e507e4586f4e +F test/mallocAll.test 98f1be74bc9f49a858bc4f361fc58e26486798be F test/mallocB.test bc475ab850cda896142ab935bbfbc74c24e51ed6 -F test/mallocC.test 7fcfb7c6cab30dc90d0fe3f2d5e3bcda5de33761 +F test/mallocC.test 3dffe16532f109293ce1ccecd0c31dca55ef08c4 F test/mallocD.test f78c295e8e18ea3029e65ca08278690e00c22100 F test/mallocE.test db1ed69d7eded1b080952e2a7c37f364ad241b08 F test/mallocF.test 2d5c590ebc2fc7f0dcebdf5aa8498b9aed69107e F test/mallocG.test 4584d0d8ddb8009f16ca0c8bab1fa37f6358efa2 F test/mallocH.test 79b65aed612c9b3ed2dcdaa727c85895fd1bfbdb -F test/mallocI.test e3ea401904d010cb7c1e4b2ee8803f4a9f5b999d +F test/mallocI.test a88c2b9627c8506bf4703d8397420043a786cdb6 F test/mallocJ.test b5d1839da331d96223e5f458856f8ffe1366f62e F test/mallocK.test d79968641d1b70d88f6c01bdb9a7eb4a55582cc9 -F test/malloc_common.tcl 984baeb6c6b185e798827d1187d426acc2bc4962 +F test/malloc_common.tcl f4a04b7a733eb114a3da16eb39035cde2c851220 F test/manydb.test b3d3bc4c25657e7f68d157f031eb4db7b3df0d3c F test/memdb.test 
0825155b2290e900264daaaf0334b6dfe69ea498 -F test/memleak.test d2d2a1ff7105d32dc3fdf691458cf6cba58c7217 -F test/memsubsys1.test fd8a33046b6e758e3eb93747dc4eec21fe56bf64 +F test/memleak.test 10b9c6c57e19fc68c32941495e9ba1c50123f6e2 +F test/memsubsys1.test 8fb47b7e2523f94c100f5885c5697505524de4b9 F test/memsubsys2.test 72a731225997ad5e8df89fdbeae9224616b6aecc F test/minmax.test 722d80816f7e096bf2c04f4111f1a6c1ba65453d F test/minmax2.test 33504c01a03bd99226144e4b03f7631a274d66e0 -F test/minmax3.test a38686c33b07d595e98a2fc6d3aa84a5e886a972 -F test/misc1.test 1b89c02c4a33b49dee4cd1d20d161aaaba719075 +F test/minmax3.test 66a60eb0f20281b0753249d347c5de0766954cee +F test/misc1.test e56baf44656dd68d6475a4b44521045a60241e9b F test/misc2.test a628db7b03e18973e5d446c67696b03de718c9fd F test/misc3.test 72c5dc87a78e7865c5ec7a969fc572913dbe96b6 F test/misc4.test 91e8ed25c092c2bb4e0bb01864631e2930f8d7de -F test/misc5.test 6a5c1e3217a95b0db05ff9a0f1ecb5ce9043ffef +F test/misc5.test 45b2e3ed5f79af2b4f38ae362eaf4c49674575bd F test/misc6.test 953cc693924d88e6117aeba16f46f0bf5abede91 F test/misc7.test c5f4e6a82e04e71820c0f9f64f6733f04c8ae0ae F test/misuse.test 30b3a458e5a70c31e74c291937b6c82204c59f33 F test/mutex1.test 5b71777fc127509cd257910c8db799de557a02de F test/mutex2.test bfeaeac2e73095b2ac32285d2756e3a65e681660 -F test/nan.test cf555724e5a26aed2296a3f2637feee9f728cd81 +F test/nan.test a44e04df1486fcfb02d32468cbcd3c8e1e433723 F test/notify1.test 8433bc74bd952fb8a6e3f8d7a4c2b28dfd69e310 F test/notify2.test 195a467e021f74197be2c4fb02d6dee644b8d8db +F test/notify3.test 7eeba3628c4e707c004b72a2489c48fbdbc5c2ee F test/notnull.test cc7c78340328e6112a13c3e311a9ab3127114347 F test/null.test a8b09b8ed87852742343b33441a9240022108993 F test/openv2.test af02ed0a9cbc0d2a61b8f35171d4d117e588e4ec -F test/pageropt.test 3ee6578891baaca967f0bd349e4abfa736229e1a +F test/pager1.test d8e4b2bc8164c920e6ea0572c9e13576d6e4f3fa +F test/pager2.test f5c757c271ce642d36a393ecbfb3aef1c240dcef +F test/pagerfault.test a4c0bb8900b8dbf5fcbe41ee2a96148e22174bcb +F test/pagerfault2.test 1f79ea40d1133b2683a2f811b00f2399f7ec2401 +F test/pageropt.test 8146bf448cf09e87bb1867c2217b921fb5857806 F test/pagesize.test 76aa9f23ecb0741a4ed9d2e16c5fa82671f28efb -F test/pcache.test eebc4420b37cb07733ae9b6e99c9da7c40dd6d58 +F test/pcache.test 4118a183908ecaed343a06fcef3ba82e87e0129d F test/pcache2.test 0d85f2ab6963aee28c671d4c71bec038c00a1d16 -F test/permutations.test 91928573ca2db2c88dbc50ab34e4a585d912b580 -F test/pragma.test 5aeb48a442dba3c3e8e38773b121371814ab3b17 +F test/permutations.test 3fe47c21c32b294b2354e702a25bfbff65747bb1 +F test/pragma.test ed78d200f65c6998df51196cb8c39d5300570f24 F test/pragma2.test 5364893491b9231dd170e3459bfc2e2342658b47 F test/printf.test 05970cde31b1a9f54bd75af60597be75a5c54fea F test/progress.test 5b075c3c790c7b2a61419bc199db87aaf48b8301 F test/ptrchng.test ef1aa72d6cf35a2bbd0869a649b744e9d84977fc -F test/quick.test d6591e74f3ac19da7fd076845f06dca48fd43cff +F test/quick.test 1681febc928d686362d50057c642f77a02c62e57 F test/quote.test 215897dbe8de1a6f701265836d6601cc6ed103e6 F test/randexpr1.tcl 40dec52119ed3a2b8b2a773bce24b63a3a746459 F test/randexpr1.test 1084050991e9ba22c1c10edd8d84673b501cc25a -F test/rdonly.test bd054831f8a3078e765a0657e247182486f0cb47 +F test/rdonly.test c267d050a1d9a6a321de502b737daf28821a518d F test/reindex.test 44edd3966b474468b823d481eafef0c305022254 -F test/rollback.test 73355ad4492ff9a3a31e61c7e5eb5e01a1de94ca -F test/rowhash.test 97f56043ba11f0679920416c0cdbc72e5272267b +F 
test/rollback.test 1a83118ea6db4e7d8c10eaa63871b5e90502ffdc +F test/rowhash.test 0bc1d31415e4575d10cacf31e1a66b5cc0f8be81 F test/rowid.test e58e0acef38b527ed1b0b70d3ada588f804af287 -F test/rtree.test 55466a200af3591946c5da77ad5dbfbc1e5e05f9 -F test/savepoint.test f2ede4b643ad87ead36c041c72d774a1f5c8a564 -F test/savepoint2.test 427c8b20f43d3edf17a290c6788ae9e2703ac51c +F test/rtree.test fb372aff108d4371bd0b5e63e106947587ff4310 +F test/savepoint.test 992d6429b6bce16ac172f7431975044ceaeb0803 +F test/savepoint2.test 9b8543940572a2f01a18298c3135ad0c9f4f67d7 F test/savepoint3.test e328085853b14898d78ceea00dfe7db18bb6a9ec F test/savepoint4.test c8f8159ade6d2acd9128be61e1230f1c1edc6cc0 F test/savepoint5.test 0735db177e0ebbaedc39812c8d065075d563c4fd -F test/savepoint6.test 2df1d093e59e78d688c64eb20e0457aaea7d08f9 +F test/savepoint6.test 76d3948568b2cdc0c13a671cadcae75009b183d6 F test/schema.test 8f7999be894260f151adf15c2c7540f1c6d6a481 F test/schema2.test 906408621ea881fdb496d878b1822572a34e32c5 +F test/schema3.test 1bc1008e1f8cb5654b248c55f27249366eb7ed38 F test/securedel.test 328d2921c0ca49bdd3352e516b0377fc07143254 F test/select1.test f67ca2dfc05df41c7b86eb32ca409b427a5f43b0 -F test/select2.test 9735da20ccd41e42bf2b4c19fd939141b591adae +F test/select2.test 352480e0e9c66eda9c3044e412abdf5be0215b56 F test/select3.test 2ce595f8fb8e2ac10071d3b4e424cadd4634a054 F test/select4.test 44aa6e7110592e18110b0b9cf5c024d37d23be17 F test/select5.test e758b8ef94f69b111df4cb819008856655dcd535 F test/select6.test 2b5e8500d8ec3dd4c8e0c99eb1431b3d11fcc24c F test/select7.test dad6f00f0d49728a879d6eb6451d4752db0b0abe F test/select8.test 391de11bdd52339c30580dabbbbe97e3e9a3c79d -F test/select9.test b4007b15396cb7ba2615cab31e1973b572e43210 +F test/select9.test 74c0fb2c6eecb0219cbed0cbe3df136f8fbf9343 F test/selectA.test 06d1032fa9009314c95394f2ca2e60d9f7ae8532 F test/selectB.test f305cc6660804cb239aab4e2f26b0e288b59958b -F test/selectC.test 07a45610c8b3bd878943004fd23f4cc0682bd4c0 +F test/selectC.test 33bb5673a8141df193c6fd56e6de7fea38b8d2ee F test/server1.test f5b790d4c0498179151ca8a7715a65a7802c859c F test/shared.test 3b448dc0f7a9356e641894ed81c27599f39d809d F test/shared2.test d6ba4ca1827ea36a1ac23a99e3c36eeac9165450 -F test/shared3.test 9c880afc081d797da514ef64bccf36f3fce2f09c +F test/shared3.test d69bdd5f156580876c5345652d21dc2092e85962 F test/shared4.test d0fadacb50bb6981b2fb9dc6d1da30fa1edddf83 F test/shared6.test 990d2584b5db28e6e1f24742c711b26e59757b67 F test/shared7.test 8114027cb5e8c376e467115703d46e5ac4e77739 @@ -577,8 +598,8 @@ F test/shared_err.test 91e26ec4f3fbe07951967955585137e2f18993de F test/sharedlock.test ffa0a3c4ac192145b310f1254f8afca4d553eabf F test/shortread1.test bb591ef20f0fd9ed26d0d12e80eee6d7ac8897a3 F test/sidedelete.test f0ad71abe6233e3b153100f3b8d679b19a488329 -F test/soak.test d9d0a5e5c0157115c9a17f526f12691fe146768d -F test/softheap1.test 73ebd6e020d2954d965da2072baba5922fc8fb6a +F test/soak.test 0b5b6375c9f4110c828070b826b3b4b0bb65cd5f +F test/softheap1.test c16709a16ad79fa43b32929b2e623d1d117ccf53 F test/sort.test 0e4456e729e5a92a625907c63dcdedfbe72c5dc5 F test/speed1.test f2974a91d79f58507ada01864c0e323093065452 F test/speed1p.explain d841e650a04728b39e6740296b852dccdca9b2cb @@ -589,37 +610,46 @@ F test/speed4.test abc0ad3399dcf9703abed2fff8705e4f8e416715 F test/speed4p.explain 6b5f104ebeb34a038b2f714150f51d01143e59aa F test/speed4p.test 0e51908951677de5a969b723e03a27a1c45db38b F test/sqllimits1.test e90a0ed94452076f6a10209d378e06b5f75ef0a0 -F test/stmt.test 
ac97e59879fd3bd52ecd60ef4efb03ba16292829 +F test/stat.test 70fe540ffb285947aead5533dfd0c8c12f17f14e +F test/stmt.test 7915bd3e8380b956c095f40f41a775a30716e649 F test/subquery.test b524f57c9574b2c0347045b4510ef795d4686796 F test/subselect.test d24fd8757daf97dafd2e889c73ea4c4272dcf4e4 F test/substr.test 18f57c4ca8a598805c4d64e304c418734d843c1a F test/sync.test ded6b39d8d8ca3c0c5518516c6371b3316d3e3a3 -F test/table.test bf102a5669c4db7a41330802f24a4a81a4204f83 +F test/table.test 04ba066432430657712d167ebf28080fe878d305 F test/tableapi.test 7262a8cbaa9965d429f1cbd2747edc185fa56516 -F test/tclsqlite.test bf4227eb236a4c097aa7974a2bf7d3225acf34be -F test/tempdb.test 1bf52da28a9c24e29717362a87722dff08feb72b +F test/tclsqlite.test 8c154101e704170c2be10f137a5499ac2c6da8d3 +F test/tempdb.test 800c36623d67a2ad1f58784b9c5644e0405af6e6 F test/temptable.test f42121a0d29a62f00f93274464164177ab1cc24a F test/temptrigger.test b0273db072ce5f37cf19140ceb1f0d524bbe9f05 -F test/tester.tcl e1f581c7a2648a0aaa51135c4d2e7be68f4b9292 +F test/tester.tcl cab2b46972cd50c3939a0e30e0b37e73f558bc2d F test/thread001.test a3e6a7254d1cb057836cb3145b60c10bf5b7e60f F test/thread002.test afd20095e6e845b405df4f2c920cb93301ca69db F test/thread003.test b824d4f52b870ae39fc5bae4d8070eca73085dca F test/thread004.test f51dfc3936184aaf73ee85f315224baad272a87f F test/thread005.test bf5c374ca65dd89fd56c8fe511ccfb46875bda5e F test/thread1.test 862dd006d189e8b0946935db17399dcac2f8ef91 -F test/thread2.test 6e0997f7beabb6a7e471bd18740ed04805c785f4 -F test/thread_common.tcl b65e6b1d1d90dc885e10ad080896c6c56eef0819 +F test/thread2.test e08034b83fe9693ade77049732518e5b3d2d700d +F test/thread_common.tcl 2aa6f2fdcd4d6e461169c3e5ca098eebf643b863 F test/threadtest1.c 6029d9c5567db28e6dc908a0c63099c3ba6c383b F test/threadtest2.c ace893054fa134af3fc8d6e7cfecddb8e3acefb9 +F test/tkt-02a8e81d44.test 58494de77be2cf249228ada3f313fa399821c6ab +F test/tkt-26ff0c2d1e.test 888324e751512972c6e0d1a09df740d8f5aaf660 F test/tkt-2ea2425d34.test 1cf13e6f75d149b3209a0cb32927a82d3d79fb28 F test/tkt-31338dca7e.test 5741cd48de500347a437ba1be58c8335e83c5a5e F test/tkt-3fe897352e.test 10de1a67bd5c66b238a4c96abe55531b37bb4f00 F test/tkt-4a03edc4c8.test 2865e4edbc075b954daa82f8da7cc973033ec76e F test/tkt-5ee23731f.test 3581260f2a71e51db94e1506ba6b0f7311d002a9 F test/tkt-78e04e52ea.test fb5430c675e708f5cbafdf3e7e5593da5145a527 +F test/tkt-80e031a00f.test 9a154173461a4dbe2de49cda73963e04842d52f7 F test/tkt-94c04eaadb.test be5ea61cb04dfdc047d19b5c5a9e75fa3da67a7f +F test/tkt-9d68c883.test 458f7d82a523d7644b54b497c986378a7d8c8b67 +F test/tkt-cbd054fa6b.test f14f97ea43662e6f70c9e63287081e8be5d9d589 +F test/tkt-d11f09d36e.test fb44f7961aa6d4b632fb7b9768239832210b5fc7 F test/tkt-d82e3f3721.test 731359dfdcdb36fea0559cd33fec39dd0ceae8e6 F test/tkt-f777251dc7a.test 6f24c053bc5cdb7e1e19be9a72c8887cf41d5e87 +F test/tkt-f973c7ac31.test 1da0ed15ec2c7749fb5ce2828cd69d07153ad9f4 +F test/tkt-fc62af4523.test 72825d3febdedcd5593a27989fc05accdbfc2bb4 F test/tkt1435.test f8c52c41de6e5ca02f1845f3a46e18e25cadac00 F test/tkt1443.test bacc311da5c96a227bf8c167e77a30c99f8e8368 F test/tkt1444.test a9d72f9e942708bd82dde6c707da61c489e213e9 @@ -671,7 +701,6 @@ F test/tkt3424.test 61f831bd2b071bd128fa5d00fbda57e656ca5812 F test/tkt3442.test 89d7b41a4ec4d9d9b40ab8575d648579fb13cb4f F test/tkt3457.test edbf54b05cbe5165f00192becbd621038f1615e4 F test/tkt3461.test 228ea328a5a21e8663f80ee3d212a6ad92549a19 -F test/tkt3472.test 98c7e54b8fef2b1266a552a66c8e5d88a6908d1d F test/tkt3493.test 
1686cbde85f8721fc1bdc0ee72f2ef2f63139218 F test/tkt3508.test d75704db9501625c7f7deec119fcaf1696aefb7d F test/tkt3522.test 22ce2ebbcb04a6be56c0977d405c207967318fd6 @@ -706,7 +735,7 @@ F test/tkt3997.test a335fa41ca3985660a139df7b734a26ef53284bd F test/tkt4018.test 7c2c9ba4df489c676a0a7a0e809a1fb9b2185bd1 F test/tokenize.test ce430a7aed48fc98301611429595883fdfcab5d7 F test/trace.test 4b36a41a3e9c7842151af6da5998f5080cdad9e5 -F test/trans.test d887cb07630dc39879a322d958ad8b006137485c +F test/trans.test 6e1b4c6a42dba31bd65f8fa5e61a2708e08ddde6 F test/trans2.test d5337e61de45e66b1fcbf9db833fa8c82e624b22 F test/trans3.test d728abaa318ca364dc370e06576aa7e5fbed7e97 F test/trigger1.test 2e18561f85e448bb633c9c9de792e9bbf7b2dd3e @@ -715,12 +744,12 @@ F test/trigger3.test d2c60d8be271c355d61727411e753181e877230a F test/trigger4.test 8e90ee98cba940cd5f96493f82e55083806ab8a0 F test/trigger5.test 619391a3e9fc194081d22cefd830d811e7badf83 F test/trigger6.test 0e411654f122552da6590f0b4e6f781048a4a9b9 -F test/trigger7.test 72feaf8dbc52cea84de0c3e6ce7559ff19c479af +F test/trigger7.test b39e6dee1debe0ff9c2ef66326668f149f07c9c4 F test/trigger8.test 30cb0530bd7c4728055420e3f739aa00412eafa4 F test/trigger9.test 5b0789f1c5c4600961f8e68511b825b87be53e31 -F test/triggerA.test 0718ad2d9bfef27c7af00e636df79bee6b988da7 +F test/triggerA.test eaf11a29db2a11967d2d4b49d37f92bce598194e F test/triggerB.test 56780c031b454abac2340dbb3b71ac5c56c3d7fe -F test/triggerC.test 4083c64d80854d271bad211268a08985f3d61cbd +F test/triggerC.test cac41fe31adc1abb9fa08532762fc2b4f662ab03 F test/triggerD.test c6add3817351451e419f6ff9e9a259b02b6e2de7 F test/types.test 9a825ec8eea4e965d7113b74c76a78bb5240f2ac F test/types2.test 3555aacf8ed8dc883356e59efc314707e6247a84 @@ -728,12 +757,12 @@ F test/types3.test a0f66bf12f80fad89493535474f7a6d16fa58150 F test/unique.test 083c7fff74695bcc27a71d75699deba3595bc9c2 F test/update.test 8bc86fd7ef1a00014f76dc6a6a7c974df4aef172 F test/utf16align.test 54cd35a27c005a9b6e7815d887718780b6a462ae -F test/vacuum.test 68e39b2228b4b772166debef4a82accf6ddd32f3 +F test/vacuum.test 15ae6784e70428b8db64e95c92d84b19e507b719 F test/vacuum2.test ec57f21d394b7b72249b11f8e4b5d487bab56539 F test/vacuum3.test f39ad1428347c5808cd2da7578c470f186a4d0ce F test/vacuum4.test d3f8ecff345f166911568f397d2432c16d2867d9 F test/varint.test ab7b110089a08b9926ed7390e7e97bdefeb74102 -F test/veryquick.test e265401afefa994cdf2fe4b6f286b1e87c2f9b9d +F test/veryquick.test 7701bb609fe8bf6535514e8b849a309e8f00573b F test/view.test 45f518205ecdb6dd23a86dd4a99bb4ae945e625d F test/vtab1.test 9bc4a349a1989bcd064eb3b8fac2f06aca64297a F test/vtab2.test 7bcffc050da5c68f4f312e49e443063e2d391c0d @@ -752,14 +781,29 @@ F test/vtabE.test 7c4693638d7797ce2eda17af74292b97e705cc61 F test/vtab_alter.test 9e374885248f69e251bdaacf480b04a197f125e5 F test/vtab_err.test 0d4d8eb4def1d053ac7c5050df3024fd47a3fbd8 F test/vtab_shared.test 0eff9ce4f19facbe0a3e693f6c14b80711a4222d +F test/wal.test 1891e6f72dd437a1c2a48091aa9182ba17a8f780 +F test/wal2.test fa6dc4457b46988f46cf6c68ea51ebe341765f4a +F test/wal3.test d2ae7e66f973bd6b58ce49e546b2c00f44fe0485 +F test/wal4.test 3404b048fa5e10605facaf70384e6d2943412e30 +F test/wal_common.tcl 895d76138043b86bdccf36494054bdabcf65837b +F test/walbak.test 4df1c7369da0301caeb9a48fa45997fd592380e4 +F test/walbig.test e882bc1d014afffbfa2b6ba36e0f07d30a633ad0 +F test/walcksum.test a37b36375c595e61bdb7e1ec49b5f0979b6fc7ce +F test/walcrash.test e763841551d6b23677ccb419797c1589dcbdbaf5 +F test/walcrash2.test 
019d60b89d96c1937adb2b30b850ac7e86e5a142 +F test/walfault.test 05c470688d742688e455dd56816bd6bcffa298f8 +F test/walhook.test ed00a40ba7255da22d6b66433ab61fab16a63483 +F test/walmode.test 5dc3008ef71988ecdd949ea16e5750e325b92b54 +F test/walslow.test d21625e2e99e11c032ce949e8a94661576548933 +F test/walthread.test a25a393c068a2b42b44333fa3fdaae9072f1617c F test/where.test de337a3fe0a459ec7c93db16a519657a90552330 -F test/where2.test 45eacc126aabb37959a387aa83e59ce1f1f03820 -F test/where3.test 97d3936e6a443b968f1a61cdcc0f673252000e94 +F test/where2.test 43d4becaf5a5df854e6c21d624a1cb84c6904554 +F test/where3.test aa44a9b29e8c9f3d7bb94a3bb3a95b31627d520d F test/where4.test e9b9e2f2f98f00379e6031db6a6fca29bae782a2 F test/where5.test fdf66f96d29a064b63eb543e28da4dfdccd81ad2 F test/where6.test 5da5a98cec820d488e82708301b96cb8c18a258b -F test/where7.test fdd58ab9dec9f97679e65d4414bf5e07d725d79f -F test/where8.test 2bb8ea44b745fcc93db150fac9ce33d12e499760 +F test/where7.test a0a92b8ce48d9c027fbdd7b764c7de1e1213575a +F test/where8.test a6c740fd286d7883e274e17b6230a9d672a7ab1f F test/where8m.test da346596e19d54f0aba35ebade032a7c47d79739 F test/where9.test be19e1a92f80985c1a121b4678bf7d2123eaa623 F test/whereA.test 24c234263c8fe358f079d5e57d884fb569d2da0a @@ -770,39 +814,40 @@ F tool/diffdb.c 7524b1b5df217c20cd0431f6789851a4e0cb191b F tool/fragck.tcl 5265a95126abcf6ab357f7efa544787e5963f439 F tool/genfkey.README cf68fddd4643bbe3ff8e31b8b6d8b0a1b85e20f4 F tool/genfkey.test 4196a8928b78f51d54ef58e99e99401ab2f0a7e5 -F tool/lemon.c 6958cb9935be265bf51dbc718ef325e3b77685b6 +F tool/lemon.c fe890e2d8d2db1e3f57e2a22503dbb0f6843e517 F tool/lempar.c 01ca97f87610d1dac6d8cd96ab109ab1130e76dc F tool/mkkeywordhash.c d2e6b4a5965e23afb80fbe74bb54648cd371f309 F tool/mkopts.tcl 66ac10d240cc6e86abd37dc908d50382f84ff46e F tool/mkspeedsql.tcl a1a334d288f7adfe6e996f2e712becf076745c97 -F tool/mksqlite3c.tcl 4c6924c7e877defa8f9a12ef1e6867de614acf3f +F tool/mksqlite3c.tcl aff0d53f0e84cf919922c0d02e767bdf5eeafb90 F tool/mksqlite3h.tcl eb100dce83f24b501b325b340f8b5eb8e5106b3b F tool/mksqlite3internalh.tcl 7b43894e21bcb1bb39e11547ce7e38a063357e87 F tool/omittest.tcl 27d6f6e3b1e95aeb26a1c140e6eb57771c6d794a F tool/opcodeDoc.awk b3a2a3d5d3075b8bd90b7afe24283efdd586659c F tool/restore_jrnl.tcl 6957a34f8f1f0f8285e07536225ec3b292a9024a -F tool/shell1.test ef08a3e738b9fee4fc228920956950bc35db0575 -F tool/shell2.test 8f51f61c13b88618e71c17439fe0847c2421c5d1 -F tool/shell3.test ff663e83100670a295d473515c12beb8103a78b6 -F tool/showdb.c 8ab8b3b53884312aafb7ef60982e255a6c31d238 +F tool/shell1.test a738c71bc08ea9162baee9a14b3cf9994f181921 +F tool/shell2.test 5dc76b8005b465f420fed8241621da7513060ff3 +F tool/shell3.test 4fad469e8003938426355afdf34155f08c587836 +F tool/showdb.c 01c20e8181941b714fe07f72c64a7560fee17ff9 F tool/showjournal.c ec3b171be148656827c4949fbfb8ab4370822f87 +F tool/showwal.c f09e5a80a293919290ec85a6a37c85a5ddcf37d9 F tool/soak1.tcl 8d407956e1a45b485a8e072470a3e629a27037fe F tool/space_used.tcl f714c41a59e326b8b9042f415b628b561bafa06b -F tool/spaceanal.tcl b87db46ae29e3116411b1686e136b9b994d7de39 +F tool/spaceanal.tcl b91879d52bf77a1ff5382493284f429d32a63490 F tool/speedtest.tcl 06c76698485ccf597b9e7dbb1ac70706eb873355 F tool/speedtest16.c c8a9c793df96db7e4933f0852abb7a03d48f2e81 F tool/speedtest2.tcl ee2149167303ba8e95af97873c575c3e0fab58ff F tool/speedtest8.c 2902c46588c40b55661e471d7a86e4dd71a18224 F tool/speedtest8inst1.c 293327bc76823f473684d589a8160bde1f52c14e F tool/vdbe-compress.tcl 
d70ea6d8a19e3571d7ab8c9b75cba86d1173ff0f -P b1f342a6643829020beef542a0700d90822e6467 -R 590a011d9441875bded2c173c371f1c1 +P 13ed106c8c279422a6159e28c6887d13a88b7b8b +R 48f3b6e106f37a4303da6eef05383891 U drh -Z f7171db765620a4ccff85bc2ced22202 +Z 6f03f192e574b328cbf6ee712ebb1776 -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.4.6 (GNU/Linux) -iD8DBQFLrTT6oxKgR168RlERArP2AJ0UeA3bKxomgH1prR7M+4tHuMlN7wCfXBZE -Jr7IGDFSJ/vsM6VpeiKkkf4= -=diSA +iD8DBQFMRx1eoxKgR168RlERAkrAAJ9jYeUOTv2JXqP7/ZLTAQf2A6QoNACeLuA0 +lTLlFWVnujV93G1zfacFBTo= +=MCKn -----END PGP SIGNATURE----- diff --git a/manifest.uuid b/manifest.uuid index 758890d..2c6c3cb 100644 --- a/manifest.uuid +++ b/manifest.uuid @@ -1 +1 @@ -b078b588d617e07886ad156e9f54ade6d823568e +b36b105eab6fd3195f4bfba6cb5cda0f063b7460 diff --git a/publish.sh b/publish.sh index c09f13a..6c4dea1 100644 --- a/publish.sh +++ b/publish.sh @@ -34,19 +34,6 @@ gzip sqlite3-$VERS.bin chmod 644 sqlite3-$VERS.bin.gz mv sqlite3-$VERS.bin.gz doc -# Build a source archive useful for windows. -# -make target_source -cd tsrc -echo '***** BUILDING preprocessed source archives' -rm fts[12]* icu* -rm -f ../doc/sqlite-source-$VERSW.zip -zip ../doc/sqlite-source-$VERSW.zip * -cd .. -cp tsrc/sqlite3.h tsrc/sqlite3ext.h . -pwd -zip doc/sqlite-amalgamation-$VERSW.zip sqlite3.c sqlite3.h sqlite3ext.h - # Build the sqlite.so and tclsqlite.so shared libraries # under Linux # @@ -87,6 +74,20 @@ i386-mingw32msvc-gcc -Os $OPTS -Itsrc -I$TCLDIR sqlite3.c tsrc/shell.c \ -o sqlite3.exe zip doc/sqlite-$VERSW.zip sqlite3.exe +# Build a source archive useful for windows. +# +make target_source +cd tsrc +echo '***** BUILDING preprocessed source archives' +rm fts[12]* icu* +rm -f ../doc/sqlite-source-$VERSW.zip +zip ../doc/sqlite-source-$VERSW.zip * +cd .. +cp tsrc/sqlite3.h tsrc/sqlite3ext.h . +cp tsrc/shell.c . +pwd +zip doc/sqlite-amalgamation-$VERSW.zip sqlite3.c sqlite3.h sqlite3ext.h shell.c sqlite3.def + # Construct a tarball of the source tree # echo '***** BUILDING source archive' diff --git a/src/alter.c b/src/alter.c index 694b243..359c4e7 100644 --- a/src/alter.c +++ b/src/alter.c @@ -226,17 +226,23 @@ static void renameTriggerFunc( /* ** Register built-in functions used to help implement ALTER TABLE */ -void sqlite3AlterFunctions(sqlite3 *db){ - sqlite3CreateFunc(db, "sqlite_rename_table", 2, SQLITE_UTF8, 0, - renameTableFunc, 0, 0); +void sqlite3AlterFunctions(void){ + static SQLITE_WSD FuncDef aAlterTableFuncs[] = { + FUNCTION(sqlite_rename_table, 2, 0, 0, renameTableFunc), #ifndef SQLITE_OMIT_TRIGGER - sqlite3CreateFunc(db, "sqlite_rename_trigger", 2, SQLITE_UTF8, 0, - renameTriggerFunc, 0, 0); + FUNCTION(sqlite_rename_trigger, 2, 0, 0, renameTriggerFunc), #endif #ifndef SQLITE_OMIT_FOREIGN_KEY - sqlite3CreateFunc(db, "sqlite_rename_parent", 3, SQLITE_UTF8, 0, - renameParentFunc, 0, 0); + FUNCTION(sqlite_rename_parent, 3, 0, 0, renameParentFunc), #endif + }; + int i; + FuncDefHash *pHash = &GLOBAL(FuncDefHash, sqlite3GlobalFunctions); + FuncDef *aFunc = (FuncDef*)&GLOBAL(FuncDef, aAlterTableFuncs); + + for(i=0; iflags */ + + savedDbFlags = db->flags; if( NEVER(db->mallocFailed) ) goto exit_rename_table; assert( pSrc->nSrc==1 ); assert( sqlite3BtreeHoldsAllMutexes(pParse->db) ); @@ -389,6 +397,7 @@ void sqlite3AlterRenameTable( if( !pTab ) goto exit_rename_table; iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); zDb = db->aDb[iDb].zName; + db->flags |= SQLITE_PreferBuiltin; /* Get a NULL terminated version of the new table name. 
*/ zName = sqlite3NameFromToken(db, pName); @@ -556,6 +565,7 @@ void sqlite3AlterRenameTable( exit_rename_table: sqlite3SrcListDelete(db, pSrc); sqlite3DbFree(db, zName); + db->flags = savedDbFlags; } @@ -675,9 +685,11 @@ void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){ zCol = sqlite3DbStrNDup(db, (char*)pColDef->z, pColDef->n); if( zCol ){ char *zEnd = &zCol[pColDef->n-1]; + int savedDbFlags = db->flags; while( zEnd>zCol && (*zEnd==';' || sqlite3Isspace(*zEnd)) ){ *zEnd-- = '\0'; } + db->flags |= SQLITE_PreferBuiltin; sqlite3NestedParse(pParse, "UPDATE \"%w\".%s SET " "sql = substr(sql,1,%d) || ', ' || %Q || substr(sql,%d) " @@ -686,6 +698,7 @@ void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){ zTab ); sqlite3DbFree(db, zCol); + db->flags = savedDbFlags; } /* If the default value of the new column is NULL, then set the file diff --git a/src/analyze.c b/src/analyze.c index 283a0e9..94cf3a3 100644 --- a/src/analyze.c +++ b/src/analyze.c @@ -36,7 +36,7 @@ static void openStatTable( int iStatCur, /* Open the sqlite_stat1 table on this cursor */ const char *zWhere /* Delete entries associated with this table */ ){ - static struct { + static const struct { const char *zName; const char *zCols; } aTable[] = { @@ -618,12 +618,16 @@ int sqlite3AnalysisLoad(sqlite3 *db, int iDb){ n = 24; } pSample->nByte = (u8)n; - pSample->u.z = sqlite3DbMallocRaw(dbMem, n); - if( pSample->u.z ){ - memcpy(pSample->u.z, z, n); + if( n < 1){ + pSample->u.z = 0; }else{ - db->mallocFailed = 1; - break; + pSample->u.z = sqlite3DbMallocRaw(dbMem, n); + if( pSample->u.z ){ + memcpy(pSample->u.z, z, n); + }else{ + db->mallocFailed = 1; + break; + } } } } diff --git a/src/attach.c b/src/attach.c index 6f0acef..30a4207 100644 --- a/src/attach.c +++ b/src/attach.c @@ -143,7 +143,6 @@ static void attachFunc( } pPager = sqlite3BtreePager(aNew->pBt); sqlite3PagerLockingMode(pPager, db->dfltLockMode); - sqlite3PagerJournalMode(pPager, db->dfltJournalMode); sqlite3BtreeSecureDelete(aNew->pBt, sqlite3BtreeSecureDelete(db->aDb[0].pBt,-1) ); } @@ -288,7 +287,7 @@ detach_error: static void codeAttach( Parse *pParse, /* The parser context */ int type, /* Either SQLITE_ATTACH or SQLITE_DETACH */ - FuncDef *pFunc, /* FuncDef wrapper for detachFunc() or attachFunc() */ + FuncDef const *pFunc,/* FuncDef wrapper for detachFunc() or attachFunc() */ Expr *pAuthArg, /* Expression to pass to authorization callback */ Expr *pFilename, /* Name of database file */ Expr *pDbname, /* Name of the database to use internally */ @@ -358,7 +357,7 @@ attach_end: ** DETACH pDbname */ void sqlite3Detach(Parse *pParse, Expr *pDbname){ - static FuncDef detach_func = { + static const FuncDef detach_func = { 1, /* nArg */ SQLITE_UTF8, /* iPrefEnc */ 0, /* flags */ @@ -379,7 +378,7 @@ void sqlite3Detach(Parse *pParse, Expr *pDbname){ ** ATTACH p AS pDbname KEY pKey */ void sqlite3Attach(Parse *pParse, Expr *p, Expr *pDbname, Expr *pKey){ - static FuncDef attach_func = { + static const FuncDef attach_func = { 3, /* nArg */ SQLITE_UTF8, /* iPrefEnc */ 0, /* flags */ diff --git a/src/backup.c b/src/backup.c index 2d01ddb..41fa2c3 100644 --- a/src/backup.c +++ b/src/backup.c @@ -217,7 +217,7 @@ static int backupOnePage(sqlite3_backup *p, Pgno iSrcPg, const u8 *zSrcData){ /* Catch the case where the destination is an in-memory database and the ** page sizes of the source and destination differ. 
 */
-  if( nSrcPgsz!=nDestPgsz && sqlite3PagerIsMemdb(sqlite3BtreePager(p->pDest)) ){
+  if( nSrcPgsz!=nDestPgsz && sqlite3PagerIsMemdb(pDestPager) ){
     rc = SQLITE_READONLY;
   }
 
@@ -287,6 +287,9 @@ static void attachBackupObject(sqlite3_backup *p){
 */
 int sqlite3_backup_step(sqlite3_backup *p, int nPage){
   int rc;
+  int destMode;             /* Destination journal mode */
+  int pgszSrc = 0;          /* Source page size */
+  int pgszDest = 0;         /* Destination page size */
 
   sqlite3_mutex_enter(p->pSrcDb->mutex);
   sqlite3BtreeEnter(p->pSrc);
@@ -327,13 +330,21 @@ int sqlite3_backup_step(sqlite3_backup *p, int nPage){
       rc = sqlite3BtreeBeginTrans(p->pSrc, 0);
       bCloseTrans = 1;
     }
+
+    /* Do not allow backup if the destination database is in WAL mode
+    ** and the page sizes are different between source and destination */
+    pgszSrc = sqlite3BtreeGetPageSize(p->pSrc);
+    pgszDest = sqlite3BtreeGetPageSize(p->pDest);
+    destMode = sqlite3PagerGetJournalMode(sqlite3BtreePager(p->pDest));
+    if( SQLITE_OK==rc && destMode==PAGER_JOURNALMODE_WAL && pgszSrc!=pgszDest ){
+      rc = SQLITE_READONLY;
+    }
 
     /* Now that there is a read-lock on the source database, query the
     ** source pager for the number of pages in the database.
     */
-    if( rc==SQLITE_OK ){
-      rc = sqlite3PagerPagecount(pSrcPager, &nSrcPage);
-    }
+    nSrcPage = (int)sqlite3BtreeLastPage(p->pSrc);
+    assert( nSrcPage>=0 );
     for(ii=0; (nPage<0 || ii<nPage) && p->iNext<=(Pgno)nSrcPage && !rc; ii++){
       const Pgno iSrcPg = p->iNext;                 /* Source page number */
       if( iSrcPg!=PENDING_BYTE_PAGE(p->pSrc->pBt) ){
@@ -364,8 +375,6 @@ int sqlite3_backup_step(sqlite3_backup *p, int nPage){
   if( rc==SQLITE_DONE 
    && (rc = sqlite3BtreeUpdateMeta(p->pDest,1,p->iDestSchema+1))==SQLITE_OK
   ){
-    const int nSrcPagesize = sqlite3BtreeGetPageSize(p->pSrc);
-    const int nDestPagesize = sqlite3BtreeGetPageSize(p->pDest);
     int nDestTruncate;
 
     if( p->pDestDb ){
@@ -384,18 +393,20 @@ int sqlite3_backup_step(sqlite3_backup *p, int nPage){
     ** journalled by PagerCommitPhaseOne() before they are destroyed
     ** by the file truncation.
*/ - if( nSrcPagesizepSrc) ); + assert( pgszDest==sqlite3BtreeGetPageSize(p->pDest) ); + if( pgszSrcpDest->pBt) ){ nDestTruncate--; } }else{ - nDestTruncate = nSrcPage * (nSrcPagesize/nDestPagesize); + nDestTruncate = nSrcPage * (pgszSrc/pgszDest); } sqlite3PagerTruncateImage(pDestPager, nDestTruncate); - if( nSrcPagesize= iSize || ( + assert( (i64)nDestTruncate*(i64)pgszDest >= iSize || ( nDestTruncate==(int)(PENDING_BYTE_PAGE(p->pDest->pBt)-1) - && iSize>=PENDING_BYTE && iSize<=PENDING_BYTE+nDestPagesize + && iSize>=PENDING_BYTE && iSize<=PENDING_BYTE+pgszDest )); if( SQLITE_OK==(rc = sqlite3PagerCommitPhaseOne(pDestPager, 0, 1)) && SQLITE_OK==(rc = backupTruncateFile(pFile, iSize)) && SQLITE_OK==(rc = sqlite3PagerSync(pDestPager)) ){ i64 iOff; - i64 iEnd = MIN(PENDING_BYTE + nDestPagesize, iSize); + i64 iEnd = MIN(PENDING_BYTE + pgszDest, iSize); for( - iOff=PENDING_BYTE+nSrcPagesize; + iOff=PENDING_BYTE+pgszSrc; rc==SQLITE_OK && iOffrc = rc; } if( p->pDestDb ){ diff --git a/src/btree.c b/src/btree.c index dbcdf65..f47614a 100644 --- a/src/btree.c +++ b/src/btree.c @@ -516,11 +516,8 @@ static void invalidateIncrblobCursors( static int btreeSetHasContent(BtShared *pBt, Pgno pgno){ int rc = SQLITE_OK; if( !pBt->pHasContent ){ - int nPage = 100; - sqlite3PagerPagecount(pBt->pPager, &nPage); - /* If sqlite3PagerPagecount() fails there is no harm because the - ** nPage variable is unchanged from its default value of 100 */ - pBt->pHasContent = sqlite3BitvecCreate((u32)nPage); + assert( pgno<=pBt->nPage ); + pBt->pHasContent = sqlite3BitvecCreate(pBt->nPage); if( !pBt->pHasContent ){ rc = SQLITE_NOMEM; } @@ -1563,13 +1560,13 @@ static MemPage *btreePageLookup(BtShared *pBt, Pgno pgno){ ** Return the size of the database file in pages. If there is any kind of ** error, return ((unsigned int)-1). */ -static Pgno pagerPagecount(BtShared *pBt){ - int nPage = -1; - int rc; - assert( pBt->pPage1 ); - rc = sqlite3PagerPagecount(pBt->pPager, &nPage); - assert( rc==SQLITE_OK || nPage==-1 ); - return (Pgno)nPage; +static Pgno btreePagecount(BtShared *pBt){ + return pBt->nPage; +} +u32 sqlite3BtreeLastPage(Btree *p){ + assert( sqlite3BtreeHoldsMutex(p) ); + assert( ((p->pBt->nPage)&0x8000000)==0 ); + return (int)btreePagecount(p->pBt); } /* @@ -1586,25 +1583,22 @@ static int getAndInitPage( MemPage **ppPage /* Write the page pointer here */ ){ int rc; - TESTONLY( Pgno iLastPg = pagerPagecount(pBt); ) assert( sqlite3_mutex_held(pBt->mutex) ); - rc = btreeGetPage(pBt, pgno, ppPage, 0); - if( rc==SQLITE_OK ){ - rc = btreeInitPage(*ppPage); - if( rc!=SQLITE_OK ){ - releasePage(*ppPage); + if( pgno>btreePagecount(pBt) ){ + rc = SQLITE_CORRUPT_BKPT; + }else{ + rc = btreeGetPage(pBt, pgno, ppPage, 0); + if( rc==SQLITE_OK ){ + rc = btreeInitPage(*ppPage); + if( rc!=SQLITE_OK ){ + releasePage(*ppPage); + } } } - /* If the requested page number was either 0 or greater than the page - ** number of the last page in the database, this function should return - ** SQLITE_CORRUPT or some other error (i.e. SQLITE_FULL). Check that this - ** is the case. */ - assert( (pgno>0 && pgno<=iLastPg) || rc!=SQLITE_OK ); testcase( pgno==0 ); - testcase( pgno==iLastPg ); - + assert( pgno!=0 || rc==SQLITE_CORRUPT ); return rc; } @@ -2240,9 +2234,11 @@ int sqlite3BtreeGetAutoVacuum(Btree *p){ ** is returned if we run out of memory. 
*/ static int lockBtree(BtShared *pBt){ - int rc; - MemPage *pPage1; - int nPage; + int rc; /* Result code from subfunctions */ + MemPage *pPage1; /* Page 1 of the database file */ + int nPage; /* Number of pages in the database */ + int nPageFile = 0; /* Number of pages in the database file */ + int nPageHeader; /* Number of pages in the database according to hdr */ assert( sqlite3_mutex_held(pBt->mutex) ); assert( pBt->pPage1==0 ); @@ -2254,10 +2250,14 @@ static int lockBtree(BtShared *pBt){ /* Do some checking to help insure the file we opened really is ** a valid database file. */ - rc = sqlite3PagerPagecount(pBt->pPager, &nPage); - if( rc!=SQLITE_OK ){ + nPage = nPageHeader = get4byte(28+(u8*)pPage1->aData); + if( (rc = sqlite3PagerPagecount(pBt->pPager, &nPageFile))!=SQLITE_OK ){; goto page1_init_failed; - }else if( nPage>0 ){ + } + if( nPage==0 || memcmp(24+(u8*)pPage1->aData, 92+(u8*)pPage1->aData,4)!=0 ){ + nPage = nPageFile; + } + if( nPage>0 ){ int pageSize; int usableSize; u8 *page1 = pPage1->aData; @@ -2265,12 +2265,42 @@ static int lockBtree(BtShared *pBt){ if( memcmp(page1, zMagicHeader, 16)!=0 ){ goto page1_init_failed; } + +#ifdef SQLITE_OMIT_WAL if( page1[18]>1 ){ pBt->readOnly = 1; } if( page1[19]>1 ){ goto page1_init_failed; } +#else + if( page1[18]>2 ){ + pBt->readOnly = 1; + } + if( page1[19]>2 ){ + goto page1_init_failed; + } + + /* If the write version is set to 2, this database should be accessed + ** in WAL mode. If the log is not already open, open it now. Then + ** return SQLITE_OK and return without populating BtShared.pPage1. + ** The caller detects this and calls this function again. This is + ** required as the version of page 1 currently in the page1 buffer + ** may not be the latest version - there may be a newer one in the log + ** file. + */ + if( page1[19]==2 && pBt->doNotUseWAL==0 ){ + int isOpen = 0; + rc = sqlite3PagerOpenWal(pBt->pPager, &isOpen); + if( rc!=SQLITE_OK ){ + goto page1_init_failed; + }else if( isOpen==0 ){ + releasePage(pPage1); + return SQLITE_OK; + } + rc = SQLITE_NOTADB; + } +#endif /* The maximum embedded fraction must be exactly 25%. And the minimum ** embedded fraction must be 12.5% for both leaf-data and non-leaf-data. 
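lockBtree() now trusts the page count stored at byte offset 28 of the header, falling back to the pager's file size only when that field is zero or the change counter no longer matches the version-valid-for counter, and it inspects the two version bytes (offsets 18 and 19) to decide between rollback-journal and WAL handling. A standalone sketch that dumps the same header fields from outside SQLite (hypothetical helper; offsets follow the documented file format):

#include <stdio.h>
#include <string.h>

static unsigned int get4(const unsigned char *p){
  return ((unsigned)p[0]<<24) | (p[1]<<16) | (p[2]<<8) | p[3];
}

/* Dump the header fields consulted by lockBtree(). */
static int print_db_header(const char *zFile){
  unsigned char aHdr[100];
  FILE *f = fopen(zFile, "rb");
  if( f==0 ) return 1;
  if( fread(aHdr, 1, sizeof(aHdr), f)!=sizeof(aHdr) ){ fclose(f); return 1; }
  fclose(f);
  if( memcmp(aHdr, "SQLite format 3", 16)!=0 ) return 1;
  printf("version bytes 18/19:   %d/%d (both are 2 for WAL databases)\n",
         aHdr[18], aHdr[19]);
  printf("change counter (24):   %u\n", get4(&aHdr[24]));
  printf("page count (28):       %u\n", get4(&aHdr[28]));
  printf("version-valid-for (92):%u\n", get4(&aHdr[92]));
  return 0;
}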
@@ -2303,6 +2333,10 @@ static int lockBtree(BtShared *pBt){ pageSize-usableSize); return rc; } + if( nPageHeader>nPageFile ){ + rc = SQLITE_CORRUPT_BKPT; + goto page1_init_failed; + } if( usableSize<480 ){ goto page1_init_failed; } @@ -2333,6 +2367,7 @@ static int lockBtree(BtShared *pBt){ pBt->minLeaf = (pBt->usableSize-12)*32/255 - 23; assert( pBt->maxLeaf + 23 <= MX_CELL_SIZE(pBt) ); pBt->pPage1 = pPage1; + pBt->nPage = nPage; return SQLITE_OK; page1_init_failed: @@ -2370,12 +2405,10 @@ static int newDatabase(BtShared *pBt){ MemPage *pP1; unsigned char *data; int rc; - int nPage; assert( sqlite3_mutex_held(pBt->mutex) ); - rc = sqlite3PagerPagecount(pBt->pPager, &nPage); - if( rc!=SQLITE_OK || nPage>0 ){ - return rc; + if( pBt->nPage>0 ){ + return SQLITE_OK; } pP1 = pBt->pPage1; assert( pP1!=0 ); @@ -2401,6 +2434,8 @@ static int newDatabase(BtShared *pBt){ put4byte(&data[36 + 4*4], pBt->autoVacuum); put4byte(&data[36 + 7*4], pBt->incrVacuum); #endif + pBt->nPage = 1; + data[31] = 1; return SQLITE_OK; } @@ -2490,6 +2525,7 @@ int sqlite3BtreeBeginTrans(Btree *p, int wrflag){ rc = querySharedCacheTableLock(p, MASTER_ROOT, READ_LOCK); if( SQLITE_OK!=rc ) goto trans_begun; + pBt->initiallyEmpty = (u8)(pBt->nPage==0); do { /* Call lockBtree() until either pBt->pPage1 is populated or ** lockBtree() returns something other than SQLITE_OK. lockBtree() @@ -2514,7 +2550,7 @@ int sqlite3BtreeBeginTrans(Btree *p, int wrflag){ if( rc!=SQLITE_OK ){ unlockBtreeIfUnused(pBt); } - }while( rc==SQLITE_BUSY && pBt->inTransaction==TRANS_NONE && + }while( (rc&0xFF)==SQLITE_BUSY && pBt->inTransaction==TRANS_NONE && btreeInvokeBusyHandler(pBt) ); if( rc==SQLITE_OK ){ @@ -2769,12 +2805,12 @@ static int allocateBtreePage(BtShared *, MemPage **, Pgno *, Pgno, u8); */ static int incrVacuumStep(BtShared *pBt, Pgno nFin, Pgno iLastPg){ Pgno nFreeList; /* Number of pages still on the free-list */ + int rc; assert( sqlite3_mutex_held(pBt->mutex) ); assert( iLastPg>nFin ); if( !PTRMAP_ISPAGE(pBt, iLastPg) && iLastPg!=PENDING_BYTE_PAGE(pBt) ){ - int rc; u8 eType; Pgno iPtrPage; @@ -2850,7 +2886,7 @@ static int incrVacuumStep(BtShared *pBt, Pgno nFin, Pgno iLastPg){ while( iLastPg==PENDING_BYTE_PAGE(pBt)||PTRMAP_ISPAGE(pBt, iLastPg) ){ if( PTRMAP_ISPAGE(pBt, iLastPg) ){ MemPage *pPg; - int rc = btreeGetPage(pBt, iLastPg, &pPg, 0); + rc = btreeGetPage(pBt, iLastPg, &pPg, 0); if( rc!=SQLITE_OK ){ return rc; } @@ -2863,6 +2899,7 @@ static int incrVacuumStep(BtShared *pBt, Pgno nFin, Pgno iLastPg){ iLastPg--; } sqlite3PagerTruncateImage(pBt->pPager, iLastPg); + pBt->nPage = iLastPg; } return SQLITE_OK; } @@ -2885,7 +2922,11 @@ int sqlite3BtreeIncrVacuum(Btree *p){ rc = SQLITE_DONE; }else{ invalidateAllOverflowCache(pBt); - rc = incrVacuumStep(pBt, 0, pagerPagecount(pBt)); + rc = incrVacuumStep(pBt, 0, btreePagecount(pBt)); + if( rc==SQLITE_OK ){ + rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); + put4byte(&pBt->pPage1->aData[28], pBt->nPage); + } } sqlite3BtreeLeave(p); return rc; @@ -2916,7 +2957,7 @@ static int autoVacuumCommit(BtShared *pBt){ int nEntry; /* Number of entries on one ptrmap page */ Pgno nOrig; /* Database size before freeing */ - nOrig = pagerPagecount(pBt); + nOrig = btreePagecount(pBt); if( PTRMAP_ISPAGE(pBt, nOrig) || nOrig==PENDING_BYTE_PAGE(pBt) ){ /* It is not possible to create a database for which the final page ** is either a pointer-map page or the pending-byte page. 
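incrVacuumStep() and sqlite3BtreeIncrVacuum() now keep BtShared.nPage current and write it back to header offset 28. From SQL the same path is reached through incremental vacuum; a small sketch, assuming the database was created with auto_vacuum set to incremental (file name is a placeholder):

#include <sqlite3.h>

/* Free up to 100 pages from the free-list of an incrementally-vacuumed
** database; each step runs sqlite3BtreeIncrVacuum() and, with the change
** above, refreshes the stored page count. */
static int shrink_a_little(const char *zDbFile){
  sqlite3 *db;
  int rc = sqlite3_open(zDbFile, &db);
  if( rc==SQLITE_OK ){
    rc = sqlite3_exec(db, "PRAGMA incremental_vacuum(100);", 0, 0, 0);
  }
  sqlite3_close(db);
  return rc;
}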
If one @@ -2941,11 +2982,12 @@ static int autoVacuumCommit(BtShared *pBt){ rc = incrVacuumStep(pBt, nFin, iFree); } if( (rc==SQLITE_DONE || rc==SQLITE_OK) && nFree>0 ){ - rc = SQLITE_OK; rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); put4byte(&pBt->pPage1->aData[32], 0); put4byte(&pBt->pPage1->aData[36], 0); + put4byte(&pBt->pPage1->aData[28], nFin); sqlite3PagerTruncateImage(pBt->pPager, nFin); + pBt->nPage = nFin; } if( rc!=SQLITE_OK ){ sqlite3PagerRollback(pPager); @@ -3195,6 +3237,11 @@ int sqlite3BtreeRollback(Btree *p){ ** call btreeGetPage() on page 1 again to make ** sure pPage1->aData is set correctly. */ if( btreeGetPage(pBt, 1, &pPage1, 0)==SQLITE_OK ){ + int nPage = get4byte(28+(u8*)pPage1->aData); + testcase( nPage==0 ); + if( nPage==0 ) sqlite3PagerPagecount(pBt->pPager, &nPage); + testcase( pBt->nPage!=nPage ); + pBt->nPage = nPage; releasePage(pPage1); } assert( countWriteCursors(pBt)==0 ); @@ -3232,17 +3279,13 @@ int sqlite3BtreeBeginStmt(Btree *p, int iStatement){ assert( pBt->readOnly==0 ); assert( iStatement>0 ); assert( iStatement>p->db->nSavepoint ); - if( NEVER(p->inTrans!=TRANS_WRITE || pBt->readOnly) ){ - rc = SQLITE_INTERNAL; - }else{ - assert( pBt->inTransaction==TRANS_WRITE ); - /* At the pager level, a statement transaction is a savepoint with - ** an index greater than all savepoints created explicitly using - ** SQL statements. It is illegal to open, release or rollback any - ** such savepoints while the statement transaction savepoint is active. - */ - rc = sqlite3PagerOpenSavepoint(pBt->pPager, iStatement); - } + assert( pBt->inTransaction==TRANS_WRITE ); + /* At the pager level, a statement transaction is a savepoint with + ** an index greater than all savepoints created explicitly using + ** SQL statements. It is illegal to open, release or rollback any + ** such savepoints while the statement transaction savepoint is active. 
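The simplified sqlite3BtreeBeginStmt() above still maps a statement transaction onto a pager savepoint with an index above every SQL-level savepoint. The SQL-visible counterpart of that machinery is SAVEPOINT/RELEASE/ROLLBACK TO; a short usage sketch (savepoint name and batch SQL are placeholders):

#include <sqlite3.h>

/* Run a risky batch inside a named savepoint and undo it on failure. */
static int try_batch(sqlite3 *db, const char *zBatchSql){
  int rc = sqlite3_exec(db, "SAVEPOINT batch;", 0, 0, 0);
  if( rc!=SQLITE_OK ) return rc;
  rc = sqlite3_exec(db, zBatchSql, 0, 0, 0);
  if( rc==SQLITE_OK ){
    rc = sqlite3_exec(db, "RELEASE batch;", 0, 0, 0);
  }else{
    sqlite3_exec(db, "ROLLBACK TO batch; RELEASE batch;", 0, 0, 0);
  }
  return rc;
}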
+ */ + rc = sqlite3PagerOpenSavepoint(pBt->pPager, iStatement); sqlite3BtreeLeave(p); return rc; } @@ -3268,7 +3311,12 @@ int sqlite3BtreeSavepoint(Btree *p, int op, int iSavepoint){ sqlite3BtreeEnter(p); rc = sqlite3PagerSavepoint(pBt->pPager, op, iSavepoint); if( rc==SQLITE_OK ){ + if( iSavepoint<0 && pBt->initiallyEmpty ) pBt->nPage = 0; rc = newDatabase(pBt); + pBt->nPage = get4byte(28 + pBt->pPage1->aData); + if( pBt->nPage==0 ){ + sqlite3PagerPagecount(pBt->pPager, (int*)&pBt->nPage); + } } sqlite3BtreeLeave(p); } @@ -3334,7 +3382,7 @@ static int btreeCursor( if( NEVER(wrFlag && pBt->readOnly) ){ return SQLITE_READONLY; } - if( iTable==1 && pagerPagecount(pBt)==0 ){ + if( iTable==1 && btreePagecount(pBt)==0 ){ return SQLITE_EMPTY; } @@ -3605,7 +3653,7 @@ static int getOverflowPage( iGuess++; } - if( iGuess<=pagerPagecount(pBt) ){ + if( iGuess<=btreePagecount(pBt) ){ rc = ptrmapGet(pBt, iGuess, &eType, &pgno); if( rc==SQLITE_OK && eType==PTRMAP_OVERFLOW2 && pgno==ovfl ){ next = iGuess; @@ -4200,7 +4248,6 @@ int sqlite3BtreeFirst(BtCursor *pCur, int *pRes){ if( pCur->eState==CURSOR_INVALID ){ assert( pCur->apPage[pCur->iPage]->nCell==0 ); *pRes = 1; - rc = SQLITE_OK; }else{ assert( pCur->apPage[pCur->iPage]->nCell>0 ); *pRes = 0; @@ -4637,7 +4684,7 @@ static int allocateBtreePage( assert( sqlite3_mutex_held(pBt->mutex) ); pPage1 = pBt->pPage1; - mxPage = pagerPagecount(pBt); + mxPage = btreePagecount(pBt); n = get4byte(&pPage1->aData[36]); testcase( n==mxPage-1 ); if( n>=mxPage ){ @@ -4833,35 +4880,35 @@ static int allocateBtreePage( }else{ /* There are no pages on the freelist, so create a new page at the ** end of the file */ - int nPage = pagerPagecount(pBt); - *pPgno = nPage + 1; - - if( *pPgno==PENDING_BYTE_PAGE(pBt) ){ - (*pPgno)++; - } + rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); + if( rc ) return rc; + pBt->nPage++; + if( pBt->nPage==PENDING_BYTE_PAGE(pBt) ) pBt->nPage++; #ifndef SQLITE_OMIT_AUTOVACUUM - if( pBt->autoVacuum && PTRMAP_ISPAGE(pBt, *pPgno) ){ + if( pBt->autoVacuum && PTRMAP_ISPAGE(pBt, pBt->nPage) ){ /* If *pPgno refers to a pointer-map page, allocate two new pages ** at the end of the file instead of one. The first allocated page ** becomes a new pointer-map page, the second is used by the caller. */ MemPage *pPg = 0; - TRACE(("ALLOCATE: %d from end of file (pointer-map page)\n", *pPgno)); - assert( *pPgno!=PENDING_BYTE_PAGE(pBt) ); - rc = btreeGetPage(pBt, *pPgno, &pPg, 0); + TRACE(("ALLOCATE: %d from end of file (pointer-map page)\n", pBt->nPage)); + assert( pBt->nPage!=PENDING_BYTE_PAGE(pBt) ); + rc = btreeGetPage(pBt, pBt->nPage, &pPg, 1); if( rc==SQLITE_OK ){ rc = sqlite3PagerWrite(pPg->pDbPage); releasePage(pPg); } if( rc ) return rc; - (*pPgno)++; - if( *pPgno==PENDING_BYTE_PAGE(pBt) ){ (*pPgno)++; } + pBt->nPage++; + if( pBt->nPage==PENDING_BYTE_PAGE(pBt) ){ pBt->nPage++; } } #endif + put4byte(28 + (u8*)pBt->pPage1->aData, pBt->nPage); + *pPgno = pBt->nPage; assert( *pPgno!=PENDING_BYTE_PAGE(pBt) ); - rc = btreeGetPage(pBt, *pPgno, ppPage, 0); + rc = btreeGetPage(pBt, *pPgno, ppPage, 1); if( rc ) return rc; rc = sqlite3PagerWrite((*ppPage)->pDbPage); if( rc!=SQLITE_OK ){ @@ -5051,7 +5098,7 @@ static int clearCell(MemPage *pPage, unsigned char *pCell){ while( nOvfl-- ){ Pgno iNext = 0; MemPage *pOvfl = 0; - if( ovflPgno<2 || ovflPgno>pagerPagecount(pBt) ){ + if( ovflPgno<2 || ovflPgno>btreePagecount(pBt) ){ /* 0 is not a legal page number and page 1 cannot be an ** overflow page. 
Therefore if ovflPgno<2 or past the end of the ** file the database must be corrupt. */ @@ -6883,8 +6930,14 @@ static int btreeCreateTable(Btree *p, int *piTable, int flags){ releasePage(pRoot); return rc; } + + /* When the new root page was allocated, page 1 was made writable in + ** order either to increase the database filesize, or to decrement the + ** freelist count. Hence, the sqlite3BtreeUpdateMeta() call cannot fail. + */ + assert( sqlite3PagerIswriteable(pBt->pPage1->pDbPage) ); rc = sqlite3BtreeUpdateMeta(p, 4, pgnoRoot); - if( rc ){ + if( NEVER(rc) ){ releasePage(pRoot); return rc; } @@ -6924,7 +6977,7 @@ static int clearDatabasePage( int i; assert( sqlite3_mutex_held(pBt->mutex) ); - if( pgno>pagerPagecount(pBt) ){ + if( pgno>btreePagecount(pBt) ){ return SQLITE_CORRUPT_BKPT; } @@ -7675,7 +7728,7 @@ char *sqlite3BtreeIntegrityCheck( nRef = sqlite3PagerRefcount(pBt->pPager); sCheck.pBt = pBt; sCheck.pPager = pBt->pPager; - sCheck.nPage = pagerPagecount(sCheck.pBt); + sCheck.nPage = btreePagecount(sCheck.pBt); sCheck.mxErr = mxErr; sCheck.nErr = 0; sCheck.mallocFailed = 0; @@ -7943,3 +7996,39 @@ void sqlite3BtreeCacheOverflow(BtCursor *pCur){ pCur->isIncrblobHandle = 1; } #endif + +/* +** Set both the "read version" (single byte at byte offset 18) and +** "write version" (single byte at byte offset 19) fields in the database +** header to iVersion. +*/ +int sqlite3BtreeSetVersion(Btree *pBtree, int iVersion){ + BtShared *pBt = pBtree->pBt; + int rc; /* Return code */ + + assert( pBtree->inTrans==TRANS_NONE ); + assert( iVersion==1 || iVersion==2 ); + + /* If setting the version fields to 1, do not automatically open the + ** WAL connection, even if the version fields are currently set to 2. + */ + pBt->doNotUseWAL = (u8)(iVersion==1); + + rc = sqlite3BtreeBeginTrans(pBtree, 0); + if( rc==SQLITE_OK ){ + u8 *aData = pBt->pPage1->aData; + if( aData[18]!=(u8)iVersion || aData[19]!=(u8)iVersion ){ + rc = sqlite3BtreeBeginTrans(pBtree, 2); + if( rc==SQLITE_OK ){ + rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); + if( rc==SQLITE_OK ){ + aData[18] = (u8)iVersion; + aData[19] = (u8)iVersion; + } + } + } + } + + pBt->doNotUseWAL = 0; + return rc; +} diff --git a/src/btree.h b/src/btree.h index 4f034f3..584b463 100644 --- a/src/btree.h +++ b/src/btree.h @@ -81,6 +81,7 @@ int sqlite3BtreeSyncDisabled(Btree*); int sqlite3BtreeSetPageSize(Btree *p, int nPagesize, int nReserve, int eFix); int sqlite3BtreeGetPageSize(Btree*); int sqlite3BtreeMaxPageCount(Btree*,int); +u32 sqlite3BtreeLastPage(Btree*); int sqlite3BtreeSecureDelete(Btree*,int); int sqlite3BtreeGetReserve(Btree*); int sqlite3BtreeSetAutoVacuum(Btree *, int); @@ -185,6 +186,8 @@ int sqlite3BtreePutData(BtCursor*, u32 offset, u32 amt, void*); void sqlite3BtreeCacheOverflow(BtCursor *); void sqlite3BtreeClearCursor(BtCursor *); +int sqlite3BtreeSetVersion(Btree *pBt, int iVersion); + #ifndef NDEBUG int sqlite3BtreeCursorIsValid(BtCursor*); #endif diff --git a/src/btreeInt.h b/src/btreeInt.h index f6d95e5..8b68d7b 100644 --- a/src/btreeInt.h +++ b/src/btreeInt.h @@ -408,6 +408,7 @@ struct BtShared { u8 readOnly; /* True if the underlying file is readonly */ u8 pageSizeFixed; /* True if the page size can no longer be changed */ u8 secureDelete; /* True if secure_delete is enabled */ + u8 initiallyEmpty; /* Database is empty at start of transaction */ #ifndef SQLITE_OMIT_AUTOVACUUM u8 autoVacuum; /* True if auto-vacuum is enabled */ u8 incrVacuum; /* True if incr-vacuum is enabled */ @@ -419,7 +420,9 @@ struct BtShared { u16 maxLeaf; /* 
Maximum local payload in a LEAFDATA table */ u16 minLeaf; /* Minimum local payload in a LEAFDATA table */ u8 inTransaction; /* Transaction state */ + u8 doNotUseWAL; /* If true, do not open write-ahead-log file */ int nTransaction; /* Number of open transactions (read + write) */ + u32 nPage; /* Number of pages in the database */ void *pSchema; /* Pointer to space allocated by sqlite3BtreeSchema() */ void (*xFreeSchema)(void*); /* Destructor for BtShared.pSchema */ sqlite3_mutex *mutex; /* Non-recursive mutex required to access this struct */ diff --git a/src/build.c b/src/build.c index 8e34386..d964177 100644 --- a/src/build.c +++ b/src/build.c @@ -2614,6 +2614,7 @@ Index *sqlite3CreateIndex( if( j>=pTab->nCol ){ sqlite3ErrorMsg(pParse, "table %s has no column named %s", pTab->zName, zColName); + pParse->checkSchema = 1; goto exit_create_index; } pIndex->aiColumn[i] = j; @@ -3382,7 +3383,7 @@ void sqlite3Savepoint(Parse *pParse, int op, Token *pName){ if( zName ){ Vdbe *v = sqlite3GetVdbe(pParse); #ifndef SQLITE_OMIT_AUTHORIZATION - static const char *az[] = { "BEGIN", "RELEASE", "ROLLBACK" }; + static const char * const az[] = { "BEGIN", "RELEASE", "ROLLBACK" }; assert( !SAVEPOINT_BEGIN && SAVEPOINT_RELEASE==1 && SAVEPOINT_ROLLBACK==2 ); #endif if( !v || sqlite3AuthCheck(pParse, SQLITE_SAVEPOINT, az[op], zName, 0) ){ @@ -3422,7 +3423,6 @@ int sqlite3OpenTempDatabase(Parse *pParse){ db->mallocFailed = 1; return 1; } - sqlite3PagerJournalMode(sqlite3BtreePager(pBt), db->dfltJournalMode); } return 0; } diff --git a/src/callback.c b/src/callback.c index e6c51bc..c016959 100644 --- a/src/callback.c +++ b/src/callback.c @@ -353,14 +353,19 @@ FuncDef *sqlite3FindFunction( /* If no match is found, search the built-in functions. ** + ** If the SQLITE_PreferBuiltin flag is set, then search the built-in + ** functions even if a prior app-defined function was found. And give + ** priority to built-in functions. + ** ** Except, if createFlag is true, that means that we are trying to ** install a new function. Whatever FuncDef structure is returned will ** have fields overwritten with new information appropriate for the ** new function. But the FuncDefs for built-in functions are read-only. ** So we must not search for built-ins when creating a new function. 
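The lookup above searches application-defined functions first and, per the new comment, also consults the built-in table (giving it priority) when SQLITE_PreferBuiltin is set. A sketch of registering such an application-defined function with the public API (the rot13 function itself is illustrative, not part of the patch):

#include <sqlite3.h>
#include <ctype.h>

/* An application-defined scalar function: rot13(X). */
static void rot13Func(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  const unsigned char *zIn = sqlite3_value_text(argv[0]);
  char zOut[256];
  int i;
  (void)argc;
  if( zIn==0 ){ sqlite3_result_null(ctx); return; }
  for(i=0; zIn[i] && i<255; i++){
    int c = zIn[i];
    if( isalpha(c) ){
      int base = isupper(c) ? 'A' : 'a';
      c = (c - base + 13) % 26 + base;
    }
    zOut[i] = (char)c;
  }
  zOut[i] = 0;
  sqlite3_result_text(ctx, zOut, -1, SQLITE_TRANSIENT);
}

/* Once registered, sqlite3FindFunction() resolves rot13() to this
** definition ahead of any built-in of the same name and arity. */
static int register_rot13(sqlite3 *db){
  return sqlite3_create_function(db, "rot13", 1, SQLITE_UTF8, 0,
                                 rot13Func, 0, 0);
}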
*/ - if( !createFlag && !pBest ){ + if( !createFlag && (pBest==0 || (db->flags & SQLITE_PreferBuiltin)!=0) ){ FuncDefHash *pHash = &GLOBAL(FuncDefHash, sqlite3GlobalFunctions); + bestScore = 0; p = functionSearch(pHash, h, zName, nName); while( p ){ int score = matchQuality(p, nArg, enc); diff --git a/src/ctime.c b/src/ctime.c index dacdc58..f7248f4 100644 --- a/src/ctime.c +++ b/src/ctime.c @@ -171,6 +171,9 @@ static const char * const azCompileOpt[] = { #ifdef SQLITE_OMIT_AUTOINIT "OMIT_AUTOINIT", #endif +#ifdef SQLITE_OMIT_AUTOMATIC_INDEX + "OMIT_AUTOMATIC_INDEX", +#endif #ifdef SQLITE_OMIT_AUTOVACUUM "OMIT_AUTOVACUUM", #endif @@ -192,9 +195,11 @@ static const char * const azCompileOpt[] = { #ifdef SQLITE_OMIT_CHECK "OMIT_CHECK", #endif -#ifdef SQLITE_OMIT_COMPILEOPTION_DIAGS - "OMIT_COMPILEOPTION_DIAGS", -#endif +/* // redundant +** #ifdef SQLITE_OMIT_COMPILEOPTION_DIAGS +** "OMIT_COMPILEOPTION_DIAGS", +** #endif +*/ #ifdef SQLITE_OMIT_COMPLETE "OMIT_COMPLETE", #endif @@ -228,9 +233,6 @@ static const char * const azCompileOpt[] = { #ifdef SQLITE_OMIT_GET_TABLE "OMIT_GET_TABLE", #endif -#ifdef SQLITE_OMIT_GLOBALRECOVER - "OMIT_GLOBALRECOVER", -#endif #ifdef SQLITE_OMIT_INCRBLOB "OMIT_INCRBLOB", #endif @@ -309,6 +311,9 @@ static const char * const azCompileOpt[] = { #ifdef SQLITE_OMIT_VIRTUALTABLE "OMIT_VIRTUALTABLE", #endif +#ifdef SQLITE_OMIT_WAL + "OMIT_WAL", +#endif #ifdef SQLITE_OMIT_WSD "OMIT_WSD", #endif diff --git a/src/date.c b/src/date.c index 2c39a0a..04ffbe2 100644 --- a/src/date.c +++ b/src/date.c @@ -314,10 +314,8 @@ static int parseYyyyMmDd(const char *zDate, DateTime *p){ ** Set the time to the current time reported by the VFS */ static void setDateTimeToCurrent(sqlite3_context *context, DateTime *p){ - double r; sqlite3 *db = sqlite3_context_db_handle(context); - sqlite3OsCurrentTime(db->pVfs, &r); - p->iJD = (sqlite3_int64)(r*86400000.0 + 0.5); + sqlite3OsCurrentTimeInt64(db->pVfs, &p->iJD); p->validJD = 1; } @@ -1038,22 +1036,15 @@ static void currentTimeFunc( time_t t; char *zFormat = (char *)sqlite3_user_data(context); sqlite3 *db; - double rT; + sqlite3_int64 iT; char zBuf[20]; UNUSED_PARAMETER(argc); UNUSED_PARAMETER(argv); db = sqlite3_context_db_handle(context); - sqlite3OsCurrentTime(db->pVfs, &rT); -#ifndef SQLITE_OMIT_FLOATING_POINT - t = 86400.0*(rT - 2440587.5) + 0.5; -#else - /* without floating point support, rT will have - ** already lost fractional day precision. - */ - t = 86400 * (rT - 2440587) - 43200; -#endif + sqlite3OsCurrentTimeInt64(db->pVfs, &iT); + t = iT/1000 - 10000*(sqlite3_int64)21086676; #ifdef HAVE_GMTIME_R { struct tm sNow; diff --git a/src/delete.c b/src/delete.c index 9e99a4d..9608dc2 100644 --- a/src/delete.c +++ b/src/delete.c @@ -508,9 +508,7 @@ void sqlite3GenerateRowDelete( sqlite3VdbeAddOp2(v, OP_Copy, iRowid, iOld); for(iCol=0; iColnCol; iCol++){ if( mask==0xffffffff || mask&(1<pLeft, pEList->a[0].pExpr); } - }else if( pExpr->x.pList!=0 ){ + }else if( ALWAYS(pExpr->x.pList!=0) ){ /* Case 2: expr IN (exprlist) ** ** For each expression, build an index key from the evaluation and @@ -1705,7 +1705,6 @@ int sqlite3CodeSubselect( ** an integer 0 (not exists) or 1 (exists) into a memory cell ** and record that memory cell in iColumn. 
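The new currentTimeFunc() arithmetic folds the constant 2440587.5 days x 86400 s/day = 210866760000 s, the Julian-day value of the Unix epoch, so iT/1000 - 210866760000 converts the VFS's julian-day milliseconds into time_t seconds. A small sketch of the conversion in both directions (helper names are illustrative):

#include <sqlite3.h>   /* sqlite3_int64 */
#include <time.h>

/* 2440587.5 (Julian day number of 1970-01-01T00:00:00Z) * 86400 s/day */
#define UNIX_EPOCH_JD_SEC ((sqlite3_int64)210866760000)

static time_t jd_ms_to_unix(sqlite3_int64 iJDms){
  return (time_t)(iJDms/1000 - UNIX_EPOCH_JD_SEC);
}
static sqlite3_int64 unix_to_jd_ms(time_t t){
  return ((sqlite3_int64)t + UNIX_EPOCH_JD_SEC) * 1000;
}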
*/ - static const Token one = { "1", 1 }; /* Token for literal value 1 */ Select *pSel; /* SELECT statement to encode */ SelectDest dest; /* How to deal with SELECt result */ @@ -1726,7 +1725,8 @@ int sqlite3CodeSubselect( VdbeComment((v, "Init EXISTS result")); } sqlite3ExprDelete(pParse->db, pSel->pLimit); - pSel->pLimit = sqlite3PExpr(pParse, TK_INTEGER, 0, 0, &one); + pSel->pLimit = sqlite3PExpr(pParse, TK_INTEGER, 0, 0, + &sqlite3IntTokens[1]); if( sqlite3Select(pParse, pSel, &dest) ){ return 0; } @@ -1794,8 +1794,20 @@ static void sqlite3ExprCodeIN( sqlite3ExprCachePush(pParse); r1 = sqlite3GetTempReg(pParse); sqlite3ExprCode(pParse, pExpr->pLeft, r1); - sqlite3VdbeAddOp2(v, OP_IsNull, r1, destIfNull); + /* If the LHS is NULL, then the result is either false or NULL depending + ** on whether the RHS is empty or not, respectively. + */ + if( destIfNull==destIfFalse ){ + /* Shortcut for the common case where the false and NULL outcomes are + ** the same. */ + sqlite3VdbeAddOp2(v, OP_IsNull, r1, destIfNull); + }else{ + int addr1 = sqlite3VdbeAddOp1(v, OP_NotNull, r1); + sqlite3VdbeAddOp2(v, OP_Rewind, pExpr->iTable, destIfFalse); + sqlite3VdbeAddOp2(v, OP_Goto, 0, destIfNull); + sqlite3VdbeJumpHere(v, addr1); + } if( eType==IN_INDEX_ROWID ){ /* In this case, the RHS is the ROWID of table b-tree @@ -2082,6 +2094,27 @@ static void sqlite3ExprCachePinRegister(Parse *pParse, int iReg){ } } +/* +** Generate code to extract the value of the iCol-th column of a table. +*/ +void sqlite3ExprCodeGetColumnOfTable( + Vdbe *v, /* The VDBE under construction */ + Table *pTab, /* The table containing the value */ + int iTabCur, /* The cursor for this table */ + int iCol, /* Index of the column to extract */ + int regOut /* Extract the valud into this register */ +){ + if( iCol<0 || iCol==pTab->iPKey ){ + sqlite3VdbeAddOp2(v, OP_Rowid, iTabCur, regOut); + }else{ + int op = IsVirtual(pTab) ? OP_VColumn : OP_Column; + sqlite3VdbeAddOp3(v, op, iTabCur, iCol, regOut); + } + if( iCol>=0 ){ + sqlite3ColumnDefault(v, pTab, iCol, regOut); + } +} + /* ** Generate code that will extract the iColumn-th column from ** table pTab and store the column value in a register. An effort @@ -2110,13 +2143,7 @@ int sqlite3ExprCodeGetColumn( } } assert( v!=0 ); - if( iColumn<0 ){ - sqlite3VdbeAddOp2(v, OP_Rowid, iTable, iReg); - }else if( ALWAYS(pTab!=0) ){ - int op = IsVirtual(pTab) ? OP_VColumn : OP_Column; - sqlite3VdbeAddOp3(v, op, iTable, iColumn, iReg); - sqlite3ColumnDefault(v, pTab, iColumn, iReg); - } + sqlite3ExprCodeGetColumnOfTable(v, pTab, iTable, iColumn, iReg); sqlite3ExprCacheStore(pParse, iTable, iColumn, iReg); return iReg; } @@ -2353,27 +2380,12 @@ int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target){ } #endif case TK_VARIABLE: { - VdbeOp *pOp; assert( !ExprHasProperty(pExpr, EP_IntValue) ); assert( pExpr->u.zToken!=0 ); assert( pExpr->u.zToken[0]!=0 ); - if( pExpr->u.zToken[1]==0 - && (pOp = sqlite3VdbeGetOp(v, -1))->opcode==OP_Variable - && pOp->p1+pOp->p3==pExpr->iColumn - && pOp->p2+pOp->p3==target - && pOp->p4.z==0 - ){ - /* If the previous instruction was a copy of the previous unnamed - ** parameter into the previous register, then simply increment the - ** repeat count on the prior instruction rather than making a new - ** instruction. 
- */ - pOp->p3++; - }else{ - sqlite3VdbeAddOp3(v, OP_Variable, pExpr->iColumn, target, 1); - if( pExpr->u.zToken[1]!=0 ){ - sqlite3VdbeChangeP4(v, -1, pExpr->u.zToken, 0); - } + sqlite3VdbeAddOp2(v, OP_Variable, pExpr->iColumn, target); + if( pExpr->u.zToken[1]!=0 ){ + sqlite3VdbeChangeP4(v, -1, pExpr->u.zToken, 0); } break; } @@ -3440,7 +3452,6 @@ void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int jumpIfNull){ ** an incorrect 0 or 1 could lead to a malfunction. */ int sqlite3ExprCompare(Expr *pA, Expr *pB){ - int i; if( pA==0||pB==0 ){ return pB==pA ? 0 : 2; } @@ -3453,18 +3464,7 @@ int sqlite3ExprCompare(Expr *pA, Expr *pB){ if( pA->op!=pB->op ) return 2; if( sqlite3ExprCompare(pA->pLeft, pB->pLeft) ) return 2; if( sqlite3ExprCompare(pA->pRight, pB->pRight) ) return 2; - - if( pA->x.pList && pB->x.pList ){ - if( pA->x.pList->nExpr!=pB->x.pList->nExpr ) return 2; - for(i=0; ix.pList->nExpr; i++){ - Expr *pExprA = pA->x.pList->a[i].pExpr; - Expr *pExprB = pB->x.pList->a[i].pExpr; - if( sqlite3ExprCompare(pExprA, pExprB) ) return 2; - } - }else if( pA->x.pList || pB->x.pList ){ - return 2; - } - + if( sqlite3ExprListCompare(pA->x.pList, pB->x.pList) ) return 2; if( pA->iTable!=pB->iTable || pA->iColumn!=pB->iColumn ) return 2; if( ExprHasProperty(pA, EP_IntValue) ){ if( !ExprHasProperty(pB, EP_IntValue) || pA->u.iValue!=pB->u.iValue ){ @@ -3481,6 +3481,31 @@ int sqlite3ExprCompare(Expr *pA, Expr *pB){ return 0; } +/* +** Compare two ExprList objects. Return 0 if they are identical and +** non-zero if they differ in any way. +** +** This routine might return non-zero for equivalent ExprLists. The +** only consequence will be disabled optimizations. But this routine +** must never return 0 if the two ExprList objects are different, or +** a malfunction will result. +** +** Two NULL pointers are considered to be the same. But a NULL pointer +** always differs from a non-NULL pointer. +*/ +int sqlite3ExprListCompare(ExprList *pA, ExprList *pB){ + int i; + if( pA==0 && pB==0 ) return 0; + if( pA==0 || pB==0 ) return 1; + if( pA->nExpr!=pB->nExpr ) return 1; + for(i=0; inExpr; i++){ + Expr *pExprA = pA->a[i].pExpr; + Expr *pExprB = pB->a[i].pExpr; + if( pA->a[i].sortOrder!=pB->a[i].sortOrder ) return 1; + if( sqlite3ExprCompare(pExprA, pExprB) ) return 1; + } + return 0; +} /* ** Add a new element to the pAggInfo->aCol[] array. Return the index of diff --git a/src/func.c b/src/func.c index 7ff1fec..8b1b2f7 100644 --- a/src/func.c +++ b/src/func.c @@ -1411,20 +1411,15 @@ static void groupConcatFinalize(sqlite3_context *context){ } /* -** This function registered all of the above C functions as SQL -** functions. This should be the only routine in this file with -** external linkage. +** This routine does per-connection function registration. Most +** of the built-in functions above are part of the global function set. +** This routine only deals with those that are not global. 
*/ void sqlite3RegisterBuiltinFunctions(sqlite3 *db){ -#ifndef SQLITE_OMIT_ALTERTABLE - sqlite3AlterFunctions(db); -#endif - if( !db->mallocFailed ){ - int rc = sqlite3_overload_function(db, "MATCH", 2); - assert( rc==SQLITE_NOMEM || rc==SQLITE_OK ); - if( rc==SQLITE_NOMEM ){ - db->mallocFailed = 1; - } + int rc = sqlite3_overload_function(db, "MATCH", 2); + assert( rc==SQLITE_NOMEM || rc==SQLITE_OK ); + if( rc==SQLITE_NOMEM ){ + db->mallocFailed = 1; } } @@ -1592,4 +1587,7 @@ void sqlite3RegisterGlobalFunctions(void){ sqlite3FuncDefInsert(pHash, &aFunc[i]); } sqlite3RegisterDateTimeFunctions(); +#ifndef SQLITE_OMIT_ALTERTABLE + sqlite3AlterFunctions(); +#endif } diff --git a/src/global.c b/src/global.c index 673a274..0c89068 100644 --- a/src/global.c +++ b/src/global.c @@ -176,6 +176,15 @@ SQLITE_WSD struct Sqlite3Config sqlite3Config = { */ SQLITE_WSD FuncDefHash sqlite3GlobalFunctions; +/* +** Constant tokens for values 0 and 1. +*/ +const Token sqlite3IntTokens[] = { + { "0", 1 }, + { "1", 1 } +}; + + /* ** The value of the "pending" byte must be 0x40000000 (1 byte past the ** 1-gibabyte boundary) in a compatible database. SQLite never uses @@ -194,7 +203,9 @@ SQLITE_WSD FuncDefHash sqlite3GlobalFunctions; ** Changing the pending byte during operating results in undefined ** and dileterious behavior. */ +#ifndef SQLITE_OMIT_WSD int sqlite3PendingByte = 0x40000000; +#endif #include "opcodes.h" /* diff --git a/src/insert.c b/src/insert.c index 5379762..f6ad5ab 100644 --- a/src/insert.c +++ b/src/insert.c @@ -727,7 +727,7 @@ void sqlite3Insert( }else{ sqlite3ErrorMsg(pParse, "table %S has no column named %s", pTabList, 0, pColumn->a[i].zName); - pParse->nErr++; + pParse->checkSchema = 1; goto insert_cleanup; } } @@ -846,7 +846,7 @@ void sqlite3Insert( if( pColumn->a[j].idx==i ) break; } } - if( pColumn && j>=pColumn->nId ){ + if( (!useTempTable && !pList) || (pColumn && j>=pColumn->nId) ){ sqlite3ExprCode(pParse, pTab->aCol[i].pDflt, regCols+i+1); }else if( useTempTable ){ sqlite3VdbeAddOp3(v, OP_Column, srcTab, j, regCols+i+1); diff --git a/src/journal.c b/src/journal.c index 2a806e3..2f9e222 100644 --- a/src/journal.c +++ b/src/journal.c @@ -182,7 +182,11 @@ static struct sqlite3_io_methods JournalFileMethods = { 0, /* xCheckReservedLock */ 0, /* xFileControl */ 0, /* xSectorSize */ - 0 /* xDeviceCharacteristics */ + 0, /* xDeviceCharacteristics */ + 0, /* xShmMap */ + 0, /* xShmLock */ + 0, /* xShmBarrier */ + 0 /* xShmUnmap */ }; /* diff --git a/src/main.c b/src/main.c index c802507..c3828d5 100644 --- a/src/main.c +++ b/src/main.c @@ -776,7 +776,7 @@ const char *sqlite3ErrStr(int rc){ /* SQLITE_NOTFOUND */ 0, /* SQLITE_FULL */ "database or disk is full", /* SQLITE_CANTOPEN */ "unable to open database file", - /* SQLITE_PROTOCOL */ 0, + /* SQLITE_PROTOCOL */ "locking protocol", /* SQLITE_EMPTY */ "table contains no data", /* SQLITE_SCHEMA */ "database schema has changed", /* SQLITE_TOOBIG */ "string or blob too big", @@ -1186,6 +1186,145 @@ void *sqlite3_rollback_hook( return pRet; } +#ifndef SQLITE_OMIT_WAL +/* +** The sqlite3_wal_hook() callback registered by sqlite3_wal_autocheckpoint(). +** Invoke sqlite3_wal_checkpoint if the number of frames in the log file +** is greater than sqlite3.pWalArg cast to an integer (the value configured by +** wal_autocheckpoint()). 
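The insert.c change falls back to the declared column defaults when no explicit value list is supplied, as in INSERT ... DEFAULT VALUES with triggers present. A small SQL sketch of that shape (schema and trigger are illustrative, and whether a given statement takes this exact branch depends on the trigger and temp-table decisions made earlier in sqlite3Insert()):

#include <sqlite3.h>

/* The INSERT below carries no value list, so the NEW.* row seen by the
** trigger is built from the declared column defaults. */
static int insert_defaults(sqlite3 *db){
  return sqlite3_exec(db,
     "CREATE TABLE cfg(k TEXT DEFAULT 'unnamed', n INT DEFAULT 0);"
     "CREATE TABLE log(msg TEXT);"
     "CREATE TRIGGER cfg_ai AFTER INSERT ON cfg BEGIN"
     "  INSERT INTO log VALUES('added ' || NEW.k);"
     "END;"
     "INSERT INTO cfg DEFAULT VALUES;", 0, 0, 0);
}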
+*/ +int sqlite3WalDefaultHook( + void *pClientData, /* Argument */ + sqlite3 *db, /* Connection */ + const char *zDb, /* Database */ + int nFrame /* Size of WAL */ +){ + if( nFrame>=SQLITE_PTR_TO_INT(pClientData) ){ + sqlite3BeginBenignMalloc(); + sqlite3_wal_checkpoint(db, zDb); + sqlite3EndBenignMalloc(); + } + return SQLITE_OK; +} +#endif /* SQLITE_OMIT_WAL */ + +/* +** Configure an sqlite3_wal_hook() callback to automatically checkpoint +** a database after committing a transaction if there are nFrame or +** more frames in the log file. Passing zero or a negative value as the +** nFrame parameter disables automatic checkpoints entirely. +** +** The callback registered by this function replaces any existing callback +** registered using sqlite3_wal_hook(). Likewise, registering a callback +** using sqlite3_wal_hook() disables the automatic checkpoint mechanism +** configured by this function. +*/ +int sqlite3_wal_autocheckpoint(sqlite3 *db, int nFrame){ +#ifndef SQLITE_OMIT_WAL + if( nFrame>0 ){ + sqlite3_wal_hook(db, sqlite3WalDefaultHook, SQLITE_INT_TO_PTR(nFrame)); + }else{ + sqlite3_wal_hook(db, 0, 0); + } +#endif + return SQLITE_OK; +} + +/* +** Register a callback to be invoked each time a transaction is written +** into the write-ahead-log by this database connection. +*/ +void *sqlite3_wal_hook( + sqlite3 *db, /* Attach the hook to this db handle */ + int(*xCallback)(void *, sqlite3*, const char*, int), + void *pArg /* First argument passed to xCallback() */ +){ +#ifndef SQLITE_OMIT_WAL + void *pRet; + sqlite3_mutex_enter(db->mutex); + pRet = db->pWalArg; + db->xWalCallback = xCallback; + db->pWalArg = pArg; + sqlite3_mutex_leave(db->mutex); + return pRet; +#else + return 0; +#endif +} + + +/* +** Checkpoint database zDb. If zDb is NULL, or if the buffer zDb points +** to contains a zero-length string, all attached databases are +** checkpointed. +*/ +int sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb){ +#ifdef SQLITE_OMIT_WAL + return SQLITE_OK; +#else + int rc; /* Return code */ + int iDb = SQLITE_MAX_ATTACHED; /* sqlite3.aDb[] index of db to checkpoint */ + + sqlite3_mutex_enter(db->mutex); + if( zDb && zDb[0] ){ + iDb = sqlite3FindDbName(db, zDb); + } + if( iDb<0 ){ + rc = SQLITE_ERROR; + sqlite3Error(db, SQLITE_ERROR, "unknown database: %s", zDb); + }else{ + rc = sqlite3Checkpoint(db, iDb); + sqlite3Error(db, rc, 0); + } + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; +#endif +} + +#ifndef SQLITE_OMIT_WAL +/* +** Run a checkpoint on database iDb. This is a no-op if database iDb is +** not currently open in WAL mode. +** +** If a transaction is open on the database being checkpointed, this +** function returns SQLITE_LOCKED and a checkpoint is not attempted. If +** an error occurs while running the checkpoint, an SQLite error code is +** returned (i.e. SQLITE_IOERR). Otherwise, SQLITE_OK. +** +** The mutex on database handle db should be held by the caller. The mutex +** associated with the specific b-tree being checkpointed is taken by +** this function while the checkpoint is running. +** +** If iDb is passed SQLITE_MAX_ATTACHED, then all attached databases are +** checkpointed. If an error is encountered it is returned immediately - +** no attempt is made to checkpoint any remaining databases. 
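The routines above expose the new WAL plumbing as sqlite3_wal_hook(), sqlite3_wal_autocheckpoint() and sqlite3_wal_checkpoint(). A sketch of the public API as described, where registering a hook replaces the default autocheckpoint behaviour (threshold and file name are placeholders):

#include <sqlite3.h>
#include <stdio.h>

/* Log each WAL commit and force a checkpoint once the log holds 500 or
** more frames, instead of relying on the default autocheckpoint. */
static int walHook(void *pArg, sqlite3 *db, const char *zDb, int nFrame){
  (void)pArg;
  printf("committed to WAL of \"%s\": %d frames\n", zDb, nFrame);
  if( nFrame>=500 ){
    return sqlite3_wal_checkpoint(db, zDb);
  }
  return SQLITE_OK;
}

static int open_wal_db(const char *zFile, sqlite3 **pDb){
  int rc = sqlite3_open(zFile, pDb);
  if( rc==SQLITE_OK ){
    rc = sqlite3_exec(*pDb, "PRAGMA journal_mode=WAL;", 0, 0, 0);
  }
  if( rc==SQLITE_OK ) sqlite3_wal_hook(*pDb, walHook, 0);
  return rc;
}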
+*/ +int sqlite3Checkpoint(sqlite3 *db, int iDb){ + int rc = SQLITE_OK; /* Return code */ + int i; /* Used to iterate through attached dbs */ + + assert( sqlite3_mutex_held(db->mutex) ); + + for(i=0; inDb && rc==SQLITE_OK; i++){ + if( i==iDb || iDb==SQLITE_MAX_ATTACHED ){ + Btree *pBt = db->aDb[i].pBt; + if( pBt ){ + if( sqlite3BtreeIsInReadTrans(pBt) ){ + rc = SQLITE_LOCKED; + }else{ + sqlite3BtreeEnter(pBt); + rc = sqlite3PagerCheckpoint(sqlite3BtreePager(pBt)); + sqlite3BtreeLeave(pBt); + } + } + } + } + + return rc; +} +#endif /* SQLITE_OMIT_WAL */ + /* ** This function returns true if main-memory should be used instead of ** a temporary file for transient pager files and statement journals. @@ -1608,7 +1747,7 @@ static int openDatabase( db->autoCommit = 1; db->nextAutovac = -1; db->nextPagesize = 0; - db->flags |= SQLITE_ShortColNames + db->flags |= SQLITE_ShortColNames | SQLITE_AutoIndex #if SQLITE_DEFAULT_FILE_FORMAT<4 | SQLITE_LegacyFileFmt #endif @@ -1746,6 +1885,8 @@ static int openDatabase( setupLookaside(db, 0, sqlite3GlobalConfig.szLookaside, sqlite3GlobalConfig.nLookaside); + sqlite3_wal_autocheckpoint(db, SQLITE_DEFAULT_WAL_AUTOCHECKPOINT); + opendb_out: if( db ){ assert( db->mutex!=0 || isThreadsafe==0 || sqlite3GlobalConfig.bFullMutex==0 ); @@ -1920,7 +2061,6 @@ int sqlite3_collation_needed16( } #endif /* SQLITE_OMIT_UTF16 */ -#ifndef SQLITE_OMIT_GLOBALRECOVER #ifndef SQLITE_OMIT_DEPRECATED /* ** This function is now an anachronism. It used to be used to recover from a @@ -1930,7 +2070,6 @@ int sqlite3_global_recover(void){ return SQLITE_OK; } #endif -#endif /* ** Test to see whether or not the database connection is in autocommit @@ -1958,17 +2097,22 @@ int sqlite3_get_autocommit(sqlite3 *db){ int sqlite3CorruptError(int lineno){ testcase( sqlite3GlobalConfig.xLog!=0 ); sqlite3_log(SQLITE_CORRUPT, - "database corruption found by source line %d", lineno); + "database corruption at line %d of [%.10s]", + lineno, 20+sqlite3_sourceid()); return SQLITE_CORRUPT; } int sqlite3MisuseError(int lineno){ testcase( sqlite3GlobalConfig.xLog!=0 ); - sqlite3_log(SQLITE_MISUSE, "misuse detected by source line %d", lineno); + sqlite3_log(SQLITE_MISUSE, + "misuse at line %d of [%.10s]", + lineno, 20+sqlite3_sourceid()); return SQLITE_MISUSE; } int sqlite3CantopenError(int lineno){ testcase( sqlite3GlobalConfig.xLog!=0 ); - sqlite3_log(SQLITE_CANTOPEN, "cannot open file at source line %d", lineno); + sqlite3_log(SQLITE_CANTOPEN, + "cannot open file at line %d of [%.10s]", + lineno, 20+sqlite3_sourceid()); return SQLITE_CANTOPEN; } @@ -2240,9 +2384,13 @@ int sqlite3_test_control(int op, ...){ ** dileterious behavior. */ case SQLITE_TESTCTRL_PENDING_BYTE: { - unsigned int newVal = va_arg(ap, unsigned int); - rc = sqlite3PendingByte; - if( newVal ) sqlite3PendingByte = newVal; + rc = PENDING_BYTE; +#ifndef SQLITE_OMIT_WSD + { + unsigned int newVal = va_arg(ap, unsigned int); + if( newVal ) sqlite3PendingByte = newVal; + } +#endif break; } @@ -2346,6 +2494,15 @@ int sqlite3_test_control(int op, ...){ } #endif + /* sqlite3_test_control(SQLITE_TESTCTRL_PGHDRSZ) + ** + ** Return the size of a pcache header in bytes. 
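sqlite3CorruptError(), sqlite3MisuseError() and sqlite3CantopenError() now log through sqlite3_log() with a source-id prefix; those messages are only visible if the application has installed a logger. A sketch of wiring one up (must run before the library is initialized):

#include <sqlite3.h>
#include <stdio.h>

/* Route SQLite's internal log messages (including the
** "database corruption at line N of [...]" messages above) to stderr. */
static void logCallback(void *pArg, int iErrCode, const char *zMsg){
  (void)pArg;
  fprintf(stderr, "sqlite(%d): %s\n", iErrCode, zMsg);
}

static int install_logger(void){
  /* sqlite3_config() may only be called before sqlite3_initialize(). */
  return sqlite3_config(SQLITE_CONFIG_LOG, logCallback, (void*)0);
}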
+ */ + case SQLITE_TESTCTRL_PGHDRSZ: { + rc = sizeof(PgHdr); + break; + } + } va_end(ap); #endif /* SQLITE_OMIT_BUILTIN_TEST */ diff --git a/src/malloc.c b/src/malloc.c index 6b25187..c7f8a19 100644 --- a/src/malloc.c +++ b/src/malloc.c @@ -315,11 +315,11 @@ void *sqlite3ScratchMalloc(int n){ assert( n>0 ); #if SQLITE_THREADSAFE==0 && !defined(NDEBUG) - /* Verify that no more than one scratch allocation per thread + /* Verify that no more than two scratch allocation per thread ** is outstanding at one time. (This is only checked in the ** single-threaded case since checking in the multi-threaded case ** would be much more complicated.) */ - assert( scratchAllocOut==0 ); + assert( scratchAllocOut<=1 ); #endif if( sqlite3GlobalConfig.szScratch=(void*)mem0.aScratchFree ){ + assert( sqlite3MemdebugHasType(p, MEMTYPE_SCRATCH) ); + sqlite3MemdebugSetType(p, MEMTYPE_HEAP); if( sqlite3GlobalConfig.bMemstat ){ int iSize = sqlite3MallocSize(p); sqlite3_mutex_enter(mem0.mutex); @@ -396,6 +389,16 @@ void sqlite3ScratchFree(void *p){ mem0.aScratchFree[mem0.nScratchFree++] = i; sqlite3StatusAdd(SQLITE_STATUS_SCRATCH_USED, -1); sqlite3_mutex_leave(mem0.mutex); + +#if SQLITE_THREADSAFE==0 && !defined(NDEBUG) + /* Verify that no more than two scratch allocation per thread + ** is outstanding at one time. (This is only checked in the + ** single-threaded case since checking in the multi-threaded case + ** would be much more complicated.) */ + assert( scratchAllocOut>=1 && scratchAllocOut<=2 ); + scratchAllocOut = 0; +#endif + } } } @@ -416,6 +419,7 @@ static int isLookaside(sqlite3 *db, void *p){ ** sqlite3Malloc() or sqlite3_malloc(). */ int sqlite3MallocSize(void *p){ + assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) ); return sqlite3GlobalConfig.m.xSize(p); } int sqlite3DbMallocSize(sqlite3 *db, void *p){ @@ -423,6 +427,8 @@ int sqlite3DbMallocSize(sqlite3 *db, void *p){ if( isLookaside(db, p) ){ return db->lookaside.sz; }else{ + assert( sqlite3MemdebugHasType(p, + db ? (MEMTYPE_DB|MEMTYPE_HEAP) : MEMTYPE_HEAP) ); return sqlite3GlobalConfig.m.xSize(p); } } @@ -432,6 +438,7 @@ int sqlite3DbMallocSize(sqlite3 *db, void *p){ */ void sqlite3_free(void *p){ if( p==0 ) return; + assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) ); if( sqlite3GlobalConfig.bMemstat ){ sqlite3_mutex_enter(mem0.mutex); sqlite3StatusAdd(SQLITE_STATUS_MEMORY_USED, -sqlite3MallocSize(p)); @@ -454,6 +461,8 @@ void sqlite3DbFree(sqlite3 *db, void *p){ db->lookaside.pFree = pBuf; db->lookaside.nOut--; }else{ + assert( sqlite3MemdebugHasType(p, MEMTYPE_DB|MEMTYPE_HEAP) ); + sqlite3MemdebugSetType(p, MEMTYPE_HEAP); sqlite3_free(p); } } @@ -486,6 +495,7 @@ void *sqlite3Realloc(void *pOld, int nBytes){ mem0.alarmThreshold ){ sqlite3MallocAlarm(nNew-nOld); } + assert( sqlite3MemdebugHasType(pOld, MEMTYPE_HEAP) ); pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nNew); if( pNew==0 && mem0.alarmCallback ){ sqlite3MallocAlarm(nBytes); @@ -583,6 +593,8 @@ void *sqlite3DbMallocRaw(sqlite3 *db, int n){ if( !p && db ){ db->mallocFailed = 1; } + sqlite3MemdebugSetType(p, + (db && db->lookaside.bEnabled) ? MEMTYPE_DB : MEMTYPE_HEAP); return p; } @@ -608,10 +620,14 @@ void *sqlite3DbRealloc(sqlite3 *db, void *p, int n){ sqlite3DbFree(db, p); } }else{ + assert( sqlite3MemdebugHasType(p, MEMTYPE_DB|MEMTYPE_HEAP) ); + sqlite3MemdebugSetType(p, MEMTYPE_HEAP); pNew = sqlite3_realloc(p, n); if( !pNew ){ db->mallocFailed = 1; } + sqlite3MemdebugSetType(pNew, + db->lookaside.bEnabled ? 
MEMTYPE_DB : MEMTYPE_HEAP); } } return pNew; diff --git a/src/mem2.c b/src/mem2.c index 5eb937e..a82fee8 100644 --- a/src/mem2.c +++ b/src/mem2.c @@ -57,7 +57,8 @@ struct MemBlockHdr { struct MemBlockHdr *pNext, *pPrev; /* Linked list of all unfreed memory */ char nBacktrace; /* Number of backtraces on this alloc */ char nBacktraceSlots; /* Available backtrace slots */ - short nTitle; /* Bytes of title; includes '\0' */ + u8 nTitle; /* Bytes of title; includes '\0' */ + u8 eType; /* Allocation type code */ int iForeGuard; /* Guard word for sanity */ }; @@ -265,6 +266,7 @@ static void *sqlite3MemMalloc(int nByte){ } mem.pLast = pHdr; pHdr->iForeGuard = FOREGUARD; + pHdr->eType = MEMTYPE_HEAP; pHdr->nBacktraceSlots = mem.nBacktrace; pHdr->nTitle = mem.nTitle; if( mem.nBacktrace ){ @@ -372,6 +374,47 @@ void sqlite3MemSetDefault(void){ sqlite3_config(SQLITE_CONFIG_MALLOC, &defaultMethods); } +/* +** Set the "type" of an allocation. +*/ +void sqlite3MemdebugSetType(void *p, u8 eType){ + if( p ){ + struct MemBlockHdr *pHdr; + pHdr = sqlite3MemsysGetHeader(p); + assert( pHdr->iForeGuard==FOREGUARD ); + pHdr->eType = eType; + } +} + +/* +** Return TRUE if the mask of type in eType matches the type of the +** allocation p. Also return true if p==NULL. +** +** This routine is designed for use within an assert() statement, to +** verify the type of an allocation. For example: +** +** assert( sqlite3MemdebugHasType(p, MEMTYPE_DB) ); +*/ +int sqlite3MemdebugHasType(void *p, u8 eType){ + int rc = 1; + if( p ){ + struct MemBlockHdr *pHdr; + pHdr = sqlite3MemsysGetHeader(p); + assert( pHdr->iForeGuard==FOREGUARD ); /* Allocation is valid */ + assert( (pHdr->eType & (pHdr->eType-1))==0 ); /* Only one type bit set */ + if( (pHdr->eType&eType)==0 ){ + void **pBt; + pBt = (void**)pHdr; + pBt -= pHdr->nBacktraceSlots; + backtrace_symbols_fd(pBt, pHdr->nBacktrace, fileno(stderr)); + fprintf(stderr, "\n"); + rc = 0; + } + } + return rc; +} + + /* ** Set the number of backtrace levels kept for each allocation. ** A value of zero turns off backtracing. The number is always rounded diff --git a/src/memjournal.c b/src/memjournal.c index 0d81ecf..68c2ff8 100644 --- a/src/memjournal.c +++ b/src/memjournal.c @@ -196,11 +196,10 @@ static int memjrnlClose(sqlite3_file *pJfd){ ** exists purely as a contingency, in case some malfunction in some other ** part of SQLite causes Sync to be called by mistake. */ -static int memjrnlSync(sqlite3_file *NotUsed, int NotUsed2){ /*NO_TEST*/ - UNUSED_PARAMETER2(NotUsed, NotUsed2); /*NO_TEST*/ - assert( 0 ); /*NO_TEST*/ - return SQLITE_OK; /*NO_TEST*/ -} /*NO_TEST*/ +static int memjrnlSync(sqlite3_file *NotUsed, int NotUsed2){ + UNUSED_PARAMETER2(NotUsed, NotUsed2); + return SQLITE_OK; +} /* ** Query the size of the file in bytes. @@ -214,7 +213,7 @@ static int memjrnlFileSize(sqlite3_file *pJfd, sqlite_int64 *pSize){ /* ** Table of methods for MemJournal sqlite3_file object. 
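sqlite3MemdebugSetType() and sqlite3MemdebugHasType() tag each debug allocation with a one-byte type so that assert()s can catch, for example, a buffer from one allocator family being released through another. A simplified, standalone sketch of the same pattern; the header layout here is illustrative and is not SQLite's MemBlockHdr:

#include <assert.h>
#include <stdlib.h>

/* Type bits, mirroring the idea of MEMTYPE_HEAP/MEMTYPE_DB/MEMTYPE_SCRATCH. */
#define DBG_HEAP    0x01
#define DBG_DB      0x02
#define DBG_SCRATCH 0x04

typedef union DbgHdr {
  unsigned char eType;       /* which allocator family owns this block */
  long double forceAlign;    /* keep the user pointer suitably aligned */
} DbgHdr;

static void *dbg_malloc(size_t n, unsigned char eType){
  DbgHdr *p = (DbgHdr*)malloc(sizeof(DbgHdr) + n);
  if( p==0 ) return 0;
  p->eType = eType;
  return (void*)&p[1];
}
static int dbg_has_type(void *p, unsigned char mask){
  return p==0 || (((DbgHdr*)p - 1)->eType & mask)!=0;
}
static void dbg_free(void *p, unsigned char eExpected){
  if( p ){
    assert( dbg_has_type(p, eExpected) );   /* catch mismatched frees */
    free((DbgHdr*)p - 1);
  }
}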
*/ -static struct sqlite3_io_methods MemJournalMethods = { +static const struct sqlite3_io_methods MemJournalMethods = { 1, /* iVersion */ memjrnlClose, /* xClose */ memjrnlRead, /* xRead */ @@ -227,7 +226,11 @@ static struct sqlite3_io_methods MemJournalMethods = { 0, /* xCheckReservedLock */ 0, /* xFileControl */ 0, /* xSectorSize */ - 0 /* xDeviceCharacteristics */ + 0, /* xDeviceCharacteristics */ + 0, /* xShmMap */ + 0, /* xShmLock */ + 0, /* xShmBarrier */ + 0 /* xShmUnlock */ }; /* @@ -237,7 +240,7 @@ void sqlite3MemJournalOpen(sqlite3_file *pJfd){ MemJournal *p = (MemJournal *)pJfd; assert( EIGHT_BYTE_ALIGNMENT(p) ); memset(p, 0, sqlite3MemJournalSize()); - p->pMethod = &MemJournalMethods; + p->pMethod = (sqlite3_io_methods*)&MemJournalMethods; } /* diff --git a/src/mutex.c b/src/mutex.c index 11d498c..869a4ae 100644 --- a/src/mutex.c +++ b/src/mutex.c @@ -31,23 +31,26 @@ static SQLITE_WSD int mutexIsInit = 0; */ int sqlite3MutexInit(void){ int rc = SQLITE_OK; - if( sqlite3GlobalConfig.bCoreMutex ){ - if( !sqlite3GlobalConfig.mutex.xMutexAlloc ){ - /* If the xMutexAlloc method has not been set, then the user did not - ** install a mutex implementation via sqlite3_config() prior to - ** sqlite3_initialize() being called. This block copies pointers to - ** the default implementation into the sqlite3GlobalConfig structure. - */ - sqlite3_mutex_methods *pFrom = sqlite3DefaultMutex(); - sqlite3_mutex_methods *pTo = &sqlite3GlobalConfig.mutex; + if( !sqlite3GlobalConfig.mutex.xMutexAlloc ){ + /* If the xMutexAlloc method has not been set, then the user did not + ** install a mutex implementation via sqlite3_config() prior to + ** sqlite3_initialize() being called. This block copies pointers to + ** the default implementation into the sqlite3GlobalConfig structure. + */ + sqlite3_mutex_methods const *pFrom; + sqlite3_mutex_methods *pTo = &sqlite3GlobalConfig.mutex; - memcpy(pTo, pFrom, offsetof(sqlite3_mutex_methods, xMutexAlloc)); - memcpy(&pTo->xMutexFree, &pFrom->xMutexFree, - sizeof(*pTo) - offsetof(sqlite3_mutex_methods, xMutexFree)); - pTo->xMutexAlloc = pFrom->xMutexAlloc; + if( sqlite3GlobalConfig.bCoreMutex ){ + pFrom = sqlite3DefaultMutex(); + }else{ + pFrom = sqlite3NoopMutex(); } - rc = sqlite3GlobalConfig.mutex.xMutexInit(); + memcpy(pTo, pFrom, offsetof(sqlite3_mutex_methods, xMutexAlloc)); + memcpy(&pTo->xMutexFree, &pFrom->xMutexFree, + sizeof(*pTo) - offsetof(sqlite3_mutex_methods, xMutexFree)); + pTo->xMutexAlloc = pFrom->xMutexAlloc; } + rc = sqlite3GlobalConfig.mutex.xMutexInit(); #ifdef SQLITE_DEBUG GLOBAL(int, mutexIsInit) = 1; diff --git a/src/mutex_noop.c b/src/mutex_noop.c index ee74da1..c5fd520 100644 --- a/src/mutex_noop.c +++ b/src/mutex_noop.c @@ -27,25 +27,30 @@ */ #include "sqliteInt.h" +#ifndef SQLITE_MUTEX_OMIT -#if defined(SQLITE_MUTEX_NOOP) && !defined(SQLITE_DEBUG) +#ifndef SQLITE_DEBUG /* ** Stub routines for all mutex methods. ** ** This routines provide no mutual exclusion or error checking. 
*/ -static int noopMutexHeld(sqlite3_mutex *p){ return 1; } -static int noopMutexNotheld(sqlite3_mutex *p){ return 1; } static int noopMutexInit(void){ return SQLITE_OK; } static int noopMutexEnd(void){ return SQLITE_OK; } -static sqlite3_mutex *noopMutexAlloc(int id){ return (sqlite3_mutex*)8; } -static void noopMutexFree(sqlite3_mutex *p){ return; } -static void noopMutexEnter(sqlite3_mutex *p){ return; } -static int noopMutexTry(sqlite3_mutex *p){ return SQLITE_OK; } -static void noopMutexLeave(sqlite3_mutex *p){ return; } +static sqlite3_mutex *noopMutexAlloc(int id){ + UNUSED_PARAMETER(id); + return (sqlite3_mutex*)8; +} +static void noopMutexFree(sqlite3_mutex *p){ UNUSED_PARAMETER(p); return; } +static void noopMutexEnter(sqlite3_mutex *p){ UNUSED_PARAMETER(p); return; } +static int noopMutexTry(sqlite3_mutex *p){ + UNUSED_PARAMETER(p); + return SQLITE_OK; +} +static void noopMutexLeave(sqlite3_mutex *p){ UNUSED_PARAMETER(p); return; } -sqlite3_mutex_methods *sqlite3DefaultMutex(void){ - static sqlite3_mutex_methods sMutex = { +sqlite3_mutex_methods const *sqlite3NoopMutex(void){ + static const sqlite3_mutex_methods sMutex = { noopMutexInit, noopMutexEnd, noopMutexAlloc, @@ -54,15 +59,15 @@ sqlite3_mutex_methods *sqlite3DefaultMutex(void){ noopMutexTry, noopMutexLeave, - noopMutexHeld, - noopMutexNotheld + 0, + 0, }; return &sMutex; } -#endif /* defined(SQLITE_MUTEX_NOOP) && !defined(SQLITE_DEBUG) */ +#endif /* !SQLITE_DEBUG */ -#if defined(SQLITE_MUTEX_NOOP) && defined(SQLITE_DEBUG) +#ifdef SQLITE_DEBUG /* ** In this implementation, error checking is provided for testing ** and debugging purposes. The mutexes still do not provide any @@ -72,19 +77,21 @@ sqlite3_mutex_methods *sqlite3DefaultMutex(void){ /* ** The mutex object */ -struct sqlite3_mutex { +typedef struct sqlite3_debug_mutex { int id; /* The mutex type */ int cnt; /* Number of entries without a matching leave */ -}; +} sqlite3_debug_mutex; /* ** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are ** intended for use inside assert() statements. */ -static int debugMutexHeld(sqlite3_mutex *p){ +static int debugMutexHeld(sqlite3_mutex *pX){ + sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; return p==0 || p->cnt>0; } -static int debugMutexNotheld(sqlite3_mutex *p){ +static int debugMutexNotheld(sqlite3_mutex *pX){ + sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; return p==0 || p->cnt==0; } @@ -100,8 +107,8 @@ static int debugMutexEnd(void){ return SQLITE_OK; } ** that means that a mutex could not be allocated. */ static sqlite3_mutex *debugMutexAlloc(int id){ - static sqlite3_mutex aStatic[6]; - sqlite3_mutex *pNew = 0; + static sqlite3_debug_mutex aStatic[6]; + sqlite3_debug_mutex *pNew = 0; switch( id ){ case SQLITE_MUTEX_FAST: case SQLITE_MUTEX_RECURSIVE: { @@ -120,13 +127,14 @@ static sqlite3_mutex *debugMutexAlloc(int id){ break; } } - return pNew; + return (sqlite3_mutex*)pNew; } /* ** This routine deallocates a previously allocated mutex. */ -static void debugMutexFree(sqlite3_mutex *p){ +static void debugMutexFree(sqlite3_mutex *pX){ + sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; assert( p->cnt==0 ); assert( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE ); sqlite3_free(p); @@ -143,12 +151,14 @@ static void debugMutexFree(sqlite3_mutex *p){ ** can enter. If the same thread tries to enter any other kind of mutex ** more than once, the behavior is undefined. 
*/ -static void debugMutexEnter(sqlite3_mutex *p){ - assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(p) ); +static void debugMutexEnter(sqlite3_mutex *pX){ + sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; + assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(pX) ); p->cnt++; } -static int debugMutexTry(sqlite3_mutex *p){ - assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(p) ); +static int debugMutexTry(sqlite3_mutex *pX){ + sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; + assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(pX) ); p->cnt++; return SQLITE_OK; } @@ -159,14 +169,15 @@ static int debugMutexTry(sqlite3_mutex *p){ ** is undefined if the mutex is not currently entered or ** is not currently allocated. SQLite will never do either. */ -static void debugMutexLeave(sqlite3_mutex *p){ - assert( debugMutexHeld(p) ); +static void debugMutexLeave(sqlite3_mutex *pX){ + sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; + assert( debugMutexHeld(pX) ); p->cnt--; - assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(p) ); + assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(pX) ); } -sqlite3_mutex_methods *sqlite3DefaultMutex(void){ - static sqlite3_mutex_methods sMutex = { +sqlite3_mutex_methods const *sqlite3NoopMutex(void){ + static const sqlite3_mutex_methods sMutex = { debugMutexInit, debugMutexEnd, debugMutexAlloc, @@ -181,4 +192,15 @@ sqlite3_mutex_methods *sqlite3DefaultMutex(void){ return &sMutex; } -#endif /* defined(SQLITE_MUTEX_NOOP) && defined(SQLITE_DEBUG) */ +#endif /* SQLITE_DEBUG */ + +/* +** If compiled with SQLITE_MUTEX_NOOP, then the no-op mutex implementation +** is used regardless of the run-time threadsafety setting. +*/ +#ifdef SQLITE_MUTEX_NOOP +sqlite3_mutex_methods const *sqlite3DefaultMutex(void){ + return sqlite3NoopMutex(); +} +#endif /* SQLITE_MUTEX_NOOP */ +#endif /* SQLITE_MUTEX_OMIT */ diff --git a/src/mutex_os2.c b/src/mutex_os2.c index f16dc21..4438c09 100644 --- a/src/mutex_os2.c +++ b/src/mutex_os2.c @@ -251,8 +251,8 @@ static void os2MutexLeave(sqlite3_mutex *p){ DosReleaseMutexSem(p->mutex); } -sqlite3_mutex_methods *sqlite3DefaultMutex(void){ - static sqlite3_mutex_methods sMutex = { +sqlite3_mutex_methods const *sqlite3DefaultMutex(void){ + static const sqlite3_mutex_methods sMutex = { os2MutexInit, os2MutexEnd, os2MutexAlloc, diff --git a/src/mutex_unix.c b/src/mutex_unix.c index 402757f..a1ebc3b 100644 --- a/src/mutex_unix.c +++ b/src/mutex_unix.c @@ -24,23 +24,33 @@ #include +/* +** The sqlite3_mutex.id, sqlite3_mutex.nRef, and sqlite3_mutex.owner fields +** are necessary under two condidtions: (1) Debug builds and (2) using +** home-grown mutexes. Encapsulate these conditions into a single #define. +*/ +#if defined(SQLITE_DEBUG) || defined(SQLITE_HOMEGROWN_RECURSIVE_MUTEX) +# define SQLITE_MUTEX_NREF 1 +#else +# define SQLITE_MUTEX_NREF 0 +#endif /* ** Each recursive mutex is an instance of the following structure. 
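sqlite3NoopMutex() above supplies the stub (and, under SQLITE_DEBUG, counting) implementation used when core mutexing is disabled. An application can install its own table of the same shape through the public SQLITE_CONFIG_MUTEX hook; a do-nothing sketch, safe only for single-threaded use (function names are illustrative):

#include <sqlite3.h>

/* Do-nothing mutex methods in the layout expected by SQLITE_CONFIG_MUTEX. */
static int appMutexInit(void){ return SQLITE_OK; }
static int appMutexEnd(void){ return SQLITE_OK; }
static sqlite3_mutex *appMutexAlloc(int id){ (void)id; return (sqlite3_mutex*)8; }
static void appMutexFree(sqlite3_mutex *p){ (void)p; }
static void appMutexEnter(sqlite3_mutex *p){ (void)p; }
static int appMutexTry(sqlite3_mutex *p){ (void)p; return SQLITE_OK; }
static void appMutexLeave(sqlite3_mutex *p){ (void)p; }
static int appMutexHeld(sqlite3_mutex *p){ (void)p; return 1; }
static int appMutexNotheld(sqlite3_mutex *p){ (void)p; return 1; }

static int install_app_mutexes(void){
  static sqlite3_mutex_methods sMethods = {
    appMutexInit, appMutexEnd, appMutexAlloc, appMutexFree,
    appMutexEnter, appMutexTry, appMutexLeave,
    appMutexHeld, appMutexNotheld
  };
  /* Must be called before sqlite3_initialize() or any other use of
  ** the library. */
  return sqlite3_config(SQLITE_CONFIG_MUTEX, &sMethods);
}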
*/ struct sqlite3_mutex { pthread_mutex_t mutex; /* Mutex controlling the lock */ +#if SQLITE_MUTEX_NREF int id; /* Mutex type */ - int nRef; /* Number of entrances */ - pthread_t owner; /* Thread that is within this mutex */ -#ifdef SQLITE_DEBUG + volatile int nRef; /* Number of entrances */ + volatile pthread_t owner; /* Thread that is within this mutex */ int trace; /* True to trace changes */ #endif }; -#ifdef SQLITE_DEBUG +#if SQLITE_MUTEX_NREF #define SQLITE3_MUTEX_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, 0, 0, (pthread_t)0, 0 } #else -#define SQLITE3_MUTEX_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, 0, 0, (pthread_t)0 } +#define SQLITE3_MUTEX_INITIALIZER { PTHREAD_MUTEX_INITIALIZER } #endif /* @@ -142,14 +152,18 @@ static sqlite3_mutex *pthreadMutexAlloc(int iType){ pthread_mutex_init(&p->mutex, &recursiveAttr); pthread_mutexattr_destroy(&recursiveAttr); #endif +#if SQLITE_MUTEX_NREF p->id = iType; +#endif } break; } case SQLITE_MUTEX_FAST: { p = sqlite3MallocZero( sizeof(*p) ); if( p ){ +#if SQLITE_MUTEX_NREF p->id = iType; +#endif pthread_mutex_init(&p->mutex, 0); } break; @@ -158,7 +172,9 @@ static sqlite3_mutex *pthreadMutexAlloc(int iType){ assert( iType-2 >= 0 ); assert( iType-2 < ArraySize(staticMutexes) ); p = &staticMutexes[iType-2]; +#if SQLITE_MUTEX_NREF p->id = iType; +#endif break; } } @@ -218,9 +234,11 @@ static void pthreadMutexEnter(sqlite3_mutex *p){ /* Use the built-in recursive mutexes if they are available. */ pthread_mutex_lock(&p->mutex); +#if SQLITE_MUTEX_NREF p->owner = pthread_self(); p->nRef++; #endif +#endif #ifdef SQLITE_DEBUG if( p->trace ){ @@ -261,8 +279,10 @@ static int pthreadMutexTry(sqlite3_mutex *p){ /* Use the built-in recursive mutexes if they are available. */ if( pthread_mutex_trylock(&p->mutex)==0 ){ +#if SQLITE_MUTEX_NREF p->owner = pthread_self(); p->nRef++; +#endif rc = SQLITE_OK; }else{ rc = SQLITE_BUSY; @@ -285,7 +305,9 @@ static int pthreadMutexTry(sqlite3_mutex *p){ */ static void pthreadMutexLeave(sqlite3_mutex *p){ assert( pthreadMutexHeld(p) ); +#if SQLITE_MUTEX_NREF p->nRef--; +#endif assert( p->nRef==0 || p->id==SQLITE_MUTEX_RECURSIVE ); #ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX @@ -303,8 +325,8 @@ static void pthreadMutexLeave(sqlite3_mutex *p){ #endif } -sqlite3_mutex_methods *sqlite3DefaultMutex(void){ - static sqlite3_mutex_methods sMutex = { +sqlite3_mutex_methods const *sqlite3DefaultMutex(void){ + static const sqlite3_mutex_methods sMutex = { pthreadMutexInit, pthreadMutexEnd, pthreadMutexAlloc, diff --git a/src/mutex_w32.c b/src/mutex_w32.c index 237d24e..442a6b7 100644 --- a/src/mutex_w32.c +++ b/src/mutex_w32.c @@ -25,9 +25,9 @@ struct sqlite3_mutex { CRITICAL_SECTION mutex; /* Mutex controlling the lock */ int id; /* Mutex type */ - int nRef; /* Number of enterances */ - DWORD owner; /* Thread holding this mutex */ #ifdef SQLITE_DEBUG + volatile int nRef; /* Number of enterances */ + volatile DWORD owner; /* Thread holding this mutex */ int trace; /* True to trace changes */ #endif }; @@ -35,7 +35,7 @@ struct sqlite3_mutex { #ifdef SQLITE_DEBUG #define SQLITE3_MUTEX_INITIALIZER { SQLITE_W32_MUTEX_INITIALIZER, 0, 0L, (DWORD)0, 0 } #else -#define SQLITE3_MUTEX_INITIALIZER { SQLITE_W32_MUTEX_INITIALIZER, 0, 0L, (DWORD)0 } +#define SQLITE3_MUTEX_INITIALIZER { SQLITE_W32_MUTEX_INITIALIZER, 0 } #endif /* @@ -191,7 +191,9 @@ static sqlite3_mutex *winMutexAlloc(int iType){ case SQLITE_MUTEX_RECURSIVE: { p = sqlite3MallocZero( sizeof(*p) ); if( p ){ +#ifdef SQLITE_DEBUG p->id = iType; +#endif InitializeCriticalSection(&p->mutex); } 
break; @@ -201,7 +203,9 @@ static sqlite3_mutex *winMutexAlloc(int iType){ assert( iType-2 >= 0 ); assert( iType-2 < ArraySize(winMutex_staticMutexes) ); p = &winMutex_staticMutexes[iType-2]; +#ifdef SQLITE_DEBUG p->id = iType; +#endif break; } } @@ -234,12 +238,14 @@ static void winMutexFree(sqlite3_mutex *p){ ** more than once, the behavior is undefined. */ static void winMutexEnter(sqlite3_mutex *p){ +#ifdef SQLITE_DEBUG DWORD tid = GetCurrentThreadId(); assert( p->id==SQLITE_MUTEX_RECURSIVE || winMutexNotheld2(p, tid) ); +#endif EnterCriticalSection(&p->mutex); +#ifdef SQLITE_DEBUG p->owner = tid; p->nRef++; -#ifdef SQLITE_DEBUG if( p->trace ){ printf("enter mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef); } @@ -288,11 +294,11 @@ static int winMutexTry(sqlite3_mutex *p){ static void winMutexLeave(sqlite3_mutex *p){ #ifndef NDEBUG DWORD tid = GetCurrentThreadId(); -#endif assert( p->nRef>0 ); assert( p->owner==tid ); p->nRef--; assert( p->nRef==0 || p->id==SQLITE_MUTEX_RECURSIVE ); +#endif LeaveCriticalSection(&p->mutex); #ifdef SQLITE_DEBUG if( p->trace ){ @@ -301,8 +307,8 @@ static void winMutexLeave(sqlite3_mutex *p){ #endif } -sqlite3_mutex_methods *sqlite3DefaultMutex(void){ - static sqlite3_mutex_methods sMutex = { +sqlite3_mutex_methods const *sqlite3DefaultMutex(void){ + static const sqlite3_mutex_methods sMutex = { winMutexInit, winMutexEnd, winMutexAlloc, diff --git a/src/notify.c b/src/notify.c index 6362678..4c8ab00 100644 --- a/src/notify.c +++ b/src/notify.c @@ -157,6 +157,7 @@ int sqlite3_unlock_notify( if( xNotify==0 ){ removeFromBlockedList(db); + db->pBlockingConnection = 0; db->pUnlockConnection = 0; db->xUnlockNotify = 0; db->pUnlockArg = 0; diff --git a/src/os.c b/src/os.c index f3600cb..35b48f1 100644 --- a/src/os.c +++ b/src/os.c @@ -34,8 +34,10 @@ ** sqlite3OsLock() ** */ -#if defined(SQLITE_TEST) && (SQLITE_OS_WIN==0) - #define DO_OS_MALLOC_TEST(x) if (!x || !sqlite3IsMemJournal(x)) { \ +#if defined(SQLITE_TEST) +int sqlite3_memdebug_vfs_oom_test = 1; + #define DO_OS_MALLOC_TEST(x) \ + if (sqlite3_memdebug_vfs_oom_test && (!x || !sqlite3IsMemJournal(x))) { \ void *pTstAlloc = sqlite3Malloc(10); \ if (!pTstAlloc) return SQLITE_IOERR_NOMEM; \ sqlite3_free(pTstAlloc); \ @@ -98,6 +100,24 @@ int sqlite3OsSectorSize(sqlite3_file *id){ int sqlite3OsDeviceCharacteristics(sqlite3_file *id){ return id->pMethods->xDeviceCharacteristics(id); } +int sqlite3OsShmLock(sqlite3_file *id, int offset, int n, int flags){ + return id->pMethods->xShmLock(id, offset, n, flags); +} +void sqlite3OsShmBarrier(sqlite3_file *id){ + id->pMethods->xShmBarrier(id); +} +int sqlite3OsShmUnmap(sqlite3_file *id, int deleteFlag){ + return id->pMethods->xShmUnmap(id, deleteFlag); +} +int sqlite3OsShmMap( + sqlite3_file *id, /* Database file handle */ + int iPage, + int pgsz, + int bExtend, /* True to extend file if necessary */ + void volatile **pp /* OUT: Pointer to mapping */ +){ + return id->pMethods->xShmMap(id, iPage, pgsz, bExtend, pp); +} /* ** The next group of routines are convenience wrappers around the @@ -112,11 +132,11 @@ int sqlite3OsOpen( ){ int rc; DO_OS_MALLOC_TEST(0); - /* 0x7f3f is a mask of SQLITE_OPEN_ flags that are valid to be passed + /* 0x87f3f is a mask of SQLITE_OPEN_ flags that are valid to be passed ** down into the VFS layer. Some SQLITE_OPEN_ flags (for example, ** SQLITE_OPEN_FULLMUTEX or SQLITE_OPEN_SHAREDCACHE) are blocked before ** reaching the VFS. 
*/ - rc = pVfs->xOpen(pVfs, zPath, pFile, flags & 0x7f3f, pFlagsOut); + rc = pVfs->xOpen(pVfs, zPath, pFile, flags & 0x87f3f, pFlagsOut); assert( rc==SQLITE_OK || pFile->pMethods==0 ); return rc; } @@ -161,8 +181,16 @@ int sqlite3OsRandomness(sqlite3_vfs *pVfs, int nByte, char *zBufOut){ int sqlite3OsSleep(sqlite3_vfs *pVfs, int nMicro){ return pVfs->xSleep(pVfs, nMicro); } -int sqlite3OsCurrentTime(sqlite3_vfs *pVfs, double *pTimeOut){ - return pVfs->xCurrentTime(pVfs, pTimeOut); +int sqlite3OsCurrentTimeInt64(sqlite3_vfs *pVfs, sqlite3_int64 *pTimeOut){ + int rc; + if( pVfs->iVersion>=2 && pVfs->xCurrentTimeInt64 ){ + rc = pVfs->xCurrentTimeInt64(pVfs, pTimeOut); + }else{ + double r; + rc = pVfs->xCurrentTime(pVfs, &r); + *pTimeOut = (sqlite3_int64)(r*86400000.0); + } + return rc; } int sqlite3OsOpenMalloc( diff --git a/src/os.h b/src/os.h index 089901e..7f17c20 100644 --- a/src/os.h +++ b/src/os.h @@ -217,7 +217,11 @@ ** 1GB boundary. ** */ -#define PENDING_BYTE sqlite3PendingByte +#ifdef SQLITE_OMIT_WSD +# define PENDING_BYTE (0x40000000) +#else +# define PENDING_BYTE sqlite3PendingByte +#endif #define RESERVED_BYTE (PENDING_BYTE+1) #define SHARED_FIRST (PENDING_BYTE+2) #define SHARED_SIZE 510 @@ -243,6 +247,10 @@ int sqlite3OsFileControl(sqlite3_file*,int,void*); #define SQLITE_FCNTL_DB_UNCHANGED 0xca093fa0 int sqlite3OsSectorSize(sqlite3_file *id); int sqlite3OsDeviceCharacteristics(sqlite3_file *id); +int sqlite3OsShmMap(sqlite3_file *,int,int,int,void volatile **); +int sqlite3OsShmLock(sqlite3_file *id, int, int, int); +void sqlite3OsShmBarrier(sqlite3_file *id); +int sqlite3OsShmUnmap(sqlite3_file *id, int); /* ** Functions for accessing sqlite3_vfs methods @@ -259,7 +267,7 @@ void sqlite3OsDlClose(sqlite3_vfs *, void *); #endif /* SQLITE_OMIT_LOAD_EXTENSION */ int sqlite3OsRandomness(sqlite3_vfs *, int, char *); int sqlite3OsSleep(sqlite3_vfs *, int); -int sqlite3OsCurrentTime(sqlite3_vfs *, double*); +int sqlite3OsCurrentTimeInt64(sqlite3_vfs *, sqlite3_int64*); /* ** Convenience functions for opening and closing files using diff --git a/src/os_common.h b/src/os_common.h index 6a2e2d9..eba856b 100644 --- a/src/os_common.h +++ b/src/os_common.h @@ -31,23 +31,9 @@ #ifdef SQLITE_DEBUG int sqlite3OSTrace = 0; -#define OSTRACE1(X) if( sqlite3OSTrace ) sqlite3DebugPrintf(X) -#define OSTRACE2(X,Y) if( sqlite3OSTrace ) sqlite3DebugPrintf(X,Y) -#define OSTRACE3(X,Y,Z) if( sqlite3OSTrace ) sqlite3DebugPrintf(X,Y,Z) -#define OSTRACE4(X,Y,Z,A) if( sqlite3OSTrace ) sqlite3DebugPrintf(X,Y,Z,A) -#define OSTRACE5(X,Y,Z,A,B) if( sqlite3OSTrace ) sqlite3DebugPrintf(X,Y,Z,A,B) -#define OSTRACE6(X,Y,Z,A,B,C) \ - if(sqlite3OSTrace) sqlite3DebugPrintf(X,Y,Z,A,B,C) -#define OSTRACE7(X,Y,Z,A,B,C,D) \ - if(sqlite3OSTrace) sqlite3DebugPrintf(X,Y,Z,A,B,C,D) +#define OSTRACE(X) if( sqlite3OSTrace ) sqlite3DebugPrintf X #else -#define OSTRACE1(X) -#define OSTRACE2(X,Y) -#define OSTRACE3(X,Y,Z) -#define OSTRACE4(X,Y,Z,A) -#define OSTRACE5(X,Y,Z,A,B) -#define OSTRACE6(X,Y,Z,A,B,C) -#define OSTRACE7(X,Y,Z,A,B,C,D) +#define OSTRACE(X) #endif /* diff --git a/src/os_os2.c b/src/os_os2.c index 572b6a3..2b3fd1c 100644 --- a/src/os_os2.c +++ b/src/os_os2.c @@ -81,7 +81,7 @@ static int os2Close( sqlite3_file *id ){ APIRET rc = NO_ERROR; os2File *pFile; if( id && (pFile = (os2File*)id) != 0 ){ - OSTRACE2( "CLOSE %d\n", pFile->h ); + OSTRACE(( "CLOSE %d\n", pFile->h )); rc = DosClose( pFile->h ); pFile->locktype = NO_LOCK; if( pFile->pathToDel != NULL ){ @@ -112,7 +112,7 @@ static int os2Read( os2File *pFile = 
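The os_common.h hunk collapses the fixed-arity OSTRACE1 through OSTRACE7 macros into a single OSTRACE(X) that expands to sqlite3DebugPrintf X, with each call site supplying the whole argument list inside an extra pair of parentheses. That is the standard pre-C99 way of getting a variadic-looking macro without __VA_ARGS__, and it is why every trace call in the following hunks gains doubled parentheses. A tiny self-contained demonstration of the trick, using printf as the sink instead of SQLite's debug printer:

#include <stdio.h>

static int traceEnabled = 1;

/* X must be a complete parenthesized argument list, e.g. ("fd=%d\n", fd).
** TRACE(("fd=%d\n", fd)) then expands to: if( traceEnabled ) printf ("fd=%d\n", fd); */
#ifdef NDEBUG
#define TRACE(X)
#else
#define TRACE(X) if( traceEnabled ) printf X
#endif

int main(void){
  int fd = 7;
  TRACE(("open fd=%d flags=%#x\n", fd, 0x42));   /* note the double parentheses */
  TRACE(("close fd=%d\n", fd));
  return 0;
}

When tracing is compiled out, the empty TRACE(X) swallows the entire parenthesized list along with the macro, so the arguments are never evaluated.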
(os2File*)id; assert( id!=0 ); SimulateIOError( return SQLITE_IOERR_READ ); - OSTRACE3( "READ %d lock=%d\n", pFile->h, pFile->locktype ); + OSTRACE(( "READ %d lock=%d\n", pFile->h, pFile->locktype )); if( DosSetFilePtr(pFile->h, offset, FILE_BEGIN, &fileLocation) != NO_ERROR ){ return SQLITE_IOERR; } @@ -145,7 +145,7 @@ static int os2Write( assert( id!=0 ); SimulateIOError( return SQLITE_IOERR_WRITE ); SimulateDiskfullError( return SQLITE_FULL ); - OSTRACE3( "WRITE %d lock=%d\n", pFile->h, pFile->locktype ); + OSTRACE(( "WRITE %d lock=%d\n", pFile->h, pFile->locktype )); if( DosSetFilePtr(pFile->h, offset, FILE_BEGIN, &fileLocation) != NO_ERROR ){ return SQLITE_IOERR; } @@ -167,7 +167,7 @@ static int os2Write( static int os2Truncate( sqlite3_file *id, i64 nByte ){ APIRET rc = NO_ERROR; os2File *pFile = (os2File*)id; - OSTRACE3( "TRUNCATE %d %lld\n", pFile->h, nByte ); + OSTRACE(( "TRUNCATE %d %lld\n", pFile->h, nByte )); SimulateIOError( return SQLITE_IOERR_TRUNCATE ); rc = DosSetFileSize( pFile->h, nByte ); return rc == NO_ERROR ? SQLITE_OK : SQLITE_IOERR_TRUNCATE; @@ -187,7 +187,7 @@ int sqlite3_fullsync_count = 0; */ static int os2Sync( sqlite3_file *id, int flags ){ os2File *pFile = (os2File*)id; - OSTRACE3( "SYNC %d lock=%d\n", pFile->h, pFile->locktype ); + OSTRACE(( "SYNC %d lock=%d\n", pFile->h, pFile->locktype )); #ifdef SQLITE_TEST if( flags & SQLITE_SYNC_FULL){ sqlite3_fullsync_count++; @@ -237,7 +237,7 @@ static int getReadLock( os2File *pFile ){ UnlockArea.lOffset = 0L; UnlockArea.lRange = 0L; res = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, LOCK_TIMEOUT, 1L ); - OSTRACE3( "GETREADLOCK %d res=%d\n", pFile->h, res ); + OSTRACE(( "GETREADLOCK %d res=%d\n", pFile->h, res )); return res; } @@ -255,7 +255,7 @@ static int unlockReadLock( os2File *id ){ UnlockArea.lOffset = SHARED_FIRST; UnlockArea.lRange = SHARED_SIZE; res = DosSetFileLocks( id->h, &UnlockArea, &LockArea, LOCK_TIMEOUT, 1L ); - OSTRACE3( "UNLOCK-READLOCK file handle=%d res=%d?\n", id->h, res ); + OSTRACE(( "UNLOCK-READLOCK file handle=%d res=%d?\n", id->h, res )); return res; } @@ -296,14 +296,14 @@ static int os2Lock( sqlite3_file *id, int locktype ){ memset(&LockArea, 0, sizeof(LockArea)); memset(&UnlockArea, 0, sizeof(UnlockArea)); assert( pFile!=0 ); - OSTRACE4( "LOCK %d %d was %d\n", pFile->h, locktype, pFile->locktype ); + OSTRACE(( "LOCK %d %d was %d\n", pFile->h, locktype, pFile->locktype )); /* If there is already a lock of this type or more restrictive on the ** os2File, do nothing. Don't use the end_lock: exit path, as ** sqlite3_mutex_enter() hasn't been called yet. */ if( pFile->locktype>=locktype ){ - OSTRACE3( "LOCK %d %d ok (already held)\n", pFile->h, locktype ); + OSTRACE(( "LOCK %d %d ok (already held)\n", pFile->h, locktype )); return SQLITE_OK; } @@ -330,7 +330,7 @@ static int os2Lock( sqlite3_file *id, int locktype ){ res = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, 100L, 0L ); if( res == NO_ERROR ){ gotPendingLock = 1; - OSTRACE3( "LOCK %d pending lock boolean set. res=%d\n", pFile->h, res ); + OSTRACE(( "LOCK %d pending lock boolean set. res=%d\n", pFile->h, res )); } } @@ -342,7 +342,7 @@ static int os2Lock( sqlite3_file *id, int locktype ){ if( res == NO_ERROR ){ newLocktype = SHARED_LOCK; } - OSTRACE3( "LOCK %d acquire shared lock. res=%d\n", pFile->h, res ); + OSTRACE(( "LOCK %d acquire shared lock. 
res=%d\n", pFile->h, res )); } /* Acquire a RESERVED lock @@ -357,7 +357,7 @@ static int os2Lock( sqlite3_file *id, int locktype ){ if( res == NO_ERROR ){ newLocktype = RESERVED_LOCK; } - OSTRACE3( "LOCK %d acquire reserved lock. res=%d\n", pFile->h, res ); + OSTRACE(( "LOCK %d acquire reserved lock. res=%d\n", pFile->h, res )); } /* Acquire a PENDING lock @@ -365,7 +365,8 @@ static int os2Lock( sqlite3_file *id, int locktype ){ if( locktype==EXCLUSIVE_LOCK && res == NO_ERROR ){ newLocktype = PENDING_LOCK; gotPendingLock = 0; - OSTRACE2( "LOCK %d acquire pending lock. pending lock boolean unset.\n", pFile->h ); + OSTRACE(( "LOCK %d acquire pending lock. pending lock boolean unset.\n", + pFile->h )); } /* Acquire an EXCLUSIVE lock @@ -373,7 +374,7 @@ static int os2Lock( sqlite3_file *id, int locktype ){ if( locktype==EXCLUSIVE_LOCK && res == NO_ERROR ){ assert( pFile->locktype>=SHARED_LOCK ); res = unlockReadLock(pFile); - OSTRACE2( "unreadlock = %d\n", res ); + OSTRACE(( "unreadlock = %d\n", res )); LockArea.lOffset = SHARED_FIRST; LockArea.lRange = SHARED_SIZE; UnlockArea.lOffset = 0L; @@ -382,10 +383,10 @@ static int os2Lock( sqlite3_file *id, int locktype ){ if( res == NO_ERROR ){ newLocktype = EXCLUSIVE_LOCK; }else{ - OSTRACE2( "OS/2 error-code = %d\n", res ); + OSTRACE(( "OS/2 error-code = %d\n", res )); getReadLock(pFile); } - OSTRACE3( "LOCK %d acquire exclusive lock. res=%d\n", pFile->h, res ); + OSTRACE(( "LOCK %d acquire exclusive lock. res=%d\n", pFile->h, res )); } /* If we are holding a PENDING lock that ought to be released, then @@ -398,7 +399,7 @@ static int os2Lock( sqlite3_file *id, int locktype ){ UnlockArea.lOffset = PENDING_BYTE; UnlockArea.lRange = 1L; r = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, LOCK_TIMEOUT, 0L ); - OSTRACE3( "LOCK %d unlocking pending/is shared. r=%d\n", pFile->h, r ); + OSTRACE(( "LOCK %d unlocking pending/is shared. 
r=%d\n", pFile->h, r )); } /* Update the state of the lock has held in the file descriptor then @@ -407,12 +408,12 @@ static int os2Lock( sqlite3_file *id, int locktype ){ if( res == NO_ERROR ){ rc = SQLITE_OK; }else{ - OSTRACE4( "LOCK FAILED %d trying for %d but got %d\n", pFile->h, - locktype, newLocktype ); + OSTRACE(( "LOCK FAILED %d trying for %d but got %d\n", pFile->h, + locktype, newLocktype )); rc = SQLITE_BUSY; } pFile->locktype = newLocktype; - OSTRACE3( "LOCK %d now %d\n", pFile->h, pFile->locktype ); + OSTRACE(( "LOCK %d now %d\n", pFile->h, pFile->locktype )); return rc; } @@ -427,7 +428,7 @@ static int os2CheckReservedLock( sqlite3_file *id, int *pOut ){ assert( pFile!=0 ); if( pFile->locktype>=RESERVED_LOCK ){ r = 1; - OSTRACE3( "TEST WR-LOCK %d %d (local)\n", pFile->h, r ); + OSTRACE(( "TEST WR-LOCK %d %d (local)\n", pFile->h, r )); }else{ FILELOCK LockArea, UnlockArea; @@ -439,7 +440,7 @@ static int os2CheckReservedLock( sqlite3_file *id, int *pOut ){ UnlockArea.lOffset = 0L; UnlockArea.lRange = 0L; rc = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, LOCK_TIMEOUT, 0L ); - OSTRACE3( "TEST WR-LOCK %d lock reserved byte rc=%d\n", pFile->h, rc ); + OSTRACE(( "TEST WR-LOCK %d lock reserved byte rc=%d\n", pFile->h, rc )); if( rc == NO_ERROR ){ APIRET rcu = NO_ERROR; /* return code for unlocking */ LockArea.lOffset = 0L; @@ -447,10 +448,10 @@ static int os2CheckReservedLock( sqlite3_file *id, int *pOut ){ UnlockArea.lOffset = RESERVED_BYTE; UnlockArea.lRange = 1L; rcu = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, LOCK_TIMEOUT, 0L ); - OSTRACE3( "TEST WR-LOCK %d unlock reserved byte r=%d\n", pFile->h, rcu ); + OSTRACE(( "TEST WR-LOCK %d unlock reserved byte r=%d\n", pFile->h, rcu )); } r = !(rc == NO_ERROR); - OSTRACE3( "TEST WR-LOCK %d %d (remote)\n", pFile->h, r ); + OSTRACE(( "TEST WR-LOCK %d %d (remote)\n", pFile->h, r )); } *pOut = r; return SQLITE_OK; @@ -478,7 +479,7 @@ static int os2Unlock( sqlite3_file *id, int locktype ){ memset(&UnlockArea, 0, sizeof(UnlockArea)); assert( pFile!=0 ); assert( locktype<=SHARED_LOCK ); - OSTRACE4( "UNLOCK %d to %d was %d\n", pFile->h, locktype, pFile->locktype ); + OSTRACE(( "UNLOCK %d to %d was %d\n", pFile->h, locktype, pFile->locktype )); type = pFile->locktype; if( type>=EXCLUSIVE_LOCK ){ LockArea.lOffset = 0L; @@ -486,11 +487,11 @@ static int os2Unlock( sqlite3_file *id, int locktype ){ UnlockArea.lOffset = SHARED_FIRST; UnlockArea.lRange = SHARED_SIZE; res = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, LOCK_TIMEOUT, 0L ); - OSTRACE3( "UNLOCK %d exclusive lock res=%d\n", pFile->h, res ); + OSTRACE(( "UNLOCK %d exclusive lock res=%d\n", pFile->h, res )); if( locktype==SHARED_LOCK && getReadLock(pFile) != NO_ERROR ){ /* This should never happen. 
We should always be able to ** reacquire the read lock */ - OSTRACE3( "UNLOCK %d to %d getReadLock() failed\n", pFile->h, locktype ); + OSTRACE(( "UNLOCK %d to %d getReadLock() failed\n", pFile->h, locktype )); rc = SQLITE_IOERR_UNLOCK; } } @@ -500,11 +501,12 @@ static int os2Unlock( sqlite3_file *id, int locktype ){ UnlockArea.lOffset = RESERVED_BYTE; UnlockArea.lRange = 1L; res = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, LOCK_TIMEOUT, 0L ); - OSTRACE3( "UNLOCK %d reserved res=%d\n", pFile->h, res ); + OSTRACE(( "UNLOCK %d reserved res=%d\n", pFile->h, res )); } if( locktype==NO_LOCK && type>=SHARED_LOCK ){ res = unlockReadLock(pFile); - OSTRACE5( "UNLOCK %d is %d want %d res=%d\n", pFile->h, type, locktype, res ); + OSTRACE(( "UNLOCK %d is %d want %d res=%d\n", + pFile->h, type, locktype, res )); } if( type>=PENDING_LOCK ){ LockArea.lOffset = 0L; @@ -512,10 +514,10 @@ static int os2Unlock( sqlite3_file *id, int locktype ){ UnlockArea.lOffset = PENDING_BYTE; UnlockArea.lRange = 1L; res = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, LOCK_TIMEOUT, 0L ); - OSTRACE3( "UNLOCK %d pending res=%d\n", pFile->h, res ); + OSTRACE(( "UNLOCK %d pending res=%d\n", pFile->h, res )); } pFile->locktype = locktype; - OSTRACE3( "UNLOCK %d now %d\n", pFile->h, pFile->locktype ); + OSTRACE(( "UNLOCK %d now %d\n", pFile->h, pFile->locktype )); return rc; } @@ -526,7 +528,8 @@ static int os2FileControl(sqlite3_file *id, int op, void *pArg){ switch( op ){ case SQLITE_FCNTL_LOCKSTATE: { *(int*)pArg = ((os2File*)id)->locktype; - OSTRACE3( "FCNTL_LOCKSTATE %d lock=%d\n", ((os2File*)id)->h, ((os2File*)id)->locktype ); + OSTRACE(( "FCNTL_LOCKSTATE %d lock=%d\n", + ((os2File*)id)->h, ((os2File*)id)->locktype )); return SQLITE_OK; } } @@ -713,7 +716,7 @@ static int getTempname(int nBuf, char *zBuf ){ zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ]; } zBuf[j] = 0; - OSTRACE2( "TEMP FILENAME: %s\n", zBuf ); + OSTRACE(( "TEMP FILENAME: %s\n", zBuf )); return SQLITE_OK; } @@ -776,30 +779,30 @@ static int os2Open( memset( pFile, 0, sizeof(*pFile) ); - OSTRACE2( "OPEN want %d\n", flags ); + OSTRACE( "OPEN want %d\n", flags )); if( flags & SQLITE_OPEN_READWRITE ){ ulOpenMode |= OPEN_ACCESS_READWRITE; - OSTRACE1( "OPEN read/write\n" ); + OSTRACE(( "OPEN read/write\n" )); }else{ ulOpenMode |= OPEN_ACCESS_READONLY; - OSTRACE1( "OPEN read only\n" ); + OSTRACE(( "OPEN read only\n" )); } if( flags & SQLITE_OPEN_CREATE ){ ulOpenFlags |= OPEN_ACTION_OPEN_IF_EXISTS | OPEN_ACTION_CREATE_IF_NEW; - OSTRACE1( "OPEN open new/create\n" ); + OSTRACE(( "OPEN open new/create\n" )); }else{ ulOpenFlags |= OPEN_ACTION_OPEN_IF_EXISTS | OPEN_ACTION_FAIL_IF_NEW; - OSTRACE1( "OPEN open existing\n" ); + OSTRACE(( "OPEN open existing\n" )); } if( flags & SQLITE_OPEN_MAIN_DB ){ ulOpenMode |= OPEN_SHARE_DENYNONE; - OSTRACE1( "OPEN share read/write\n" ); + OSTRACE(( "OPEN share read/write\n" )); }else{ ulOpenMode |= OPEN_SHARE_DENYWRITE; - OSTRACE1( "OPEN share read only\n" ); + OSTRACE(( "OPEN share read only\n" )); } if( flags & SQLITE_OPEN_DELETEONCLOSE ){ @@ -809,10 +812,10 @@ static int os2Open( #endif os2FullPathname( pVfs, zName, CCHMAXPATH, pathUtf8 ); pFile->pathToDel = convertUtf8PathToCp( pathUtf8 ); - OSTRACE1( "OPEN hidden/delete on close file attributes\n" ); + OSTRACE(( "OPEN hidden/delete on close file attributes\n" )); }else{ pFile->pathToDel = NULL; - OSTRACE1( "OPEN normal file attribute\n" ); + OSTRACE(( "OPEN normal file attribute\n" )); } /* always open in random access mode for possibly better 
speed */ @@ -831,13 +834,14 @@ static int os2Open( (PEAOP2)NULL ); free( zNameCp ); if( rc != NO_ERROR ){ - OSTRACE7( "OPEN Invalid handle rc=%d: zName=%s, ulAction=%#lx, ulAttr=%#lx, ulFlags=%#lx, ulMode=%#lx\n", - rc, zName, ulAction, ulFileAttribute, ulOpenFlags, ulOpenMode ); + OSTRACE(( "OPEN Invalid handle rc=%d: zName=%s, ulAction=%#lx, ulAttr=%#lx, ulFlags=%#lx, ulMode=%#lx\n", + rc, zName, ulAction, ulFileAttribute, ulOpenFlags, ulOpenMode )); if( pFile->pathToDel ) free( pFile->pathToDel ); pFile->pathToDel = NULL; if( flags & SQLITE_OPEN_READWRITE ){ - OSTRACE2( "OPEN %d Invalid handle\n", ((flags | SQLITE_OPEN_READONLY) & ~SQLITE_OPEN_READWRITE) ); + OSTRACE(( "OPEN %d Invalid handle\n", + ((flags | SQLITE_OPEN_READONLY) & ~SQLITE_OPEN_READWRITE) )); return os2Open( pVfs, zName, id, ((flags | SQLITE_OPEN_READONLY) & ~SQLITE_OPEN_READWRITE), pOutFlags ); @@ -853,7 +857,7 @@ static int os2Open( pFile->pMethod = &os2IoMethod; pFile->h = h; OpenCounter(+1); - OSTRACE3( "OPEN %d pOutFlags=%d\n", pFile->h, pOutFlags ); + OSTRACE(( "OPEN %d pOutFlags=%d\n", pFile->h, pOutFlags )); return SQLITE_OK; } @@ -870,7 +874,7 @@ static int os2Delete( SimulateIOError( return SQLITE_IOERR_DELETE ); rc = DosDelete( (PSZ)zFilenameCp ); free( zFilenameCp ); - OSTRACE2( "DELETE \"%s\"\n", zFilename ); + OSTRACE(( "DELETE \"%s\"\n", zFilename )); return rc == NO_ERROR ? SQLITE_OK : SQLITE_IOERR_DELETE; } @@ -891,17 +895,17 @@ static int os2Access( rc = DosQueryPathInfo( (PSZ)zFilenameCp, FIL_STANDARD, &fsts3ConfigInfo, sizeof(FILESTATUS3) ); free( zFilenameCp ); - OSTRACE4( "ACCESS fsts3ConfigInfo.attrFile=%d flags=%d rc=%d\n", - fsts3ConfigInfo.attrFile, flags, rc ); + OSTRACE(( "ACCESS fsts3ConfigInfo.attrFile=%d flags=%d rc=%d\n", + fsts3ConfigInfo.attrFile, flags, rc )); switch( flags ){ case SQLITE_ACCESS_READ: case SQLITE_ACCESS_EXISTS: rc = (rc == NO_ERROR); - OSTRACE3( "ACCESS %s access of read and exists rc=%d\n", zFilename, rc ); + OSTRACE(( "ACCESS %s access of read and exists rc=%d\n", zFilename, rc)); break; case SQLITE_ACCESS_READWRITE: rc = (rc == NO_ERROR) && ( (fsts3ConfigInfo.attrFile & FILE_READONLY) == 0 ); - OSTRACE3( "ACCESS %s access of read/write rc=%d\n", zFilename, rc ); + OSTRACE(( "ACCESS %s access of read/write rc=%d\n", zFilename, rc )); break; default: assert( !"Invalid flags argument" ); @@ -1111,7 +1115,7 @@ int sqlite3_os_init(void){ os2Randomness, /* xRandomness */ os2Sleep, /* xSleep */ os2CurrentTime, /* xCurrentTime */ - os2GetLastError /* xGetLastError */ + os2GetLastError, /* xGetLastError */ }; sqlite3_vfs_register(&os2Vfs, 1); initUconvObjects(); diff --git a/src/os_unix.c b/src/os_unix.c index 769e75d..9457516 100644 --- a/src/os_unix.c +++ b/src/os_unix.c @@ -119,6 +119,7 @@ #include #include #include +#include #if SQLITE_ENABLE_LOCKING_STYLE # include @@ -174,6 +175,11 @@ */ #define IS_LOCK_ERROR(x) ((x != SQLITE_OK) && (x != SQLITE_BUSY)) +/* Forward references */ +typedef struct unixShm unixShm; /* Connection shared memory */ +typedef struct unixShmNode unixShmNode; /* Shared memory instance */ +typedef struct unixInodeInfo unixInodeInfo; /* An i-node */ +typedef struct UnixUnusedFd UnixUnusedFd; /* An unused file descriptor */ /* ** Sometimes, after a file handle is closed by SQLite, the file descriptor @@ -181,7 +187,6 @@ ** structure are used to store the file descriptor while waiting for an ** opportunity to either close or reuse it. 
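One detail worth noting in the os2Open hunk above: when a read/write open fails, the routine retries once as read-only by rewriting the flags word with (flags | SQLITE_OPEN_READONLY) & ~SQLITE_OPEN_READWRITE. The same clear-one-bit, set-another rewrite in isolation (the flag values and helper below are made up for the example):

#include <stdio.h>

#define OPEN_READONLY  0x01
#define OPEN_READWRITE 0x02
#define OPEN_CREATE    0x04

/* Stand-in for the real open call: pretend write access is never granted. */
static int tryOpen(int flags){
  return (flags & OPEN_READWRITE) ? -1 : 0;
}

static int openWithFallback(int flags){
  int rc = tryOpen(flags);
  if( rc!=0 && (flags & OPEN_READWRITE) ){
    /* Demote the request: drop READWRITE, add READONLY, keep everything else. */
    int roFlags = (flags | OPEN_READONLY) & ~OPEN_READWRITE;
    rc = tryOpen(roFlags);
  }
  return rc;
}

int main(void){
  printf("%d\n", openWithFallback(OPEN_READWRITE|OPEN_CREATE));   /* 0: fell back */
  return 0;
}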
*/ -typedef struct UnixUnusedFd UnixUnusedFd; struct UnixUnusedFd { int fd; /* File descriptor to close */ int flags; /* Flags this file descriptor was opened with */ @@ -195,27 +200,25 @@ struct UnixUnusedFd { typedef struct unixFile unixFile; struct unixFile { sqlite3_io_methods const *pMethod; /* Always the first entry */ - struct unixOpenCnt *pOpen; /* Info about all open fd's on this inode */ - struct unixLockInfo *pLock; /* Info about locks on this inode */ - int h; /* The file descriptor */ - int dirfd; /* File descriptor for the directory */ - unsigned char locktype; /* The type of lock held on this fd */ - int lastErrno; /* The unix errno from the last I/O error */ - void *lockingContext; /* Locking style specific state */ - UnixUnusedFd *pUnused; /* Pre-allocated UnixUnusedFd */ - int fileFlags; /* Miscellanous flags */ + unixInodeInfo *pInode; /* Info about locks on this inode */ + int h; /* The file descriptor */ + int dirfd; /* File descriptor for the directory */ + unsigned char eFileLock; /* The type of lock held on this fd */ + int lastErrno; /* The unix errno from last I/O error */ + void *lockingContext; /* Locking style specific state */ + UnixUnusedFd *pUnused; /* Pre-allocated UnixUnusedFd */ + int fileFlags; /* Miscellanous flags */ + const char *zPath; /* Name of the file */ + unixShm *pShm; /* Shared memory segment information */ #if SQLITE_ENABLE_LOCKING_STYLE - int openFlags; /* The flags specified at open() */ + int openFlags; /* The flags specified at open() */ #endif #if SQLITE_ENABLE_LOCKING_STYLE || defined(__APPLE__) - unsigned fsFlags; /* cached details from statfs() */ -#endif -#if SQLITE_THREADSAFE && defined(__linux__) - pthread_t tid; /* The thread that "owns" this unixFile */ + unsigned fsFlags; /* cached details from statfs() */ #endif #if OS_VXWORKS - int isDelete; /* Delete on close if true */ - struct vxworksFileId *pId; /* Unique file ID */ + int isDelete; /* Delete on close if true */ + struct vxworksFileId *pId; /* Unique file ID */ #endif #ifndef NDEBUG /* The next group of variables are used to track whether or not the @@ -287,7 +290,7 @@ struct unixFile { /* ** Helper functions to obtain and relinquish the global mutex. The -** global mutex is used to protect the unixOpenCnt, unixLockInfo and +** global mutex is used to protect the unixInodeInfo and ** vxworksFileId objects used by this file, all of which may be ** shared by multiple threads. ** @@ -318,8 +321,8 @@ static int unixMutexHeld(void) { ** binaries. This returns the string represetation of the supplied ** integer lock-type. */ -static const char *locktypeName(int locktype){ - switch( locktype ){ +static const char *azFileLock(int eFileLock){ + switch( eFileLock ){ case NO_LOCK: return "NONE"; case SHARED_LOCK: return "SHARED"; case RESERVED_LOCK: return "RESERVED"; @@ -656,13 +659,12 @@ static void vxworksReleaseFileId(struct vxworksFileId *pId){ ** ** If you close a file descriptor that points to a file that has locks, ** all locks on that file that are owned by the current process are -** released. To work around this problem, each unixFile structure contains -** a pointer to an unixOpenCnt structure. There is one unixOpenCnt structure -** per open inode, which means that multiple unixFile can point to a single -** unixOpenCnt. When an attempt is made to close an unixFile, if there are +** released. To work around this problem, each unixInodeInfo object +** maintains a count of the number of pending locks on tha inode. 
+** When an attempt is made to close an unixFile, if there are ** other unixFile open on the same inode that are holding locks, the call ** to close() the file descriptor is deferred until all of the locks clear. -** The unixOpenCnt structure keeps a list of file descriptors that need to +** The unixInodeInfo structure keeps a list of file descriptors that need to ** be closed and that list is walked (and cleared) when the last lock ** clears. ** @@ -677,46 +679,19 @@ static void vxworksReleaseFileId(struct vxworksFileId *pId){ ** in thread B. But there is no way to know at compile-time which ** threading library is being used. So there is no way to know at ** compile-time whether or not thread A can override locks on thread B. -** We have to do a run-time check to discover the behavior of the +** One has to do a run-time check to discover the behavior of the ** current process. ** -** On systems where thread A is unable to modify locks created by -** thread B, we have to keep track of which thread created each -** lock. Hence there is an extra field in the key to the unixLockInfo -** structure to record this information. And on those systems it -** is illegal to begin a transaction in one thread and finish it -** in another. For this latter restriction, there is no work-around. -** It is a limitation of LinuxThreads. +** SQLite used to support LinuxThreads. But support for LinuxThreads +** was dropped beginning with version 3.7.0. SQLite will still work with +** LinuxThreads provided that (1) there is no more than one connection +** per database file in the same process and (2) database connections +** do not move across threads. */ -/* -** Set or check the unixFile.tid field. This field is set when an unixFile -** is first opened. All subsequent uses of the unixFile verify that the -** same thread is operating on the unixFile. Some operating systems do -** not allow locks to be overridden by other threads and that restriction -** means that sqlite3* database handles cannot be moved from one thread -** to another while locks are held. -** -** Version 3.3.1 (2006-01-15): unixFile can be moved from one thread to -** another as long as we are running on a system that supports threads -** overriding each others locks (which is now the most common behavior) -** or if no locks are held. But the unixFile.pLock field needs to be -** recomputed because its key includes the thread-id. See the -** transferOwnership() function below for additional information -*/ -#if SQLITE_THREADSAFE && defined(__linux__) -# define SET_THREADID(X) (X)->tid = pthread_self() -# define CHECK_THREADID(X) (threadsOverrideEachOthersLocks==0 && \ - !pthread_equal((X)->tid, pthread_self())) -#else -# define SET_THREADID(X) -# define CHECK_THREADID(X) 0 -#endif - /* ** An instance of the following structure serves as the key used -** to locate a particular unixOpenCnt structure given its inode. This -** is the same as the unixLockKey except that the thread ID is omitted. +** to locate a particular unixInodeInfo object. */ struct unixFileId { dev_t dev; /* Device number */ @@ -727,23 +702,6 @@ struct unixFileId { #endif }; -/* -** An instance of the following structure serves as the key used -** to locate a particular unixLockInfo structure given its inode. -** -** If threads cannot override each others locks (LinuxThreads), then we -** set the unixLockKey.tid field to the thread ID. If threads can override -** each others locks (Posix and NPTL) then tid is always set to zero. 
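With the LinuxThreads-era thread id gone from the key, lock state is now located purely by the (device, inode) pair that fstat() reports, and every unixFile open on the same file ends up sharing one unixInodeInfo. A condensed sketch of that find-or-create lookup, written independently of the patch (the structure and function names here are illustrative):

#include <sys/types.h>
#include <sys/stat.h>
#include <stdlib.h>
#include <string.h>

typedef struct FileKey FileKey;
typedef struct InodeInfo InodeInfo;

struct FileKey {
  dev_t dev;                      /* device number */
  ino_t ino;                      /* inode number */
};

struct InodeInfo {
  FileKey key;                    /* lookup key */
  int nRef;                       /* number of handles pointing here */
  InodeInfo *pNext;               /* list of all InodeInfo objects */
};

static InodeInfo *inodeList = 0;  /* protected by a global mutex in real code */

/* Find the InodeInfo for open descriptor fd, creating one if needed.
** Returns 0 if fstat() fails or memory runs out. */
static InodeInfo *findInode(int fd){
  struct stat st;
  FileKey key;
  InodeInfo *p;
  if( fstat(fd, &st) ) return 0;
  memset(&key, 0, sizeof(key));   /* zero the padding so memcmp() is reliable */
  key.dev = st.st_dev;
  key.ino = st.st_ino;
  for(p=inodeList; p; p=p->pNext){
    if( memcmp(&key, &p->key, sizeof(key))==0 ) break;
  }
  if( p==0 ){
    p = calloc(1, sizeof(*p));
    if( p==0 ) return 0;
    memcpy(&p->key, &key, sizeof(key));
    p->pNext = inodeList;
    inodeList = p;
  }
  p->nRef++;
  return p;
}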
-** tid is omitted if we compile without threading support or on an OS -** other than linux. -*/ -struct unixLockKey { - struct unixFileId fid; /* Unique identifier for the file */ -#if SQLITE_THREADSAFE && defined(__linux__) - pthread_t tid; /* Thread ID of lock owner. Zero if not using LinuxThreads */ -#endif -}; - /* ** An instance of the following structure is allocated for each open ** inode. Or, on LinuxThreads, there is one of these structures for @@ -753,230 +711,109 @@ struct unixLockKey { ** structure contains a pointer to an instance of this object and this ** object keeps a count of the number of unixFile pointing to it. */ -struct unixLockInfo { - struct unixLockKey lockKey; /* The lookup key */ - int cnt; /* Number of SHARED locks held */ - int locktype; /* One of SHARED_LOCK, RESERVED_LOCK etc. */ +struct unixInodeInfo { + struct unixFileId fileId; /* The lookup key */ + int nShared; /* Number of SHARED locks held */ + int eFileLock; /* One of SHARED_LOCK, RESERVED_LOCK etc. */ int nRef; /* Number of pointers to this structure */ + unixShmNode *pShmNode; /* Shared memory associated with this inode */ + int nLock; /* Number of outstanding file locks */ + UnixUnusedFd *pUnused; /* Unused file descriptors to close */ + unixInodeInfo *pNext; /* List of all unixInodeInfo objects */ + unixInodeInfo *pPrev; /* .... doubly linked */ #if defined(SQLITE_ENABLE_LOCKING_STYLE) unsigned long long sharedByte; /* for AFP simulated shared lock */ #endif - struct unixLockInfo *pNext; /* List of all unixLockInfo objects */ - struct unixLockInfo *pPrev; /* .... doubly linked */ -}; - -/* -** An instance of the following structure is allocated for each open -** inode. This structure keeps track of the number of locks on that -** inode. If a close is attempted against an inode that is holding -** locks, the close is deferred until all locks clear by adding the -** file descriptor to be closed to the pending list. -** -** TODO: Consider changing this so that there is only a single file -** descriptor for each open file, even when it is opened multiple times. -** The close() system call would only occur when the last database -** using the file closes. -*/ -struct unixOpenCnt { - struct unixFileId fileId; /* The lookup key */ - int nRef; /* Number of pointers to this structure */ - int nLock; /* Number of outstanding locks */ - UnixUnusedFd *pUnused; /* Unused file descriptors to close */ #if OS_VXWORKS - sem_t *pSem; /* Named POSIX semaphore */ - char aSemName[MAX_PATHNAME+2]; /* Name of that semaphore */ + sem_t *pSem; /* Named POSIX semaphore */ + char aSemName[MAX_PATHNAME+2]; /* Name of that semaphore */ #endif - struct unixOpenCnt *pNext, *pPrev; /* List of all unixOpenCnt objects */ }; /* -** Lists of all unixLockInfo and unixOpenCnt objects. These used to be hash -** tables. But the number of objects is rarely more than a dozen and -** never exceeds a few thousand. And lookup is not on a critical -** path so a simple linked list will suffice. +** A lists of all unixInodeInfo objects. */ -static struct unixLockInfo *lockList = 0; -static struct unixOpenCnt *openList = 0; +static unixInodeInfo *inodeList = 0; /* -** This variable remembers whether or not threads can override each others -** locks. +** Close all file descriptors accumuated in the unixInodeInfo->pUnused list. +** If all such file descriptors are closed without error, the list is +** cleared and SQLITE_OK returned. ** -** 0: No. Threads cannot override each others locks. (LinuxThreads) -** 1: Yes. 
Threads can override each others locks. (Posix & NLPT) -** -1: We don't know yet. -** -** On some systems, we know at compile-time if threads can override each -** others locks. On those systems, the SQLITE_THREAD_OVERRIDE_LOCK macro -** will be set appropriately. On other systems, we have to check at -** runtime. On these latter systems, SQLTIE_THREAD_OVERRIDE_LOCK is -** undefined. -** -** This variable normally has file scope only. But during testing, we make -** it a global so that the test code can change its value in order to verify -** that the right stuff happens in either case. -*/ -#if SQLITE_THREADSAFE && defined(__linux__) -# ifndef SQLITE_THREAD_OVERRIDE_LOCK -# define SQLITE_THREAD_OVERRIDE_LOCK -1 -# endif -# ifdef SQLITE_TEST -int threadsOverrideEachOthersLocks = SQLITE_THREAD_OVERRIDE_LOCK; -# else -static int threadsOverrideEachOthersLocks = SQLITE_THREAD_OVERRIDE_LOCK; -# endif -#endif - -/* -** This structure holds information passed into individual test -** threads by the testThreadLockingBehavior() routine. -*/ -struct threadTestData { - int fd; /* File to be locked */ - struct flock lock; /* The locking operation */ - int result; /* Result of the locking operation */ -}; - -#if SQLITE_THREADSAFE && defined(__linux__) -/* -** This function is used as the main routine for a thread launched by -** testThreadLockingBehavior(). It tests whether the shared-lock obtained -** by the main thread in testThreadLockingBehavior() conflicts with a -** hypothetical write-lock obtained by this thread on the same file. -** -** The write-lock is not actually acquired, as this is not possible if -** the file is open in read-only mode (see ticket #3472). +** Otherwise, if an error occurs, then successfully closed file descriptor +** entries are removed from the list, and SQLITE_IOERR_CLOSE returned. +** not deleted and SQLITE_IOERR_CLOSE returned. */ -static void *threadLockingTest(void *pArg){ - struct threadTestData *pData = (struct threadTestData*)pArg; - pData->result = fcntl(pData->fd, F_GETLK, &pData->lock); - return pArg; -} -#endif /* SQLITE_THREADSAFE && defined(__linux__) */ - - -#if SQLITE_THREADSAFE && defined(__linux__) -/* -** This procedure attempts to determine whether or not threads -** can override each others locks then sets the -** threadsOverrideEachOthersLocks variable appropriately. -*/ -static void testThreadLockingBehavior(int fd_orig){ - int fd; - int rc; - struct threadTestData d; - struct flock l; - pthread_t t; - - fd = dup(fd_orig); - if( fd<0 ) return; - memset(&l, 0, sizeof(l)); - l.l_type = F_RDLCK; - l.l_len = 1; - l.l_start = 0; - l.l_whence = SEEK_SET; - rc = fcntl(fd_orig, F_SETLK, &l); - if( rc!=0 ) return; - memset(&d, 0, sizeof(d)); - d.fd = fd; - d.lock = l; - d.lock.l_type = F_WRLCK; - if( pthread_create(&t, 0, threadLockingTest, &d)==0 ){ - pthread_join(t, 0); +static int closePendingFds(unixFile *pFile){ + int rc = SQLITE_OK; + unixInodeInfo *pInode = pFile->pInode; + UnixUnusedFd *pError = 0; + UnixUnusedFd *p; + UnixUnusedFd *pNext; + for(p=pInode->pUnused; p; p=pNext){ + pNext = p->pNext; + if( close(p->fd) ){ + pFile->lastErrno = errno; + rc = SQLITE_IOERR_CLOSE; + p->pNext = pError; + pError = p; + }else{ + sqlite3_free(p); + } } - close(fd); - if( d.result!=0 ) return; - threadsOverrideEachOthersLocks = (d.lock.l_type==F_UNLCK); + pInode->pUnused = pError; + return rc; } -#endif /* SQLITE_THREADSAFE && defined(__linux__) */ /* -** Release a unixLockInfo structure previously allocated by findLockInfo(). 
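The pUnused list and closePendingFds() above exist because of the POSIX behavior the surrounding comments describe: closing any descriptor a process holds on a file releases all of that process's advisory locks on the file, including locks taken through a different descriptor. The short standalone program below demonstrates the effect by probing from a forked child (the scratch file name is arbitrary):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Fork a child and ask, via F_GETLK, whether another process holds a
** write lock on byte 0 of zPath.  Returns 1 if so, 0 otherwise. */
static int lockedByOther(const char *zPath){
  int st = 0;
  pid_t pid = fork();
  if( pid==0 ){
    struct flock l;
    int fd = open(zPath, O_RDWR);
    memset(&l, 0, sizeof(l));
    l.l_type = F_WRLCK; l.l_whence = SEEK_SET; l.l_start = 0; l.l_len = 1;
    fcntl(fd, F_GETLK, &l);         /* reports the parent's lock, if any */
    _exit(l.l_type!=F_UNLCK);
  }
  waitpid(pid, &st, 0);
  return WEXITSTATUS(st);
}

int main(void){
  const char *zPath = "./locktest.tmp";    /* throwaway demo file */
  struct flock l;
  int fd1 = open(zPath, O_RDWR|O_CREAT, 0644);
  int fd2 = open(zPath, O_RDWR);           /* second descriptor, same inode */
  memset(&l, 0, sizeof(l));
  l.l_type = F_WRLCK; l.l_whence = SEEK_SET; l.l_start = 0; l.l_len = 1;
  fcntl(fd1, F_SETLK, &l);                 /* write-lock byte 0 through fd1 */
  printf("held after F_SETLK:    %d\n", lockedByOther(zPath));   /* expect 1 */
  close(fd2);                              /* close the OTHER descriptor... */
  printf("held after close(fd2): %d\n", lockedByOther(zPath));   /* expect 0 */
  close(fd1);
  unlink(zPath);
  return 0;
}

The workaround in the patch is the one the comments spell out: descriptors that cannot be closed safely are parked on the inode's pUnused list and closed from closePendingFds() once the outstanding lock count drops to zero.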
+** Release a unixInodeInfo structure previously allocated by findInodeInfo(). ** ** The mutex entered using the unixEnterMutex() function must be held ** when this function is called. */ -static void releaseLockInfo(struct unixLockInfo *pLock){ +static void releaseInodeInfo(unixFile *pFile){ + unixInodeInfo *pInode = pFile->pInode; assert( unixMutexHeld() ); - if( pLock ){ - pLock->nRef--; - if( pLock->nRef==0 ){ - if( pLock->pPrev ){ - assert( pLock->pPrev->pNext==pLock ); - pLock->pPrev->pNext = pLock->pNext; + if( pInode ){ + pInode->nRef--; + if( pInode->nRef==0 ){ + assert( pInode->pShmNode==0 ); + closePendingFds(pFile); + if( pInode->pPrev ){ + assert( pInode->pPrev->pNext==pInode ); + pInode->pPrev->pNext = pInode->pNext; }else{ - assert( lockList==pLock ); - lockList = pLock->pNext; + assert( inodeList==pInode ); + inodeList = pInode->pNext; } - if( pLock->pNext ){ - assert( pLock->pNext->pPrev==pLock ); - pLock->pNext->pPrev = pLock->pPrev; + if( pInode->pNext ){ + assert( pInode->pNext->pPrev==pInode ); + pInode->pNext->pPrev = pInode->pPrev; } - sqlite3_free(pLock); + sqlite3_free(pInode); } } } /* -** Release a unixOpenCnt structure previously allocated by findLockInfo(). -** -** The mutex entered using the unixEnterMutex() function must be held -** when this function is called. -*/ -static void releaseOpenCnt(struct unixOpenCnt *pOpen){ - assert( unixMutexHeld() ); - if( pOpen ){ - pOpen->nRef--; - if( pOpen->nRef==0 ){ - if( pOpen->pPrev ){ - assert( pOpen->pPrev->pNext==pOpen ); - pOpen->pPrev->pNext = pOpen->pNext; - }else{ - assert( openList==pOpen ); - openList = pOpen->pNext; - } - if( pOpen->pNext ){ - assert( pOpen->pNext->pPrev==pOpen ); - pOpen->pNext->pPrev = pOpen->pPrev; - } -#if SQLITE_THREADSAFE && defined(__linux__) - assert( !pOpen->pUnused || threadsOverrideEachOthersLocks==0 ); -#endif - - /* If pOpen->pUnused is not null, then memory and file-descriptors - ** are leaked. - ** - ** This will only happen if, under Linuxthreads, the user has opened - ** a transaction in one thread, then attempts to close the database - ** handle from another thread (without first unlocking the db file). - ** This is a misuse. */ - sqlite3_free(pOpen); - } - } -} - -/* -** Given a file descriptor, locate unixLockInfo and unixOpenCnt structures that -** describes that file descriptor. Create new ones if necessary. The -** return values might be uninitialized if an error occurs. +** Given a file descriptor, locate the unixInodeInfo object that +** describes that file descriptor. Create a new one if necessary. The +** return value might be uninitialized if an error occurs. ** ** The mutex entered using the unixEnterMutex() function must be held ** when this function is called. ** ** Return an appropriate error code. 
*/ -static int findLockInfo( +static int findInodeInfo( unixFile *pFile, /* Unix file with file desc used in the key */ - struct unixLockInfo **ppLock, /* Return the unixLockInfo structure here */ - struct unixOpenCnt **ppOpen /* Return the unixOpenCnt structure here */ + unixInodeInfo **ppInode /* Return the unixInodeInfo object here */ ){ int rc; /* System call return code */ int fd; /* The file descriptor for pFile */ - struct unixLockKey lockKey; /* Lookup key for the unixLockInfo structure */ - struct unixFileId fileId; /* Lookup key for the unixOpenCnt struct */ + struct unixFileId fileId; /* Lookup key for the unixInodeInfo */ struct stat statbuf; /* Low-level file information */ - struct unixLockInfo *pLock = 0;/* Candidate unixLockInfo object */ - struct unixOpenCnt *pOpen; /* Candidate unixOpenCnt object */ + unixInodeInfo *pInode = 0; /* Candidate unixInodeInfo object */ assert( unixMutexHeld() ); @@ -1018,123 +855,36 @@ static int findLockInfo( } #endif - memset(&lockKey, 0, sizeof(lockKey)); - lockKey.fid.dev = statbuf.st_dev; + memset(&fileId, 0, sizeof(fileId)); + fileId.dev = statbuf.st_dev; #if OS_VXWORKS - lockKey.fid.pId = pFile->pId; + fileId.pId = pFile->pId; #else - lockKey.fid.ino = statbuf.st_ino; + fileId.ino = statbuf.st_ino; #endif -#if SQLITE_THREADSAFE && defined(__linux__) - if( threadsOverrideEachOthersLocks<0 ){ - testThreadLockingBehavior(fd); + pInode = inodeList; + while( pInode && memcmp(&fileId, &pInode->fileId, sizeof(fileId)) ){ + pInode = pInode->pNext; } - lockKey.tid = threadsOverrideEachOthersLocks ? 0 : pthread_self(); -#endif - fileId = lockKey.fid; - if( ppLock!=0 ){ - pLock = lockList; - while( pLock && memcmp(&lockKey, &pLock->lockKey, sizeof(lockKey)) ){ - pLock = pLock->pNext; + if( pInode==0 ){ + pInode = sqlite3_malloc( sizeof(*pInode) ); + if( pInode==0 ){ + return SQLITE_NOMEM; } - if( pLock==0 ){ - pLock = sqlite3_malloc( sizeof(*pLock) ); - if( pLock==0 ){ - rc = SQLITE_NOMEM; - goto exit_findlockinfo; - } - memcpy(&pLock->lockKey,&lockKey,sizeof(lockKey)); - pLock->nRef = 1; - pLock->cnt = 0; - pLock->locktype = 0; -#if defined(SQLITE_ENABLE_LOCKING_STYLE) - pLock->sharedByte = 0; -#endif - pLock->pNext = lockList; - pLock->pPrev = 0; - if( lockList ) lockList->pPrev = pLock; - lockList = pLock; - }else{ - pLock->nRef++; - } - *ppLock = pLock; + memset(pInode, 0, sizeof(*pInode)); + memcpy(&pInode->fileId, &fileId, sizeof(fileId)); + pInode->nRef = 1; + pInode->pNext = inodeList; + pInode->pPrev = 0; + if( inodeList ) inodeList->pPrev = pInode; + inodeList = pInode; + }else{ + pInode->nRef++; } - if( ppOpen!=0 ){ - pOpen = openList; - while( pOpen && memcmp(&fileId, &pOpen->fileId, sizeof(fileId)) ){ - pOpen = pOpen->pNext; - } - if( pOpen==0 ){ - pOpen = sqlite3_malloc( sizeof(*pOpen) ); - if( pOpen==0 ){ - releaseLockInfo(pLock); - rc = SQLITE_NOMEM; - goto exit_findlockinfo; - } - memset(pOpen, 0, sizeof(*pOpen)); - pOpen->fileId = fileId; - pOpen->nRef = 1; - pOpen->pNext = openList; - if( openList ) openList->pPrev = pOpen; - openList = pOpen; - }else{ - pOpen->nRef++; - } - *ppOpen = pOpen; - } - -exit_findlockinfo: - return rc; + *ppInode = pInode; + return SQLITE_OK; } -/* -** If we are currently in a different thread than the thread that the -** unixFile argument belongs to, then transfer ownership of the unixFile -** over to the current thread. -** -** A unixFile is only owned by a thread on systems that use LinuxThreads. -** -** Ownership transfer is only allowed if the unixFile is currently unlocked. 
-** If the unixFile is locked and an ownership is wrong, then return -** SQLITE_MISUSE. SQLITE_OK is returned if everything works. -*/ -#if SQLITE_THREADSAFE && defined(__linux__) -static int transferOwnership(unixFile *pFile){ - int rc; - pthread_t hSelf; - if( threadsOverrideEachOthersLocks ){ - /* Ownership transfers not needed on this system */ - return SQLITE_OK; - } - hSelf = pthread_self(); - if( pthread_equal(pFile->tid, hSelf) ){ - /* We are still in the same thread */ - OSTRACE1("No-transfer, same thread\n"); - return SQLITE_OK; - } - if( pFile->locktype!=NO_LOCK ){ - /* We cannot change ownership while we are holding a lock! */ - return SQLITE_MISUSE_BKPT; - } - OSTRACE4("Transfer ownership of %d from %d to %d\n", - pFile->h, pFile->tid, hSelf); - pFile->tid = hSelf; - if (pFile->pLock != NULL) { - releaseLockInfo(pFile->pLock); - rc = findLockInfo(pFile, &pFile->pLock, 0); - OSTRACE5("LOCK %d is now %s(%s,%d)\n", pFile->h, - locktypeName(pFile->locktype), - locktypeName(pFile->pLock->locktype), pFile->pLock->cnt); - return rc; - } else { - return SQLITE_OK; - } -} -#else /* if not SQLITE_THREADSAFE */ - /* On single-threaded builds, ownership transfer is a no-op */ -# define transferOwnership(X) SQLITE_OK -#endif /* SQLITE_THREADSAFE */ - /* ** This routine checks if there is a RESERVED lock held on the specified @@ -1150,10 +900,10 @@ static int unixCheckReservedLock(sqlite3_file *id, int *pResOut){ SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); assert( pFile ); - unixEnterMutex(); /* Because pFile->pLock is shared across threads */ + unixEnterMutex(); /* Because pFile->pInode is shared across threads */ /* Check if a thread in this process holds such a lock */ - if( pFile->pLock->locktype>SHARED_LOCK ){ + if( pFile->pInode->eFileLock>SHARED_LOCK ){ reserved = 1; } @@ -1177,14 +927,14 @@ static int unixCheckReservedLock(sqlite3_file *id, int *pResOut){ #endif unixLeaveMutex(); - OSTRACE4("TEST WR-LOCK %d %d %d (unix)\n", pFile->h, rc, reserved); + OSTRACE(("TEST WR-LOCK %d %d %d (unix)\n", pFile->h, rc, reserved)); *pResOut = reserved; return rc; } /* -** Lock the file with the lock specified by parameter locktype - one +** Lock the file with the lock specified by parameter eFileLock - one ** of the following: ** ** (1) SHARED_LOCK @@ -1207,7 +957,7 @@ static int unixCheckReservedLock(sqlite3_file *id, int *pResOut){ ** This routine will only increase a lock. Use the sqlite3OsUnlock() ** routine to lower a locking level. */ -static int unixLock(sqlite3_file *id, int locktype){ +static int unixLock(sqlite3_file *id, int eFileLock){ /* The following describes the implementation of the various locks and ** lock transitions in terms of the POSIX advisory shared and exclusive ** lock primitives (called read-locks and write-locks below, to avoid @@ -1248,23 +998,23 @@ static int unixLock(sqlite3_file *id, int locktype){ */ int rc = SQLITE_OK; unixFile *pFile = (unixFile*)id; - struct unixLockInfo *pLock = pFile->pLock; + unixInodeInfo *pInode = pFile->pInode; struct flock lock; int s = 0; int tErrno = 0; assert( pFile ); - OSTRACE7("LOCK %d %s was %s(%s,%d) pid=%d (unix)\n", pFile->h, - locktypeName(locktype), locktypeName(pFile->locktype), - locktypeName(pLock->locktype), pLock->cnt , getpid()); + OSTRACE(("LOCK %d %s was %s(%s,%d) pid=%d (unix)\n", pFile->h, + azFileLock(eFileLock), azFileLock(pFile->eFileLock), + azFileLock(pInode->eFileLock), pInode->nShared , getpid())); /* If there is already a lock of this type or more restrictive on the ** unixFile, do nothing. 
Don't use the end_lock: exit path, as ** unixEnterMutex() hasn't been called yet. */ - if( pFile->locktype>=locktype ){ - OSTRACE3("LOCK %d %s ok (already held) (unix)\n", pFile->h, - locktypeName(locktype)); + if( pFile->eFileLock>=eFileLock ){ + OSTRACE(("LOCK %d %s ok (already held) (unix)\n", pFile->h, + azFileLock(eFileLock))); return SQLITE_OK; } @@ -1273,28 +1023,20 @@ static int unixLock(sqlite3_file *id, int locktype){ ** (2) SQLite never explicitly requests a pendig lock. ** (3) A shared lock is always held when a reserve lock is requested. */ - assert( pFile->locktype!=NO_LOCK || locktype==SHARED_LOCK ); - assert( locktype!=PENDING_LOCK ); - assert( locktype!=RESERVED_LOCK || pFile->locktype==SHARED_LOCK ); + assert( pFile->eFileLock!=NO_LOCK || eFileLock==SHARED_LOCK ); + assert( eFileLock!=PENDING_LOCK ); + assert( eFileLock!=RESERVED_LOCK || pFile->eFileLock==SHARED_LOCK ); - /* This mutex is needed because pFile->pLock is shared across threads + /* This mutex is needed because pFile->pInode is shared across threads */ unixEnterMutex(); - - /* Make sure the current thread owns the pFile. - */ - rc = transferOwnership(pFile); - if( rc!=SQLITE_OK ){ - unixLeaveMutex(); - return rc; - } - pLock = pFile->pLock; + pInode = pFile->pInode; /* If some thread using this PID has a lock via a different unixFile* ** handle that precludes the requested lock, return BUSY. */ - if( (pFile->locktype!=pLock->locktype && - (pLock->locktype>=PENDING_LOCK || locktype>SHARED_LOCK)) + if( (pFile->eFileLock!=pInode->eFileLock && + (pInode->eFileLock>=PENDING_LOCK || eFileLock>SHARED_LOCK)) ){ rc = SQLITE_BUSY; goto end_lock; @@ -1304,14 +1046,14 @@ static int unixLock(sqlite3_file *id, int locktype){ ** has a SHARED or RESERVED lock, then increment reference counts and ** return SQLITE_OK. */ - if( locktype==SHARED_LOCK && - (pLock->locktype==SHARED_LOCK || pLock->locktype==RESERVED_LOCK) ){ - assert( locktype==SHARED_LOCK ); - assert( pFile->locktype==0 ); - assert( pLock->cnt>0 ); - pFile->locktype = SHARED_LOCK; - pLock->cnt++; - pFile->pOpen->nLock++; + if( eFileLock==SHARED_LOCK && + (pInode->eFileLock==SHARED_LOCK || pInode->eFileLock==RESERVED_LOCK) ){ + assert( eFileLock==SHARED_LOCK ); + assert( pFile->eFileLock==0 ); + assert( pInode->nShared>0 ); + pFile->eFileLock = SHARED_LOCK; + pInode->nShared++; + pInode->nLock++; goto end_lock; } @@ -1322,10 +1064,10 @@ static int unixLock(sqlite3_file *id, int locktype){ */ lock.l_len = 1L; lock.l_whence = SEEK_SET; - if( locktype==SHARED_LOCK - || (locktype==EXCLUSIVE_LOCK && pFile->locktypeeFileLockh, F_SETLK, &lock); if( s==(-1) ){ @@ -1342,9 +1084,9 @@ static int unixLock(sqlite3_file *id, int locktype){ /* If control gets to this point, then actually go ahead and make ** operating system calls for the specified lock. 
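For the SHARED transition, unixLock follows the sequence the long comment block describes: briefly lock the PENDING byte, read-lock the SHARED range, then drop the PENDING byte again. A stripped-down sketch of roughly that sequence, using the conventional byte offsets from os.h (error handling reduced to early returns; this is an illustration, not the patch's code):

#include <sys/types.h>
#include <fcntl.h>
#include <string.h>

#define PENDING_BYTE  0x40000000
#define RESERVED_BYTE (PENDING_BYTE+1)
#define SHARED_FIRST  (PENDING_BYTE+2)
#define SHARED_SIZE   510

static int setLock(int fd, short type, off_t start, off_t len){
  struct flock l;
  memset(&l, 0, sizeof(l));
  l.l_type = type;                 /* F_RDLCK, F_WRLCK or F_UNLCK */
  l.l_whence = SEEK_SET;
  l.l_start = start;
  l.l_len = len;
  return fcntl(fd, F_SETLK, &l);
}

/* Acquire a SHARED lock: take the PENDING byte, read-lock the shared
** range, then give the PENDING byte back.  0 on success, -1 if busy. */
static int takeSharedLock(int fd){
  if( setLock(fd, F_RDLCK, PENDING_BYTE, 1) ) return -1;
  if( setLock(fd, F_RDLCK, SHARED_FIRST, SHARED_SIZE) ){
    setLock(fd, F_UNLCK, PENDING_BYTE, 1);
    return -1;
  }
  return setLock(fd, F_UNLCK, PENDING_BYTE, 1);
}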
*/ - if( locktype==SHARED_LOCK ){ - assert( pLock->cnt==0 ); - assert( pLock->locktype==0 ); + if( eFileLock==SHARED_LOCK ){ + assert( pInode->nShared==0 ); + assert( pInode->eFileLock==0 ); /* Now get the read-lock */ lock.l_start = SHARED_FIRST; @@ -1373,11 +1115,11 @@ static int unixLock(sqlite3_file *id, int locktype){ pFile->lastErrno = tErrno; } }else{ - pFile->locktype = SHARED_LOCK; - pFile->pOpen->nLock++; - pLock->cnt = 1; + pFile->eFileLock = SHARED_LOCK; + pInode->nLock++; + pInode->nShared = 1; } - }else if( locktype==EXCLUSIVE_LOCK && pLock->cnt>1 ){ + }else if( eFileLock==EXCLUSIVE_LOCK && pInode->nShared>1 ){ /* We are trying for an exclusive lock but another thread in this ** same process is still holding a shared lock. */ rc = SQLITE_BUSY; @@ -1386,9 +1128,9 @@ static int unixLock(sqlite3_file *id, int locktype){ ** assumed that there is a SHARED or greater lock on the file ** already. */ - assert( 0!=pFile->locktype ); + assert( 0!=pFile->eFileLock ); lock.l_type = F_WRLCK; - switch( locktype ){ + switch( eFileLock ){ case RESERVED_LOCK: lock.l_start = RESERVED_BYTE; break; @@ -1417,8 +1159,8 @@ static int unixLock(sqlite3_file *id, int locktype){ ** write operation (not a hot journal rollback). */ if( rc==SQLITE_OK - && pFile->locktype<=SHARED_LOCK - && locktype==RESERVED_LOCK + && pFile->eFileLock<=SHARED_LOCK + && eFileLock==RESERVED_LOCK ){ pFile->transCntrChng = 0; pFile->dbUpdate = 0; @@ -1428,47 +1170,17 @@ static int unixLock(sqlite3_file *id, int locktype){ if( rc==SQLITE_OK ){ - pFile->locktype = locktype; - pLock->locktype = locktype; - }else if( locktype==EXCLUSIVE_LOCK ){ - pFile->locktype = PENDING_LOCK; - pLock->locktype = PENDING_LOCK; + pFile->eFileLock = eFileLock; + pInode->eFileLock = eFileLock; + }else if( eFileLock==EXCLUSIVE_LOCK ){ + pFile->eFileLock = PENDING_LOCK; + pInode->eFileLock = PENDING_LOCK; } end_lock: unixLeaveMutex(); - OSTRACE4("LOCK %d %s %s (unix)\n", pFile->h, locktypeName(locktype), - rc==SQLITE_OK ? "ok" : "failed"); - return rc; -} - -/* -** Close all file descriptors accumuated in the unixOpenCnt->pUnused list. -** If all such file descriptors are closed without error, the list is -** cleared and SQLITE_OK returned. -** -** Otherwise, if an error occurs, then successfully closed file descriptor -** entries are removed from the list, and SQLITE_IOERR_CLOSE returned. -** not deleted and SQLITE_IOERR_CLOSE returned. -*/ -static int closePendingFds(unixFile *pFile){ - int rc = SQLITE_OK; - struct unixOpenCnt *pOpen = pFile->pOpen; - UnixUnusedFd *pError = 0; - UnixUnusedFd *p; - UnixUnusedFd *pNext; - for(p=pOpen->pUnused; p; p=pNext){ - pNext = p->pNext; - if( close(p->fd) ){ - pFile->lastErrno = errno; - rc = SQLITE_IOERR_CLOSE; - p->pNext = pError; - pError = p; - }else{ - sqlite3_free(p); - } - } - pOpen->pUnused = pError; + OSTRACE(("LOCK %d %s %s (unix)\n", pFile->h, azFileLock(eFileLock), + rc==SQLITE_OK ? "ok" : "failed")); return rc; } @@ -1477,16 +1189,16 @@ static int closePendingFds(unixFile *pFile){ ** pUnused list. */ static void setPendingFd(unixFile *pFile){ - struct unixOpenCnt *pOpen = pFile->pOpen; + unixInodeInfo *pInode = pFile->pInode; UnixUnusedFd *p = pFile->pUnused; - p->pNext = pOpen->pUnused; - pOpen->pUnused = p; + p->pNext = pInode->pUnused; + pInode->pUnused = p; pFile->h = -1; pFile->pUnused = 0; } /* -** Lower the locking level on file descriptor pFile to locktype. locktype +** Lower the locking level on file descriptor pFile to eFileLock. eFileLock ** must be either NO_LOCK or SHARED_LOCK. 
** ** If the locking level of the file descriptor is already at or below @@ -1498,31 +1210,29 @@ static void setPendingFd(unixFile *pFile){ ** around a bug in BSD NFS lockd (also seen on MacOSX 10.3+) that fails to ** remove the write lock on a region when a read lock is set. */ -static int _posixUnlock(sqlite3_file *id, int locktype, int handleNFSUnlock){ +static int _posixUnlock(sqlite3_file *id, int eFileLock, int handleNFSUnlock){ unixFile *pFile = (unixFile*)id; - struct unixLockInfo *pLock; + unixInodeInfo *pInode; struct flock lock; int rc = SQLITE_OK; int h; int tErrno; /* Error code from system call errors */ assert( pFile ); - OSTRACE7("UNLOCK %d %d was %d(%d,%d) pid=%d (unix)\n", pFile->h, locktype, - pFile->locktype, pFile->pLock->locktype, pFile->pLock->cnt, getpid()); + OSTRACE(("UNLOCK %d %d was %d(%d,%d) pid=%d (unix)\n", pFile->h, eFileLock, + pFile->eFileLock, pFile->pInode->eFileLock, pFile->pInode->nShared, + getpid())); - assert( locktype<=SHARED_LOCK ); - if( pFile->locktype<=locktype ){ + assert( eFileLock<=SHARED_LOCK ); + if( pFile->eFileLock<=eFileLock ){ return SQLITE_OK; } - if( CHECK_THREADID(pFile) ){ - return SQLITE_MISUSE_BKPT; - } unixEnterMutex(); h = pFile->h; - pLock = pFile->pLock; - assert( pLock->cnt!=0 ); - if( pFile->locktype>SHARED_LOCK ){ - assert( pLock->locktype==pFile->locktype ); + pInode = pFile->pInode; + assert( pInode->nShared!=0 ); + if( pFile->eFileLock>SHARED_LOCK ){ + assert( pInode->eFileLock==pFile->eFileLock ); SimulateIOErrorBenign(1); SimulateIOError( h=(-1) ) SimulateIOErrorBenign(0); @@ -1536,9 +1246,11 @@ static int _posixUnlock(sqlite3_file *id, int locktype, int handleNFSUnlock){ ** the file has changed and hence might not know to flush their ** cache. The use of a stale cache can lead to database corruption. */ +#if 0 assert( pFile->inNormalWrite==0 || pFile->dbUpdate==0 || pFile->transCntrChng==1 ); +#endif pFile->inNormalWrite = 0; #endif @@ -1551,7 +1263,7 @@ static int _posixUnlock(sqlite3_file *id, int locktype, int handleNFSUnlock){ ** 3: [RRRRW] ** 4: [RRRR.] */ - if( locktype==SHARED_LOCK ){ + if( eFileLock==SHARED_LOCK ){ if( handleNFSUnlock ){ off_t divSize = SHARED_SIZE - 1; @@ -1611,7 +1323,7 @@ static int _posixUnlock(sqlite3_file *id, int locktype, int handleNFSUnlock){ lock.l_start = PENDING_BYTE; lock.l_len = 2L; assert( PENDING_BYTE+1==RESERVED_BYTE ); if( fcntl(h, F_SETLK, &lock)!=(-1) ){ - pLock->locktype = SHARED_LOCK; + pInode->eFileLock = SHARED_LOCK; }else{ tErrno = errno; rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_UNLOCK); @@ -1621,15 +1333,13 @@ static int _posixUnlock(sqlite3_file *id, int locktype, int handleNFSUnlock){ goto end_unlock; } } - if( locktype==NO_LOCK ){ - struct unixOpenCnt *pOpen; - + if( eFileLock==NO_LOCK ){ /* Decrement the shared lock counter. Release the lock using an ** OS call only when all threads in this same process have released ** the lock. 
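On the non-NFS path, dropping from EXCLUSIVE back to SHARED is done in two steps: re-establish the read-lock over the whole shared range so no writer can slip in, then release the PENDING and RESERVED bytes, which are adjacent, hence the single 2-byte unlock (l_len = 2L) visible in the hunk above. A sketch of that downgrade in the same style as the previous example (illustrative only; the re-locking step is not shown in the hunk and is assumed here):

#include <sys/types.h>
#include <fcntl.h>
#include <string.h>

#define PENDING_BYTE  0x40000000
#define SHARED_FIRST  (PENDING_BYTE+2)
#define SHARED_SIZE   510

static int setLock(int fd, short type, off_t start, off_t len){
  struct flock l;
  memset(&l, 0, sizeof(l));
  l.l_type = type;
  l.l_whence = SEEK_SET;
  l.l_start = start;
  l.l_len = len;
  return fcntl(fd, F_SETLK, &l);
}

/* Downgrade EXCLUSIVE to SHARED: reacquire the shared-range read lock,
** then unlock PENDING and RESERVED together (they are adjacent bytes). */
static int downgradeToShared(int fd){
  if( setLock(fd, F_RDLCK, SHARED_FIRST, SHARED_SIZE) ) return -1;
  return setLock(fd, F_UNLCK, PENDING_BYTE, 2);
}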
*/ - pLock->cnt--; - if( pLock->cnt==0 ){ + pInode->nShared--; + if( pInode->nShared==0 ){ lock.l_type = F_UNLCK; lock.l_whence = SEEK_SET; lock.l_start = lock.l_len = 0L; @@ -1637,15 +1347,15 @@ static int _posixUnlock(sqlite3_file *id, int locktype, int handleNFSUnlock){ SimulateIOError( h=(-1) ) SimulateIOErrorBenign(0); if( fcntl(h, F_SETLK, &lock)!=(-1) ){ - pLock->locktype = NO_LOCK; + pInode->eFileLock = NO_LOCK; }else{ tErrno = errno; rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_UNLOCK); if( IS_LOCK_ERROR(rc) ){ pFile->lastErrno = tErrno; } - pLock->locktype = NO_LOCK; - pFile->locktype = NO_LOCK; + pInode->eFileLock = NO_LOCK; + pFile->eFileLock = NO_LOCK; } } @@ -1653,10 +1363,9 @@ static int _posixUnlock(sqlite3_file *id, int locktype, int handleNFSUnlock){ ** count reaches zero, close any other file descriptors whose close ** was deferred because of outstanding locks. */ - pOpen = pFile->pOpen; - pOpen->nLock--; - assert( pOpen->nLock>=0 ); - if( pOpen->nLock==0 ){ + pInode->nLock--; + assert( pInode->nLock>=0 ); + if( pInode->nLock==0 ){ int rc2 = closePendingFds(pFile); if( rc==SQLITE_OK ){ rc = rc2; @@ -1666,19 +1375,19 @@ static int _posixUnlock(sqlite3_file *id, int locktype, int handleNFSUnlock){ end_unlock: unixLeaveMutex(); - if( rc==SQLITE_OK ) pFile->locktype = locktype; + if( rc==SQLITE_OK ) pFile->eFileLock = eFileLock; return rc; } /* -** Lower the locking level on file descriptor pFile to locktype. locktype +** Lower the locking level on file descriptor pFile to eFileLock. eFileLock ** must be either NO_LOCK or SHARED_LOCK. ** ** If the locking level of the file descriptor is already at or below ** the requested locking level, this routine is a no-op. */ -static int unixUnlock(sqlite3_file *id, int locktype){ - return _posixUnlock(id, locktype, 0); +static int unixUnlock(sqlite3_file *id, int eFileLock){ + return _posixUnlock(id, eFileLock, 0); } /* @@ -1719,7 +1428,7 @@ static int closeUnixFile(sqlite3_file *id){ pFile->pId = 0; } #endif - OSTRACE2("CLOSE %-3d\n", pFile->h); + OSTRACE(("CLOSE %-3d\n", pFile->h)); OpenCounter(-1); sqlite3_free(pFile->pUnused); memset(pFile, 0, sizeof(unixFile)); @@ -1736,16 +1445,15 @@ static int unixClose(sqlite3_file *id){ unixFile *pFile = (unixFile *)id; unixUnlock(id, NO_LOCK); unixEnterMutex(); - if( pFile->pOpen && pFile->pOpen->nLock ){ + if( pFile->pInode && pFile->pInode->nLock ){ /* If there are outstanding locks, do not actually close the file just ** yet because that would clear those locks. Instead, add the file - ** descriptor to pOpen->pUnused list. It will be automatically closed + ** descriptor to pInode->pUnused list. It will be automatically closed ** when the last lock is cleared. */ setPendingFd(pFile); } - releaseLockInfo(pFile->pLock); - releaseOpenCnt(pFile->pOpen); + releaseInodeInfo(pFile); rc = closeUnixFile(id); unixLeaveMutex(); } @@ -1844,7 +1552,7 @@ static int dotlockCheckReservedLock(sqlite3_file *id, int *pResOut) { assert( pFile ); /* Check if a thread in this process holds such a lock */ - if( pFile->locktype>SHARED_LOCK ){ + if( pFile->eFileLock>SHARED_LOCK ){ /* Either this connection or some other connection in the same process ** holds a lock on the file. No need to check further. 
*/ reserved = 1; @@ -1853,13 +1561,13 @@ static int dotlockCheckReservedLock(sqlite3_file *id, int *pResOut) { const char *zLockFile = (const char*)pFile->lockingContext; reserved = access(zLockFile, 0)==0; } - OSTRACE4("TEST WR-LOCK %d %d %d (dotlock)\n", pFile->h, rc, reserved); + OSTRACE(("TEST WR-LOCK %d %d %d (dotlock)\n", pFile->h, rc, reserved)); *pResOut = reserved; return rc; } /* -** Lock the file with the lock specified by parameter locktype - one +** Lock the file with the lock specified by parameter eFileLock - one ** of the following: ** ** (1) SHARED_LOCK @@ -1885,7 +1593,7 @@ static int dotlockCheckReservedLock(sqlite3_file *id, int *pResOut) { ** With dotfile locking, we really only support state (4): EXCLUSIVE. ** But we track the other locking levels internally. */ -static int dotlockLock(sqlite3_file *id, int locktype) { +static int dotlockLock(sqlite3_file *id, int eFileLock) { unixFile *pFile = (unixFile*)id; int fd; char *zLockFile = (char *)pFile->lockingContext; @@ -1895,8 +1603,8 @@ static int dotlockLock(sqlite3_file *id, int locktype) { /* If we have any lock, then the lock file already exists. All we have ** to do is adjust our internal record of the lock level. */ - if( pFile->locktype > NO_LOCK ){ - pFile->locktype = locktype; + if( pFile->eFileLock > NO_LOCK ){ + pFile->eFileLock = eFileLock; #if !OS_VXWORKS /* Always update the timestamp on the old file */ utimes(zLockFile, NULL); @@ -1925,12 +1633,12 @@ static int dotlockLock(sqlite3_file *id, int locktype) { } /* got it, set the type and return ok */ - pFile->locktype = locktype; + pFile->eFileLock = eFileLock; return rc; } /* -** Lower the locking level on file descriptor pFile to locktype. locktype +** Lower the locking level on file descriptor pFile to eFileLock. eFileLock ** must be either NO_LOCK or SHARED_LOCK. ** ** If the locking level of the file descriptor is already at or below @@ -1938,30 +1646,30 @@ static int dotlockLock(sqlite3_file *id, int locktype) { ** ** When the locking level reaches NO_LOCK, delete the lock file. */ -static int dotlockUnlock(sqlite3_file *id, int locktype) { +static int dotlockUnlock(sqlite3_file *id, int eFileLock) { unixFile *pFile = (unixFile*)id; char *zLockFile = (char *)pFile->lockingContext; assert( pFile ); - OSTRACE5("UNLOCK %d %d was %d pid=%d (dotlock)\n", pFile->h, locktype, - pFile->locktype, getpid()); - assert( locktype<=SHARED_LOCK ); + OSTRACE(("UNLOCK %d %d was %d pid=%d (dotlock)\n", pFile->h, eFileLock, + pFile->eFileLock, getpid())); + assert( eFileLock<=SHARED_LOCK ); /* no-op if possible */ - if( pFile->locktype==locktype ){ + if( pFile->eFileLock==eFileLock ){ return SQLITE_OK; } /* To downgrade to shared, simply update our internal notion of the ** lock state. No need to mess with the file on disk. 
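The dotlock methods keep SHARED, RESERVED and PENDING purely as in-memory state; the only on-disk artifact is the lock file itself, created when the first real lock is taken and deleted on full unlock. A minimal sketch of that locking style in general (this shows one classic way to do it with O_CREAT|O_EXCL; it is not lifted from the patch):

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

/* Try to take the lock.  Creating the file with O_EXCL fails with EEXIST
** if the lock is already held.  0: got it, 1: busy, -1: I/O error. */
static int dotlockAcquire(const char *zLockFile){
  int fd = open(zLockFile, O_RDONLY|O_CREAT|O_EXCL, 0600);
  if( fd<0 ) return (errno==EEXIST) ? 1 : -1;
  close(fd);
  return 0;
}

/* Release the lock by deleting the lock file. */
static int dotlockRelease(const char *zLockFile){
  return unlink(zLockFile) ? -1 : 0;
}

Because the on-disk state is binary (the file either exists or it does not), anything above NO_LOCK is effectively exclusive, which is why dotlockLock above only refreshes the file's timestamp when some lock is already held.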
*/ - if( locktype==SHARED_LOCK ){ - pFile->locktype = SHARED_LOCK; + if( eFileLock==SHARED_LOCK ){ + pFile->eFileLock = SHARED_LOCK; return SQLITE_OK; } /* To fully unlock the database, delete the lock file */ - assert( locktype==NO_LOCK ); + assert( eFileLock==NO_LOCK ); if( unlink(zLockFile) ){ int rc = 0; int tErrno = errno; @@ -1973,7 +1681,7 @@ static int dotlockUnlock(sqlite3_file *id, int locktype) { } return rc; } - pFile->locktype = NO_LOCK; + pFile->eFileLock = NO_LOCK; return SQLITE_OK; } @@ -2026,7 +1734,7 @@ static int flockCheckReservedLock(sqlite3_file *id, int *pResOut){ assert( pFile ); /* Check if a thread in this process holds such a lock */ - if( pFile->locktype>SHARED_LOCK ){ + if( pFile->eFileLock>SHARED_LOCK ){ reserved = 1; } @@ -2057,7 +1765,7 @@ static int flockCheckReservedLock(sqlite3_file *id, int *pResOut){ } } } - OSTRACE4("TEST WR-LOCK %d %d %d (flock)\n", pFile->h, rc, reserved); + OSTRACE(("TEST WR-LOCK %d %d %d (flock)\n", pFile->h, rc, reserved)); #ifdef SQLITE_IGNORE_FLOCK_LOCK_ERRORS if( (rc & SQLITE_IOERR) == SQLITE_IOERR ){ @@ -2070,7 +1778,7 @@ static int flockCheckReservedLock(sqlite3_file *id, int *pResOut){ } /* -** Lock the file with the lock specified by parameter locktype - one +** Lock the file with the lock specified by parameter eFileLock - one ** of the following: ** ** (1) SHARED_LOCK @@ -2098,7 +1806,7 @@ static int flockCheckReservedLock(sqlite3_file *id, int *pResOut){ ** This routine will only increase a lock. Use the sqlite3OsUnlock() ** routine to lower a locking level. */ -static int flockLock(sqlite3_file *id, int locktype) { +static int flockLock(sqlite3_file *id, int eFileLock) { int rc = SQLITE_OK; unixFile *pFile = (unixFile*)id; @@ -2106,8 +1814,8 @@ static int flockLock(sqlite3_file *id, int locktype) { /* if we already have a lock, it is exclusive. ** Just adjust level and punt on outta here. */ - if (pFile->locktype > NO_LOCK) { - pFile->locktype = locktype; + if (pFile->eFileLock > NO_LOCK) { + pFile->eFileLock = eFileLock; return SQLITE_OK; } @@ -2122,10 +1830,10 @@ static int flockLock(sqlite3_file *id, int locktype) { } } else { /* got it, set the type and return ok */ - pFile->locktype = locktype; + pFile->eFileLock = eFileLock; } - OSTRACE4("LOCK %d %s %s (flock)\n", pFile->h, locktypeName(locktype), - rc==SQLITE_OK ? "ok" : "failed"); + OSTRACE(("LOCK %d %s %s (flock)\n", pFile->h, azFileLock(eFileLock), + rc==SQLITE_OK ? "ok" : "failed")); #ifdef SQLITE_IGNORE_FLOCK_LOCK_ERRORS if( (rc & SQLITE_IOERR) == SQLITE_IOERR ){ rc = SQLITE_BUSY; @@ -2136,28 +1844,28 @@ static int flockLock(sqlite3_file *id, int locktype) { /* -** Lower the locking level on file descriptor pFile to locktype. locktype +** Lower the locking level on file descriptor pFile to eFileLock. eFileLock ** must be either NO_LOCK or SHARED_LOCK. ** ** If the locking level of the file descriptor is already at or below ** the requested locking level, this routine is a no-op. 
*/ -static int flockUnlock(sqlite3_file *id, int locktype) { +static int flockUnlock(sqlite3_file *id, int eFileLock) { unixFile *pFile = (unixFile*)id; assert( pFile ); - OSTRACE5("UNLOCK %d %d was %d pid=%d (flock)\n", pFile->h, locktype, - pFile->locktype, getpid()); - assert( locktype<=SHARED_LOCK ); + OSTRACE(("UNLOCK %d %d was %d pid=%d (flock)\n", pFile->h, eFileLock, + pFile->eFileLock, getpid())); + assert( eFileLock<=SHARED_LOCK ); /* no-op if possible */ - if( pFile->locktype==locktype ){ + if( pFile->eFileLock==eFileLock ){ return SQLITE_OK; } /* shared can just be set because we always have an exclusive */ - if (locktype==SHARED_LOCK) { - pFile->locktype = locktype; + if (eFileLock==SHARED_LOCK) { + pFile->eFileLock = eFileLock; return SQLITE_OK; } @@ -2177,7 +1885,7 @@ static int flockUnlock(sqlite3_file *id, int locktype) { return r; } else { - pFile->locktype = NO_LOCK; + pFile->eFileLock = NO_LOCK; return SQLITE_OK; } } @@ -2225,13 +1933,13 @@ static int semCheckReservedLock(sqlite3_file *id, int *pResOut) { assert( pFile ); /* Check if a thread in this process holds such a lock */ - if( pFile->locktype>SHARED_LOCK ){ + if( pFile->eFileLock>SHARED_LOCK ){ reserved = 1; } /* Otherwise see if some other process holds it. */ if( !reserved ){ - sem_t *pSem = pFile->pOpen->pSem; + sem_t *pSem = pFile->pInode->pSem; struct stat statBuf; if( sem_trywait(pSem)==-1 ){ @@ -2241,21 +1949,21 @@ static int semCheckReservedLock(sqlite3_file *id, int *pResOut) { pFile->lastErrno = tErrno; } else { /* someone else has the lock when we are in NO_LOCK */ - reserved = (pFile->locktype < SHARED_LOCK); + reserved = (pFile->eFileLock < SHARED_LOCK); } }else{ /* we could have it if we want it */ sem_post(pSem); } } - OSTRACE4("TEST WR-LOCK %d %d %d (sem)\n", pFile->h, rc, reserved); + OSTRACE(("TEST WR-LOCK %d %d %d (sem)\n", pFile->h, rc, reserved)); *pResOut = reserved; return rc; } /* -** Lock the file with the lock specified by parameter locktype - one +** Lock the file with the lock specified by parameter eFileLock - one ** of the following: ** ** (1) SHARED_LOCK @@ -2283,16 +1991,16 @@ static int semCheckReservedLock(sqlite3_file *id, int *pResOut) { ** This routine will only increase a lock. Use the sqlite3OsUnlock() ** routine to lower a locking level. */ -static int semLock(sqlite3_file *id, int locktype) { +static int semLock(sqlite3_file *id, int eFileLock) { unixFile *pFile = (unixFile*)id; int fd; - sem_t *pSem = pFile->pOpen->pSem; + sem_t *pSem = pFile->pInode->pSem; int rc = SQLITE_OK; /* if we already have a lock, it is exclusive. ** Just adjust level and punt on outta here. */ - if (pFile->locktype > NO_LOCK) { - pFile->locktype = locktype; + if (pFile->eFileLock > NO_LOCK) { + pFile->eFileLock = eFileLock; rc = SQLITE_OK; goto sem_end_lock; } @@ -2304,37 +2012,37 @@ static int semLock(sqlite3_file *id, int locktype) { } /* got it, set the type and return ok */ - pFile->locktype = locktype; + pFile->eFileLock = eFileLock; sem_end_lock: return rc; } /* -** Lower the locking level on file descriptor pFile to locktype. locktype +** Lower the locking level on file descriptor pFile to eFileLock. eFileLock ** must be either NO_LOCK or SHARED_LOCK. ** ** If the locking level of the file descriptor is already at or below ** the requested locking level, this routine is a no-op. 
*/ -static int semUnlock(sqlite3_file *id, int locktype) { +static int semUnlock(sqlite3_file *id, int eFileLock) { unixFile *pFile = (unixFile*)id; - sem_t *pSem = pFile->pOpen->pSem; + sem_t *pSem = pFile->pInode->pSem; assert( pFile ); assert( pSem ); - OSTRACE5("UNLOCK %d %d was %d pid=%d (sem)\n", pFile->h, locktype, - pFile->locktype, getpid()); - assert( locktype<=SHARED_LOCK ); + OSTRACE(("UNLOCK %d %d was %d pid=%d (sem)\n", pFile->h, eFileLock, + pFile->eFileLock, getpid())); + assert( eFileLock<=SHARED_LOCK ); /* no-op if possible */ - if( pFile->locktype==locktype ){ + if( pFile->eFileLock==eFileLock ){ return SQLITE_OK; } /* shared can just be set because we always have an exclusive */ - if (locktype==SHARED_LOCK) { - pFile->locktype = locktype; + if (eFileLock==SHARED_LOCK) { + pFile->eFileLock = eFileLock; return SQLITE_OK; } @@ -2347,7 +2055,7 @@ static int semUnlock(sqlite3_file *id, int locktype) { } return rc; } - pFile->locktype = NO_LOCK; + pFile->eFileLock = NO_LOCK; return SQLITE_OK; } @@ -2360,8 +2068,7 @@ static int semClose(sqlite3_file *id) { semUnlock(id, NO_LOCK); assert( pFile ); unixEnterMutex(); - releaseLockInfo(pFile->pLock); - releaseOpenCnt(pFile->pOpen); + releaseInodeInfo(pFile); unixLeaveMutex(); closeUnixFile(id); } @@ -2430,15 +2137,15 @@ static int afpSetLock( pb.length = length; pb.fd = pFile->h; - OSTRACE6("AFPSETLOCK [%s] for %d%s in range %llx:%llx\n", + OSTRACE(("AFPSETLOCK [%s] for %d%s in range %llx:%llx\n", (setLockFlag?"ON":"OFF"), pFile->h, (pb.fd==-1?"[testval-1]":""), - offset, length); + offset, length)); err = fsctl(path, afpfsByteRangeLock2FSCTL, &pb, 0); if ( err==-1 ) { int rc; int tErrno = errno; - OSTRACE4("AFPSETLOCK failed to fsctl() '%s' %d %s\n", - path, tErrno, strerror(tErrno)); + OSTRACE(("AFPSETLOCK failed to fsctl() '%s' %d %s\n", + path, tErrno, strerror(tErrno))); #ifdef SQLITE_IGNORE_AFP_LOCK_ERRORS rc = SQLITE_BUSY; #else @@ -2473,10 +2180,10 @@ static int afpCheckReservedLock(sqlite3_file *id, int *pResOut){ *pResOut = 1; return SQLITE_OK; } - unixEnterMutex(); /* Because pFile->pLock is shared across threads */ + unixEnterMutex(); /* Because pFile->pInode is shared across threads */ /* Check if a thread in this process holds such a lock */ - if( pFile->pLock->locktype>SHARED_LOCK ){ + if( pFile->pInode->eFileLock>SHARED_LOCK ){ reserved = 1; } @@ -2499,14 +2206,14 @@ static int afpCheckReservedLock(sqlite3_file *id, int *pResOut){ } unixLeaveMutex(); - OSTRACE4("TEST WR-LOCK %d %d %d (afp)\n", pFile->h, rc, reserved); + OSTRACE(("TEST WR-LOCK %d %d %d (afp)\n", pFile->h, rc, reserved)); *pResOut = reserved; return rc; } /* -** Lock the file with the lock specified by parameter locktype - one +** Lock the file with the lock specified by parameter eFileLock - one ** of the following: ** ** (1) SHARED_LOCK @@ -2529,24 +2236,24 @@ static int afpCheckReservedLock(sqlite3_file *id, int *pResOut){ ** This routine will only increase a lock. Use the sqlite3OsUnlock() ** routine to lower a locking level. 
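The four lock levels enumerated above sit on top of NO_LOCK and are only ever stepped up one legal transition at a time. The following standalone sketch is illustrative only: the numeric values match SQLite's documented NO_LOCK through EXCLUSIVE_LOCK constants, but the legalUpgrade() helper is invented here to restate the rules spelled out in the comments and asserts in this file.

/* Illustrative sketch of the lock ladder -- not patch code. */
#include <stdio.h>

#define NO_LOCK         0
#define SHARED_LOCK     1
#define RESERVED_LOCK   2
#define PENDING_LOCK    3
#define EXCLUSIVE_LOCK  4

/* Return 1 if going from "have" to "want" follows the documented rules:
** SHARED is reached only from NO_LOCK, RESERVED only from SHARED,
** PENDING is never requested explicitly, and EXCLUSIVE requires at
** least SHARED (a PENDING lock is taken internally on the way up). */
static int legalUpgrade(int have, int want){
  if( want<=have ) return 0;              /* this path only increases locks */
  switch( want ){
    case SHARED_LOCK:    return have==NO_LOCK;
    case RESERVED_LOCK:  return have==SHARED_LOCK;
    case PENDING_LOCK:   return 0;        /* never requested from outside */
    case EXCLUSIVE_LOCK: return have>=SHARED_LOCK;
    default:             return 0;
  }
}

int main(void){
  printf("%d %d %d\n",
      legalUpgrade(NO_LOCK, SHARED_LOCK),        /* 1 */
      legalUpgrade(NO_LOCK, EXCLUSIVE_LOCK),     /* 0: need SHARED first */
      legalUpgrade(SHARED_LOCK, RESERVED_LOCK)); /* 1 */
  return 0;
}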
*/ -static int afpLock(sqlite3_file *id, int locktype){ +static int afpLock(sqlite3_file *id, int eFileLock){ int rc = SQLITE_OK; unixFile *pFile = (unixFile*)id; - struct unixLockInfo *pLock = pFile->pLock; + unixInodeInfo *pInode = pFile->pInode; afpLockingContext *context = (afpLockingContext *) pFile->lockingContext; assert( pFile ); - OSTRACE7("LOCK %d %s was %s(%s,%d) pid=%d (afp)\n", pFile->h, - locktypeName(locktype), locktypeName(pFile->locktype), - locktypeName(pLock->locktype), pLock->cnt , getpid()); + OSTRACE(("LOCK %d %s was %s(%s,%d) pid=%d (afp)\n", pFile->h, + azFileLock(eFileLock), azFileLock(pFile->eFileLock), + azFileLock(pInode->eFileLock), pInode->nShared , getpid())); /* If there is already a lock of this type or more restrictive on the ** unixFile, do nothing. Don't use the afp_end_lock: exit path, as ** unixEnterMutex() hasn't been called yet. */ - if( pFile->locktype>=locktype ){ - OSTRACE3("LOCK %d %s ok (already held) (afp)\n", pFile->h, - locktypeName(locktype)); + if( pFile->eFileLock>=eFileLock ){ + OSTRACE(("LOCK %d %s ok (already held) (afp)\n", pFile->h, + azFileLock(eFileLock))); return SQLITE_OK; } @@ -2555,28 +2262,20 @@ static int afpLock(sqlite3_file *id, int locktype){ ** (2) SQLite never explicitly requests a pendig lock. ** (3) A shared lock is always held when a reserve lock is requested. */ - assert( pFile->locktype!=NO_LOCK || locktype==SHARED_LOCK ); - assert( locktype!=PENDING_LOCK ); - assert( locktype!=RESERVED_LOCK || pFile->locktype==SHARED_LOCK ); + assert( pFile->eFileLock!=NO_LOCK || eFileLock==SHARED_LOCK ); + assert( eFileLock!=PENDING_LOCK ); + assert( eFileLock!=RESERVED_LOCK || pFile->eFileLock==SHARED_LOCK ); - /* This mutex is needed because pFile->pLock is shared across threads + /* This mutex is needed because pFile->pInode is shared across threads */ unixEnterMutex(); - - /* Make sure the current thread owns the pFile. - */ - rc = transferOwnership(pFile); - if( rc!=SQLITE_OK ){ - unixLeaveMutex(); - return rc; - } - pLock = pFile->pLock; + pInode = pFile->pInode; /* If some thread using this PID has a lock via a different unixFile* ** handle that precludes the requested lock, return BUSY. */ - if( (pFile->locktype!=pLock->locktype && - (pLock->locktype>=PENDING_LOCK || locktype>SHARED_LOCK)) + if( (pFile->eFileLock!=pInode->eFileLock && + (pInode->eFileLock>=PENDING_LOCK || eFileLock>SHARED_LOCK)) ){ rc = SQLITE_BUSY; goto afp_end_lock; @@ -2586,14 +2285,14 @@ static int afpLock(sqlite3_file *id, int locktype){ ** has a SHARED or RESERVED lock, then increment reference counts and ** return SQLITE_OK. */ - if( locktype==SHARED_LOCK && - (pLock->locktype==SHARED_LOCK || pLock->locktype==RESERVED_LOCK) ){ - assert( locktype==SHARED_LOCK ); - assert( pFile->locktype==0 ); - assert( pLock->cnt>0 ); - pFile->locktype = SHARED_LOCK; - pLock->cnt++; - pFile->pOpen->nLock++; + if( eFileLock==SHARED_LOCK && + (pInode->eFileLock==SHARED_LOCK || pInode->eFileLock==RESERVED_LOCK) ){ + assert( eFileLock==SHARED_LOCK ); + assert( pFile->eFileLock==0 ); + assert( pInode->nShared>0 ); + pFile->eFileLock = SHARED_LOCK; + pInode->nShared++; + pInode->nLock++; goto afp_end_lock; } @@ -2601,8 +2300,8 @@ static int afpLock(sqlite3_file *id, int locktype){ ** acquiring an EXCLUSIVE lock. For the SHARED lock, the PENDING will ** be released. 
*/ - if( locktype==SHARED_LOCK - || (locktype==EXCLUSIVE_LOCK && pFile->locktype<PENDING_LOCK) + if( eFileLock==SHARED_LOCK + || (eFileLock==EXCLUSIVE_LOCK && pFile->eFileLock<PENDING_LOCK) ){ int failed; failed = afpSetLock(context->dbPath, pFile, PENDING_BYTE, 1, 1); @@ -2615,20 +2314,20 @@ static int afpLock(sqlite3_file *id, int locktype){ /* If control gets to this point, then actually go ahead and make ** operating system calls for the specified lock. */ - if( locktype==SHARED_LOCK ){ + if( eFileLock==SHARED_LOCK ){ int lrc1, lrc2, lrc1Errno; long lk, mask; - assert( pLock->cnt==0 ); - assert( pLock->locktype==0 ); + assert( pInode->nShared==0 ); + assert( pInode->eFileLock==0 ); mask = (sizeof(long)==8) ? LARGEST_INT64 : 0x7fffffff; /* Now get the read-lock SHARED_LOCK */ /* note that the quality of the randomness doesn't matter that much */ lk = random(); - pLock->sharedByte = (lk & mask)%(SHARED_SIZE - 1); + pInode->sharedByte = (lk & mask)%(SHARED_SIZE - 1); lrc1 = afpSetLock(context->dbPath, pFile, - SHARED_FIRST+pLock->sharedByte, 1, 1); + SHARED_FIRST+pInode->sharedByte, 1, 1); if( IS_LOCK_ERROR(lrc1) ){ lrc1Errno = pFile->lastErrno; } @@ -2645,11 +2344,11 @@ static int afpLock(sqlite3_file *id, int locktype){ } else if( lrc1 != SQLITE_OK ) { rc = lrc1; } else { - pFile->locktype = SHARED_LOCK; - pFile->pOpen->nLock++; - pLock->cnt = 1; + pFile->eFileLock = SHARED_LOCK; + pInode->nLock++; + pInode->nShared = 1; } - }else if( locktype==EXCLUSIVE_LOCK && pLock->cnt>1 ){ + }else if( eFileLock==EXCLUSIVE_LOCK && pInode->nShared>1 ){ /* We are trying for an exclusive lock but another thread in this ** same process is still holding a shared lock. */ rc = SQLITE_BUSY; @@ -2659,28 +2358,28 @@ static int afpLock(sqlite3_file *id, int locktype){ ** already. */ int failed = 0; - assert( 0!=pFile->locktype ); - if (locktype >= RESERVED_LOCK && pFile->locktype < RESERVED_LOCK) { + assert( 0!=pFile->eFileLock ); + if (eFileLock >= RESERVED_LOCK && pFile->eFileLock < RESERVED_LOCK) { /* Acquire a RESERVED lock */ failed = afpSetLock(context->dbPath, pFile, RESERVED_BYTE, 1,1); if( !failed ){ context->reserved = 1; } } - if (!failed && locktype == EXCLUSIVE_LOCK) { + if (!failed && eFileLock == EXCLUSIVE_LOCK) { /* Acquire an EXCLUSIVE lock */ /* Remove the shared lock before trying the range. we'll need to ** reestablish the shared lock if we can't get the afpUnlock */ if( !(failed = afpSetLock(context->dbPath, pFile, SHARED_FIRST + - pLock->sharedByte, 1, 0)) ){ + pInode->sharedByte, 1, 0)) ){ int failed2 = SQLITE_OK; /* now attemmpt to get the exclusive lock range */ failed = afpSetLock(context->dbPath, pFile, SHARED_FIRST, SHARED_SIZE, 1); if( failed && (failed2 = afpSetLock(context->dbPath, pFile, - SHARED_FIRST + pLock->sharedByte, 1, 1)) ){ + SHARED_FIRST + pInode->sharedByte, 1, 1)) ){ /* Can't reestablish the shared lock. Sqlite can't deal, this is ** a critical I/O error */ @@ -2698,31 +2397,31 @@ static int afpLock(sqlite3_file *id, int locktype){ } if( rc==SQLITE_OK ){ - pFile->locktype = locktype; - pLock->locktype = locktype; - }else if( locktype==EXCLUSIVE_LOCK ){ - pFile->locktype = PENDING_LOCK; - pLock->locktype = PENDING_LOCK; + pFile->eFileLock = eFileLock; + pInode->eFileLock = eFileLock; + }else if( eFileLock==EXCLUSIVE_LOCK ){ + pFile->eFileLock = PENDING_LOCK; + pInode->eFileLock = PENDING_LOCK; } afp_end_lock: unixLeaveMutex(); - OSTRACE4("LOCK %d %s %s (afp)\n", pFile->h, locktypeName(locktype), - rc==SQLITE_OK ? "ok" : "failed"); + OSTRACE(("LOCK %d %s %s (afp)\n", pFile->h, azFileLock(eFileLock), + rc==SQLITE_OK ?
"ok" : "failed")); return rc; } /* -** Lower the locking level on file descriptor pFile to locktype. locktype +** Lower the locking level on file descriptor pFile to eFileLock. eFileLock ** must be either NO_LOCK or SHARED_LOCK. ** ** If the locking level of the file descriptor is already at or below ** the requested locking level, this routine is a no-op. */ -static int afpUnlock(sqlite3_file *id, int locktype) { +static int afpUnlock(sqlite3_file *id, int eFileLock) { int rc = SQLITE_OK; unixFile *pFile = (unixFile*)id; - struct unixLockInfo *pLock; + unixInodeInfo *pInode; afpLockingContext *context = (afpLockingContext *) pFile->lockingContext; int skipShared = 0; #ifdef SQLITE_TEST @@ -2730,21 +2429,19 @@ static int afpUnlock(sqlite3_file *id, int locktype) { #endif assert( pFile ); - OSTRACE7("UNLOCK %d %d was %d(%d,%d) pid=%d (afp)\n", pFile->h, locktype, - pFile->locktype, pFile->pLock->locktype, pFile->pLock->cnt, getpid()); + OSTRACE(("UNLOCK %d %d was %d(%d,%d) pid=%d (afp)\n", pFile->h, eFileLock, + pFile->eFileLock, pFile->pInode->eFileLock, pFile->pInode->nShared, + getpid())); - assert( locktype<=SHARED_LOCK ); - if( pFile->locktype<=locktype ){ + assert( eFileLock<=SHARED_LOCK ); + if( pFile->eFileLock<=eFileLock ){ return SQLITE_OK; } - if( CHECK_THREADID(pFile) ){ - return SQLITE_MISUSE_BKPT; - } unixEnterMutex(); - pLock = pFile->pLock; - assert( pLock->cnt!=0 ); - if( pFile->locktype>SHARED_LOCK ){ - assert( pLock->locktype==pFile->locktype ); + pInode = pFile->pInode; + assert( pInode->nShared!=0 ); + if( pFile->eFileLock>SHARED_LOCK ){ + assert( pInode->eFileLock==pFile->eFileLock ); SimulateIOErrorBenign(1); SimulateIOError( h=(-1) ) SimulateIOErrorBenign(0); @@ -2764,38 +2461,38 @@ static int afpUnlock(sqlite3_file *id, int locktype) { pFile->inNormalWrite = 0; #endif - if( pFile->locktype==EXCLUSIVE_LOCK ){ + if( pFile->eFileLock==EXCLUSIVE_LOCK ){ rc = afpSetLock(context->dbPath, pFile, SHARED_FIRST, SHARED_SIZE, 0); - if( rc==SQLITE_OK && (locktype==SHARED_LOCK || pLock->cnt>1) ){ + if( rc==SQLITE_OK && (eFileLock==SHARED_LOCK || pInode->nShared>1) ){ /* only re-establish the shared lock if necessary */ - int sharedLockByte = SHARED_FIRST+pLock->sharedByte; + int sharedLockByte = SHARED_FIRST+pInode->sharedByte; rc = afpSetLock(context->dbPath, pFile, sharedLockByte, 1, 1); } else { skipShared = 1; } } - if( rc==SQLITE_OK && pFile->locktype>=PENDING_LOCK ){ + if( rc==SQLITE_OK && pFile->eFileLock>=PENDING_LOCK ){ rc = afpSetLock(context->dbPath, pFile, PENDING_BYTE, 1, 0); } - if( rc==SQLITE_OK && pFile->locktype>=RESERVED_LOCK && context->reserved ){ + if( rc==SQLITE_OK && pFile->eFileLock>=RESERVED_LOCK && context->reserved ){ rc = afpSetLock(context->dbPath, pFile, RESERVED_BYTE, 1, 0); if( !rc ){ context->reserved = 0; } } - if( rc==SQLITE_OK && (locktype==SHARED_LOCK || pLock->cnt>1)){ - pLock->locktype = SHARED_LOCK; + if( rc==SQLITE_OK && (eFileLock==SHARED_LOCK || pInode->nShared>1)){ + pInode->eFileLock = SHARED_LOCK; } } - if( rc==SQLITE_OK && locktype==NO_LOCK ){ + if( rc==SQLITE_OK && eFileLock==NO_LOCK ){ /* Decrement the shared lock counter. Release the lock using an ** OS call only when all threads in this same process have released ** the lock. 
*/ - unsigned long long sharedLockByte = SHARED_FIRST+pLock->sharedByte; - pLock->cnt--; - if( pLock->cnt==0 ){ + unsigned long long sharedLockByte = SHARED_FIRST+pInode->sharedByte; + pInode->nShared--; + if( pInode->nShared==0 ){ SimulateIOErrorBenign(1); SimulateIOError( h=(-1) ) SimulateIOErrorBenign(0); @@ -2803,23 +2500,21 @@ static int afpUnlock(sqlite3_file *id, int locktype) { rc = afpSetLock(context->dbPath, pFile, sharedLockByte, 1, 0); } if( !rc ){ - pLock->locktype = NO_LOCK; - pFile->locktype = NO_LOCK; + pInode->eFileLock = NO_LOCK; + pFile->eFileLock = NO_LOCK; } } if( rc==SQLITE_OK ){ - struct unixOpenCnt *pOpen = pFile->pOpen; - - pOpen->nLock--; - assert( pOpen->nLock>=0 ); - if( pOpen->nLock==0 ){ + pInode->nLock--; + assert( pInode->nLock>=0 ); + if( pInode->nLock==0 ){ rc = closePendingFds(pFile); } } } unixLeaveMutex(); - if( rc==SQLITE_OK ) pFile->locktype = locktype; + if( rc==SQLITE_OK ) pFile->eFileLock = eFileLock; return rc; } @@ -2832,16 +2527,15 @@ static int afpClose(sqlite3_file *id) { unixFile *pFile = (unixFile*)id; afpUnlock(id, NO_LOCK); unixEnterMutex(); - if( pFile->pOpen && pFile->pOpen->nLock ){ + if( pFile->pInode && pFile->pInode->nLock ){ /* If there are outstanding locks, do not actually close the file just ** yet because that would clear those locks. Instead, add the file - ** descriptor to pOpen->aPending. It will be automatically closed when + ** descriptor to pInode->aPending. It will be automatically closed when ** the last lock is cleared. */ setPendingFd(pFile); } - releaseLockInfo(pFile->pLock); - releaseOpenCnt(pFile->pOpen); + releaseInodeInfo(pFile); sqlite3_free(pFile->lockingContext); rc = closeUnixFile(id); unixLeaveMutex(); @@ -2864,14 +2558,14 @@ static int afpClose(sqlite3_file *id) { #if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE /* - ** Lower the locking level on file descriptor pFile to locktype. locktype + ** Lower the locking level on file descriptor pFile to eFileLock. eFileLock ** must be either NO_LOCK or SHARED_LOCK. ** ** If the locking level of the file descriptor is already at or below ** the requested locking level, this routine is a no-op. */ -static int nfsUnlock(sqlite3_file *id, int locktype){ - return _posixUnlock(id, locktype, 1); +static int nfsUnlock(sqlite3_file *id, int eFileLock){ + return _posixUnlock(id, eFileLock, 1); } #endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */ @@ -2935,7 +2629,7 @@ static int seekAndRead(unixFile *id, sqlite3_int64 offset, void *pBuf, int cnt){ if( got<0 ){ ((unixFile*)id)->lastErrno = errno; } - OSTRACE5("READ %-3d %5d %7lld %llu\n", id->h, got, offset, TIMER_ELAPSED); + OSTRACE(("READ %-3d %5d %7lld %llu\n", id->h, got, offset, TIMER_ELAPSED)); return got; } @@ -2956,10 +2650,12 @@ static int unixRead( /* If this is a database file (not a journal, master-journal or temp ** file), the bytes in the locking range should never be read or written. 
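The assert()s just below (newly wrapped in #if 0 by this patch) keep ordinary database reads and writes away from the byte range reserved for locks. As a standalone illustration, assuming the stock PENDING_BYTE value of 0x40000000 (this patch does not change it):

/* Illustrative sketch of the locking-byte region; values assume the
** default PENDING_BYTE used by stock SQLite builds. */
#include <stdio.h>

#define PENDING_BYTE   0x40000000          /* first locking byte */
#define RESERVED_BYTE  (PENDING_BYTE+1)
#define SHARED_FIRST   (PENDING_BYTE+2)
#define SHARED_SIZE    510

/* A read or write of "amt" bytes at "offset" must stay clear of the
** locking range; this mirrors the condition asserted in unixRead()
** and unixWrite(). */
static int overlapsLockRange(long long offset, int amt){
  return !(offset>=PENDING_BYTE+512 || offset+amt<=PENDING_BYTE);
}

int main(void){
  printf("%d\n", overlapsLockRange(0, 1024));          /* 0: normal page */
  printf("%d\n", overlapsLockRange(PENDING_BYTE, 1));  /* 1: lock byte */
  return 0;
}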
*/ +#if 0 assert( pFile->pUnused==0 || offset>=PENDING_BYTE+512 || offset+amt<=PENDING_BYTE ); +#endif got = seekAndRead(pFile, offset, pBuf, amt); if( got==amt ){ @@ -3009,7 +2705,7 @@ static int seekAndWrite(unixFile *id, i64 offset, const void *pBuf, int cnt){ ((unixFile*)id)->lastErrno = errno; } - OSTRACE5("WRITE %-3d %5d %7lld %llu\n", id->h, got, offset, TIMER_ELAPSED); + OSTRACE(("WRITE %-3d %5d %7lld %llu\n", id->h, got, offset, TIMER_ELAPSED)); return got; } @@ -3031,10 +2727,12 @@ static int unixWrite( /* If this is a database file (not a journal, master-journal or temp ** file), the bytes in the locking range should never be read or written. */ +#if 0 assert( pFile->pUnused==0 || offset>=PENDING_BYTE+512 || offset+amt<=PENDING_BYTE ); +#endif #ifndef NDEBUG /* If we are doing a normal write to a database file (as opposed to @@ -3233,7 +2931,7 @@ static int unixSync(sqlite3_file *id, int flags){ SimulateDiskfullError( return SQLITE_FULL ); assert( pFile ); - OSTRACE2("SYNC %-3d\n", pFile->h); + OSTRACE(("SYNC %-3d\n", pFile->h)); rc = full_fsync(pFile->h, isFullsync, isDataOnly); SimulateIOError( rc=1 ); if( rc ){ @@ -3242,8 +2940,8 @@ static int unixSync(sqlite3_file *id, int flags){ } if( pFile->dirfd>=0 ){ int err; - OSTRACE4("DIRSYNC %-3d (have_fullfsync=%d fullsync=%d)\n", pFile->dirfd, - HAVE_FULLFSYNC, isFullsync); + OSTRACE(("DIRSYNC %-3d (have_fullfsync=%d fullsync=%d)\n", pFile->dirfd, + HAVE_FULLFSYNC, isFullsync)); #ifndef SQLITE_DISABLE_DIRSYNC /* The directory sync is only attempted if full_fsync is ** turned off or unavailable. If a full_fsync occurred above, @@ -3315,7 +3013,7 @@ static int unixFileSize(sqlite3_file *id, i64 *pSize){ } *pSize = buf.st_size; - /* When opening a zero-size database, the findLockInfo() procedure + /* When opening a zero-size database, the findInodeInfo() procedure ** writes a single byte into that file in order to work around a bug ** in the OS-X msdos filesystem. In order to avoid problems with upper ** layers, we need to report this file size as zero even though it is @@ -3342,13 +3040,21 @@ static int proxyFileControl(sqlite3_file*,int,void*); static int unixFileControl(sqlite3_file *id, int op, void *pArg){ switch( op ){ case SQLITE_FCNTL_LOCKSTATE: { - *(int*)pArg = ((unixFile*)id)->locktype; + *(int*)pArg = ((unixFile*)id)->eFileLock; return SQLITE_OK; } case SQLITE_LAST_ERRNO: { *(int*)pArg = ((unixFile*)id)->lastErrno; return SQLITE_OK; } + case SQLITE_FCNTL_SIZE_HINT: { +#if 0 /* No performance advantage seen on Linux */ + sqlite3_int64 szFile = *(sqlite3_int64*)pArg; + unixFile *pFile = (unixFile*)id; + ftruncate(pFile->h, szFile); +#endif + return SQLITE_OK; + } #ifndef NDEBUG /* The pager calls this method to signal that it has done ** a rollback and that the database is therefore unchanged and @@ -3393,6 +3099,609 @@ static int unixDeviceCharacteristics(sqlite3_file *NotUsed){ return 0; } +#ifndef SQLITE_OMIT_WAL + + +/* +** Object used to represent an shared memory buffer. +** +** When multiple threads all reference the same wal-index, each thread +** has its own unixShm object, but they all point to a single instance +** of this unixShmNode object. In other words, each wal-index is opened +** only once per process. +** +** Each unixShmNode object is connected to a single unixInodeInfo object. +** We could coalesce this object into unixInodeInfo, but that would mean +** every open file that does not use shared memory (in other words, most +** open files) would have to carry around this extra information. 
So +** the unixInodeInfo object contains a pointer to this unixShmNode object +** and the unixShmNode object is created only when needed. +** +** unixMutexHeld() must be true when creating or destroying +** this object or while reading or writing the following fields: +** +** nRef +** +** The following fields are read-only after the object is created: +** +** fid +** zFilename +** +** Either unixShmNode.mutex must be held or unixShmNode.nRef==0 and +** unixMutexHeld() is true when reading or writing any other field +** in this structure. +*/ +struct unixShmNode { + unixInodeInfo *pInode; /* unixInodeInfo that owns this SHM node */ + sqlite3_mutex *mutex; /* Mutex to access this object */ + char *zFilename; /* Name of the mmapped file */ + int h; /* Open file descriptor */ + int szRegion; /* Size of shared-memory regions */ + int nRegion; /* Size of array apRegion */ + char **apRegion; /* Array of mapped shared-memory regions */ + int nRef; /* Number of unixShm objects pointing to this */ + unixShm *pFirst; /* All unixShm objects pointing to this */ +#ifdef SQLITE_DEBUG + u8 exclMask; /* Mask of exclusive locks held */ + u8 sharedMask; /* Mask of shared locks held */ + u8 nextShmId; /* Next available unixShm.id value */ +#endif +}; + +/* +** Structure used internally by this VFS to record the state of an +** open shared memory connection. +** +** The following fields are initialized when this object is created and +** are read-only thereafter: +** +** unixShm.pFile +** unixShm.id +** +** All other fields are read/write. The unixShm.pFile->mutex must be held +** while accessing any read/write fields. +*/ +struct unixShm { + unixShmNode *pShmNode; /* The underlying unixShmNode object */ + unixShm *pNext; /* Next unixShm with the same unixShmNode */ + u8 hasMutex; /* True if holding the unixShmNode mutex */ + u16 sharedMask; /* Mask of shared locks held */ + u16 exclMask; /* Mask of exclusive locks held */ +#ifdef SQLITE_DEBUG + u8 id; /* Id of this connection within its unixShmNode */ +#endif +}; + +/* +** Constants used for locking +*/ +#define UNIX_SHM_BASE ((22+SQLITE_SHM_NLOCK)*4) /* first lock byte */ +#define UNIX_SHM_DMS (UNIX_SHM_BASE+SQLITE_SHM_NLOCK) /* deadman switch */ + +/* +** Apply posix advisory locks for all bytes from ofst through ofst+n-1. +** +** Locks block if the mask is exactly UNIX_SHM_C and are non-blocking +** otherwise. +*/ +static int unixShmSystemLock( + unixShmNode *pShmNode, /* Apply locks to this open shared-memory segment */ + int lockType, /* F_UNLCK, F_RDLCK, or F_WRLCK */ + int ofst, /* First byte of the locking range */ + int n /* Number of bytes to lock */ +){ + struct flock f; /* The posix advisory locking structure */ + int rc = SQLITE_OK; /* Result code form fcntl() */ + + /* Access to the unixShmNode object is serialized by the caller */ + assert( sqlite3_mutex_held(pShmNode->mutex) || pShmNode->nRef==0 ); + + /* Shared locks never span more than one byte */ + assert( n==1 || lockType!=F_RDLCK ); + + /* Locks are within range */ + assert( n>=1 && n<SQLITE_SHM_NLOCK ); + + /* Initialize the locking parameters */ + memset(&f, 0, sizeof(f)); + f.l_type = lockType; + f.l_whence = SEEK_SET; + f.l_start = ofst; + f.l_len = n; + + rc = fcntl(pShmNode->h, F_SETLK, &f); + rc = (rc!=(-1)) ?
SQLITE_OK : SQLITE_BUSY; + + /* Update the global lock state and do debug tracing */ +#ifdef SQLITE_DEBUG + { u16 mask; + OSTRACE(("SHM-LOCK ")); + mask = (1<<(ofst+n)) - (1<<ofst); + if( rc==SQLITE_OK ){ + if( lockType==F_UNLCK ){ + OSTRACE(("unlock %d ok", ofst)); + pShmNode->exclMask &= ~mask; + pShmNode->sharedMask &= ~mask; + }else if( lockType==F_RDLCK ){ + OSTRACE(("read-lock %d ok", ofst)); + pShmNode->exclMask &= ~mask; + pShmNode->sharedMask |= mask; + }else{ + assert( lockType==F_WRLCK ); + OSTRACE(("write-lock %d ok", ofst)); + pShmNode->exclMask |= mask; + pShmNode->sharedMask &= ~mask; + } + }else{ + if( lockType==F_UNLCK ){ + OSTRACE(("unlock %d failed", ofst)); + }else if( lockType==F_RDLCK ){ + OSTRACE(("read-lock failed")); + }else{ + assert( lockType==F_WRLCK ); + OSTRACE(("write-lock %d failed", ofst)); + } + } + OSTRACE((" - afterwards %03x,%03x\n", + pShmNode->sharedMask, pShmNode->exclMask)); + } +#endif + + return rc; +} + + +/* +** Purge the unixShmNodeList list of all entries with unixShmNode.nRef==0. +** +** This is not a VFS shared-memory method; it is a utility function called +** by VFS shared-memory methods. +*/ +static void unixShmPurge(unixFile *pFd){ + unixShmNode *p = pFd->pInode->pShmNode; + assert( unixMutexHeld() ); + if( p && p->nRef==0 ){ + int i; + assert( p->pInode==pFd->pInode ); + if( p->mutex ) sqlite3_mutex_free(p->mutex); + for(i=0; i<p->nRegion; i++){ + munmap(p->apRegion[i], p->szRegion); + } + sqlite3_free(p->apRegion); + if( p->h>=0 ) close(p->h); + p->pInode->pShmNode = 0; + sqlite3_free(p); + } +} + +/* +** Open a shared-memory area associated with open database file pDbFd. +** This particular implementation uses mmapped files. +** +** The file used to implement shared-memory is in the same directory +** as the open database file and has the same name as the open database +** file with the "-shm" suffix added. For example, if the database file +** is "/home/user1/config.db" then the file that is created and mmapped +** for shared memory will be called "/home/user1/config.db-shm". +** +** Another approach to is to use files in /dev/shm or /dev/tmp or an +** some other tmpfs mount. But if a file in a different directory +** from the database file is used, then differing access permissions +** or a chroot() might cause two different processes on the same +** database to end up using different files for shared memory - +** meaning that their memory would not really be shared - resulting +** in database corruption. Nevertheless, this tmpfs file usage +** can be enabled at compile-time using -DSQLITE_SHM_DIRECTORY="/dev/shm" +** or the equivalent. The use of the SQLITE_SHM_DIRECTORY compile-time +** option results in an incompatible build of SQLite; builds of SQLite +** that with differing SQLITE_SHM_DIRECTORY settings attempt to use the +** same database file at the same time, database corruption will likely +** result. The SQLITE_SHM_DIRECTORY compile-time option is considered +** "unsupported" and may go away in a future SQLite release. +** +** When opening a new shared-memory file, if no other instances of that +** file are currently open, in this process or in other processes, then +** the file must be truncated to zero length or have its header cleared.
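The "dead-man switch" (UNIX_SHM_DMS) used by unixOpenSharedMemory() below works on exactly this principle: the first process to attach can grab a write lock on the switch byte, which proves nobody else has the file open, so it is safe to reset the file; every attached process then holds a read lock on that byte for as long as it keeps the file open. A standalone sketch of the idea using plain fcntl() byte locks (the DMS_BYTE offset and function names are invented for illustration; the real code uses UNIX_SHM_DMS and unixShmSystemLock()):

/* Sketch of an fcntl()-based dead-man switch. */
#include <fcntl.h>
#include <unistd.h>
#include <string.h>

static int setByteLock(int fd, short type, off_t ofst){
  struct flock f;
  memset(&f, 0, sizeof(f));
  f.l_type = type;                  /* F_RDLCK, F_WRLCK or F_UNLCK */
  f.l_whence = SEEK_SET;
  f.l_start = ofst;
  f.l_len = 1;
  return fcntl(fd, F_SETLK, &f);    /* non-blocking, 0 on success */
}

#define DMS_BYTE 128                /* placeholder offset for the sketch */

/* Returns 0 on success. */
static int openDeadManSwitch(int fd){
  if( setByteLock(fd, F_WRLCK, DMS_BYTE)==0 ){
    /* No other process is attached: safe to start from a clean file. */
    if( ftruncate(fd, 0) ) return -1;
  }
  /* Hold a shared lock on the switch byte for the lifetime of fd. */
  return setByteLock(fd, F_RDLCK, DMS_BYTE);
}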
+*/ +static int unixOpenSharedMemory(unixFile *pDbFd){ + struct unixShm *p = 0; /* The connection to be opened */ + struct unixShmNode *pShmNode; /* The underlying mmapped file */ + int rc; /* Result code */ + unixInodeInfo *pInode; /* The inode of fd */ + char *zShmFilename; /* Name of the file used for SHM */ + int nShmFilename; /* Size of the SHM filename in bytes */ + + /* Allocate space for the new unixShm object. */ + p = sqlite3_malloc( sizeof(*p) ); + if( p==0 ) return SQLITE_NOMEM; + memset(p, 0, sizeof(*p)); + assert( pDbFd->pShm==0 ); + + /* Check to see if a unixShmNode object already exists. Reuse an existing + ** one if present. Create a new one if necessary. + */ + unixEnterMutex(); + pInode = pDbFd->pInode; + pShmNode = pInode->pShmNode; + if( pShmNode==0 ){ + struct stat sStat; /* fstat() info for database file */ + + /* Call fstat() to figure out the permissions on the database file. If + ** a new *-shm file is created, an attempt will be made to create it + ** with the same permissions. The actual permissions the file is created + ** with are subject to the current umask setting. + */ + if( fstat(pDbFd->h, &sStat) ){ + rc = SQLITE_IOERR_FSTAT; + goto shm_open_err; + } + +#ifdef SQLITE_SHM_DIRECTORY + nShmFilename = sizeof(SQLITE_SHM_DIRECTORY) + 30; +#else + nShmFilename = 5 + (int)strlen(pDbFd->zPath); +#endif + pShmNode = sqlite3_malloc( sizeof(*pShmNode) + nShmFilename ); + if( pShmNode==0 ){ + rc = SQLITE_NOMEM; + goto shm_open_err; + } + memset(pShmNode, 0, sizeof(*pShmNode)); + zShmFilename = pShmNode->zFilename = (char*)&pShmNode[1]; +#ifdef SQLITE_SHM_DIRECTORY + sqlite3_snprintf(nShmFilename, zShmFilename, + SQLITE_SHM_DIRECTORY "/sqlite-shm-%x-%x", + (u32)sStat.st_ino, (u32)sStat.st_dev); +#else + sqlite3_snprintf(nShmFilename, zShmFilename, "%s-shm", pDbFd->zPath); +#endif + pShmNode->h = -1; + pDbFd->pInode->pShmNode = pShmNode; + pShmNode->pInode = pDbFd->pInode; + pShmNode->mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); + if( pShmNode->mutex==0 ){ + rc = SQLITE_NOMEM; + goto shm_open_err; + } + + pShmNode->h = open(zShmFilename, O_RDWR|O_CREAT, (sStat.st_mode & 0777)); + if( pShmNode->h<0 ){ + rc = SQLITE_CANTOPEN_BKPT; + goto shm_open_err; + } + + /* Check to see if another process is holding the dead-man switch. + ** If not, truncate the file to zero length. + */ + rc = SQLITE_OK; + if( unixShmSystemLock(pShmNode, F_WRLCK, UNIX_SHM_DMS, 1)==SQLITE_OK ){ + if( ftruncate(pShmNode->h, 0) ){ + rc = SQLITE_IOERR_SHMOPEN; + } + } + if( rc==SQLITE_OK ){ + rc = unixShmSystemLock(pShmNode, F_RDLCK, UNIX_SHM_DMS, 1); + } + if( rc ) goto shm_open_err; + } + + /* Make the new connection a child of the unixShmNode */ + p->pShmNode = pShmNode; +#ifdef SQLITE_DEBUG + p->id = pShmNode->nextShmId++; +#endif + pShmNode->nRef++; + pDbFd->pShm = p; + unixLeaveMutex(); + + /* The reference count on pShmNode has already been incremented under + ** the cover of the unixEnterMutex() mutex and the pointer from the + ** new (struct unixShm) object to the pShmNode has been set. All that is + ** left to do is to link the new object into the linked list starting + ** at pShmNode->pFirst. This must be done while holding the pShmNode->mutex + ** mutex. 
+ */ + sqlite3_mutex_enter(pShmNode->mutex); + p->pNext = pShmNode->pFirst; + pShmNode->pFirst = p; + sqlite3_mutex_leave(pShmNode->mutex); + return SQLITE_OK; + + /* Jump here on any error */ +shm_open_err: + unixShmPurge(pDbFd); /* This call frees pShmNode if required */ + sqlite3_free(p); + unixLeaveMutex(); + return rc; +} + +/* +** This function is called to obtain a pointer to region iRegion of the +** shared-memory associated with the database file fd. Shared-memory regions +** are numbered starting from zero. Each shared-memory region is szRegion +** bytes in size. +** +** If an error occurs, an error code is returned and *pp is set to NULL. +** +** Otherwise, if the bExtend parameter is 0 and the requested shared-memory +** region has not been allocated (by any client, including one running in a +** separate process), then *pp is set to NULL and SQLITE_OK returned. If +** bExtend is non-zero and the requested shared-memory region has not yet +** been allocated, it is allocated by this function. +** +** If the shared-memory region has already been allocated or is allocated by +** this call as described above, then it is mapped into this processes +** address space (if it is not already), *pp is set to point to the mapped +** memory and SQLITE_OK returned. +*/ +static int unixShmMap( + sqlite3_file *fd, /* Handle open on database file */ + int iRegion, /* Region to retrieve */ + int szRegion, /* Size of regions */ + int bExtend, /* True to extend file if necessary */ + void volatile **pp /* OUT: Mapped memory */ +){ + unixFile *pDbFd = (unixFile*)fd; + unixShm *p; + unixShmNode *pShmNode; + int rc = SQLITE_OK; + + /* If the shared-memory file has not yet been opened, open it now. */ + if( pDbFd->pShm==0 ){ + rc = unixOpenSharedMemory(pDbFd); + if( rc!=SQLITE_OK ) return rc; + } + + p = pDbFd->pShm; + pShmNode = p->pShmNode; + sqlite3_mutex_enter(pShmNode->mutex); + assert( szRegion==pShmNode->szRegion || pShmNode->nRegion==0 ); + + if( pShmNode->nRegion<=iRegion ){ + char **apNew; /* New apRegion[] array */ + int nByte = (iRegion+1)*szRegion; /* Minimum required file size */ + struct stat sStat; /* Used by fstat() */ + + pShmNode->szRegion = szRegion; + + /* The requested region is not mapped into this processes address space. + ** Check to see if it has been allocated (i.e. if the wal-index file is + ** large enough to contain the requested region). + */ + if( fstat(pShmNode->h, &sStat) ){ + rc = SQLITE_IOERR_SHMSIZE; + goto shmpage_out; + } + + if( sStat.st_size<nByte ){ + /* The requested memory region does not exist. If bExtend is set to + ** false, exit early. *pp will be set to NULL and SQLITE_OK returned. + ** + ** Alternatively, if bExtend is true, use ftruncate() to allocate + ** the requested memory region. + */ + if( !bExtend ) goto shmpage_out; + if( ftruncate(pShmNode->h, nByte) ){ + rc = SQLITE_IOERR_SHMSIZE; + goto shmpage_out; + } + } + + /* Map the requested memory region into this processes address space. */ + apNew = (char **)sqlite3_realloc( + pShmNode->apRegion, (iRegion+1)*sizeof(char *) + ); + if( !apNew ){ + rc = SQLITE_IOERR_NOMEM; + goto shmpage_out; + } + pShmNode->apRegion = apNew; + while(pShmNode->nRegion<=iRegion){ + void *pMem = mmap(0, szRegion, PROT_READ|PROT_WRITE, + MAP_SHARED, pShmNode->h, iRegion*szRegion + ); + if( pMem==MAP_FAILED ){ + rc = SQLITE_IOERR; + goto shmpage_out; + } + pShmNode->apRegion[pShmNode->nRegion] = pMem; + pShmNode->nRegion++; + } + } + +shmpage_out: + if( pShmNode->nRegion>iRegion ){ + *pp = pShmNode->apRegion[iRegion]; + }else{ + *pp = 0; + } + sqlite3_mutex_leave(pShmNode->mutex); + return rc; +} + +/* +** Change the lock state for a shared-memory segment. +** +** Note that the relationship between SHAREd and EXCLUSIVE locks is a little +** different here than in posix.
In xShmLock(), one can go from unlocked + to shared and back or from unlocked to exclusive and back. But one may + not go from shared to exclusive or from exclusive to shared. +*/ +static int unixShmLock( + sqlite3_file *fd, /* Database file holding the shared memory */ + int ofst, /* First lock to acquire or release */ + int n, /* Number of locks to acquire or release */ + int flags /* What to do with the lock */ +){ + unixFile *pDbFd = (unixFile*)fd; /* Connection holding shared memory */ + unixShm *p = pDbFd->pShm; /* The shared memory being locked */ + unixShm *pX; /* For looping over all siblings */ + unixShmNode *pShmNode = p->pShmNode; /* The underlying file iNode */ + int rc = SQLITE_OK; /* Result code */ + u16 mask; /* Mask of locks to take or release */ + + assert( pShmNode==pDbFd->pInode->pShmNode ); + assert( pShmNode->pInode==pDbFd->pInode ); + assert( ofst>=0 && ofst+n<=SQLITE_SHM_NLOCK ); + assert( n>=1 ); + assert( flags==(SQLITE_SHM_LOCK | SQLITE_SHM_SHARED) + || flags==(SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE) + || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED) + || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE) ); + assert( n==1 || (flags & SQLITE_SHM_EXCLUSIVE)!=0 ); + + mask = (1<<(ofst+n)) - (1<<ofst); + assert( n>1 || mask==(1<<ofst) ); + sqlite3_mutex_enter(pShmNode->mutex); + if( flags & SQLITE_SHM_UNLOCK ){ + u16 allMask = 0; /* Mask of locks held by siblings */ + + /* See if any siblings hold this same lock */ + for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ + if( pX==p ) continue; + assert( (pX->exclMask & (p->exclMask|p->sharedMask))==0 ); + allMask |= pX->sharedMask; + } + + /* Unlock the system-level locks */ + if( (mask & allMask)==0 ){ + rc = unixShmSystemLock(pShmNode, F_UNLCK, ofst+UNIX_SHM_BASE, n); + }else{ + rc = SQLITE_OK; + } + + /* Undo the local locks */ + if( rc==SQLITE_OK ){ + p->exclMask &= ~mask; + p->sharedMask &= ~mask; + } + }else if( flags & SQLITE_SHM_SHARED ){ + u16 allShared = 0; /* Union of locks held by connections other than "p" */ + + /* Find out which shared locks are already held by sibling connections. + ** If any sibling already holds an exclusive lock, go ahead and return + ** SQLITE_BUSY. + */ + for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ + if( (pX->exclMask & mask)!=0 ){ + rc = SQLITE_BUSY; + break; + } + allShared |= pX->sharedMask; + } + + /* Get shared locks at the system level, if necessary */ + if( rc==SQLITE_OK ){ + if( (allShared & mask)==0 ){ + rc = unixShmSystemLock(pShmNode, F_RDLCK, ofst+UNIX_SHM_BASE, n); + }else{ + rc = SQLITE_OK; + } + } + + /* Get the local shared locks */ + if( rc==SQLITE_OK ){ + p->sharedMask |= mask; + } + }else{ + /* Make sure no sibling connections hold locks that will block this + ** lock. If any do, return SQLITE_BUSY right away. + */ + for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ + if( (pX->exclMask & mask)!=0 || (pX->sharedMask & mask)!=0 ){ + rc = SQLITE_BUSY; + break; + } + } + + /* Get the exclusive locks at the system level. Then if successful + ** also mark the local connection as being locked. + */ + if( rc==SQLITE_OK ){ + rc = unixShmSystemLock(pShmNode, F_WRLCK, ofst+UNIX_SHM_BASE, n); + if( rc==SQLITE_OK ){ + assert( (p->sharedMask & mask)==0 ); + p->exclMask |= mask; + } + } + } + sqlite3_mutex_leave(pShmNode->mutex); + OSTRACE(("SHM-LOCK shmid-%d, pid-%d got %03x,%03x\n", + p->id, getpid(), p->sharedMask, p->exclMask)); + return rc; +} + +/* +** Implement a memory barrier or memory fence on shared memory. +** +** All loads and stores begun before the barrier must complete before +** any load or store begun after the barrier.
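unixShmBarrier(), defined just below, gets its ordering guarantee from a mutex enter/leave round trip. On GCC-compatible compilers the same full barrier could be expressed with a builtin; the following is only an illustrative alternative, not what the patch does:

/* Alternative full memory barrier using the GCC/Clang builtin; purely an
** illustration of what the mutex round-trip below stands in for. */
void shmBarrierSketch(void){
#if defined(__GNUC__)
  __sync_synchronize();       /* full hardware + compiler barrier */
#else
  /* Fall back to a lock/unlock pair, as the patch does. */
#endif
}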
+*/ +static void unixShmBarrier( + sqlite3_file *fd /* Database file holding the shared memory */ +){ + UNUSED_PARAMETER(fd); + unixEnterMutex(); + unixLeaveMutex(); +} + +/* +** Close a connection to shared-memory. Delete the underlying +** storage if deleteFlag is true. +** +** If there is no shared memory associated with the connection then this +** routine is a harmless no-op. +*/ +static int unixShmUnmap( + sqlite3_file *fd, /* The underlying database file */ + int deleteFlag /* Delete shared-memory if true */ +){ + unixShm *p; /* The connection to be closed */ + unixShmNode *pShmNode; /* The underlying shared-memory file */ + unixShm **pp; /* For looping over sibling connections */ + unixFile *pDbFd; /* The underlying database file */ + + pDbFd = (unixFile*)fd; + p = pDbFd->pShm; + if( p==0 ) return SQLITE_OK; + pShmNode = p->pShmNode; + + assert( pShmNode==pDbFd->pInode->pShmNode ); + assert( pShmNode->pInode==pDbFd->pInode ); + + /* Remove connection p from the set of connections associated + ** with pShmNode */ + sqlite3_mutex_enter(pShmNode->mutex); + for(pp=&pShmNode->pFirst; (*pp)!=p; pp = &(*pp)->pNext){} + *pp = p->pNext; + + /* Free the connection p */ + sqlite3_free(p); + pDbFd->pShm = 0; + sqlite3_mutex_leave(pShmNode->mutex); + + /* If pShmNode->nRef has reached 0, then close the underlying + ** shared-memory file, too */ + unixEnterMutex(); + assert( pShmNode->nRef>0 ); + pShmNode->nRef--; + if( pShmNode->nRef==0 ){ + if( deleteFlag ) unlink(pShmNode->zFilename); + unixShmPurge(pDbFd); + } + unixLeaveMutex(); + + return SQLITE_OK; +} + + +#else +# define unixShmMap 0 +# define unixShmLock 0 +# define unixShmBarrier 0 +# define unixShmUnmap 0 +#endif /* #ifndef SQLITE_OMIT_WAL */ + /* ** Here ends the implementation of all sqlite3_file methods. ** @@ -3433,9 +3742,9 @@ static int unixDeviceCharacteristics(sqlite3_file *NotUsed){ ** * An I/O method finder function called FINDER that returns a pointer ** to the METHOD object in the previous bullet. 
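The bullet list above describes a pair of generated objects per locking style: a static sqlite3_io_methods instance and a "finder" function that returns a pointer to it (after this patch, the posix finder reports iVersion 2 so the new xShmMap/xShmLock/xShmBarrier/xShmUnmap slots are usable, while the other styles stay at iVersion 1). The standalone sketch below illustrates the finder-function pattern with invented names; it is not the macro expansion itself:

/* Sketch of the finder-function pattern: a finder inspects the file
** (here: ignores it) and returns a pointer to a static method table. */
#include <stdio.h>

typedef struct io_methods io_methods;
struct io_methods {
  int iVersion;               /* 2 means shared-memory slots are usable */
  const char *zName;          /* for the demo only */
};

static const io_methods posixStyleMethods  = { 2, "posix" };
static const io_methods nolockStyleMethods = { 1, "none" };

typedef const io_methods *(*finder_type)(const char *zPath);

static const io_methods *demoFinder(const char *zPath){
  (void)zPath;                /* a real finder might stat() the file */
  return &posixStyleMethods;
}

int main(void){
  finder_type pFinder = demoFinder;
  const io_methods *p = pFinder("test.db");
  printf("%s iVersion=%d\n", p->zName, p->iVersion);  /* posix iVersion=2 */
  (void)nolockStyleMethods;
  return 0;
}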
*/ -#define IOMETHODS(FINDER, METHOD, CLOSE, LOCK, UNLOCK, CKLOCK) \ +#define IOMETHODS(FINDER, METHOD, VERSION, CLOSE, LOCK, UNLOCK, CKLOCK) \ static const sqlite3_io_methods METHOD = { \ - 1, /* iVersion */ \ + VERSION, /* iVersion */ \ CLOSE, /* xClose */ \ unixRead, /* xRead */ \ unixWrite, /* xWrite */ \ @@ -3447,7 +3756,11 @@ static const sqlite3_io_methods METHOD = { \ CKLOCK, /* xCheckReservedLock */ \ unixFileControl, /* xFileControl */ \ unixSectorSize, /* xSectorSize */ \ - unixDeviceCharacteristics /* xDeviceCapabilities */ \ + unixDeviceCharacteristics, /* xDeviceCapabilities */ \ + unixShmMap, /* xShmMap */ \ + unixShmLock, /* xShmLock */ \ + unixShmBarrier, /* xShmBarrier */ \ + unixShmUnmap /* xShmUnmap */ \ }; \ static const sqlite3_io_methods *FINDER##Impl(const char *z, unixFile *p){ \ UNUSED_PARAMETER(z); UNUSED_PARAMETER(p); \ @@ -3464,6 +3777,7 @@ static const sqlite3_io_methods *(*const FINDER)(const char*,unixFile *p) \ IOMETHODS( posixIoFinder, /* Finder function name */ posixIoMethods, /* sqlite3_io_methods object name */ + 2, /* shared memory is enabled */ unixClose, /* xClose method */ unixLock, /* xLock method */ unixUnlock, /* xUnlock method */ @@ -3472,6 +3786,7 @@ IOMETHODS( IOMETHODS( nolockIoFinder, /* Finder function name */ nolockIoMethods, /* sqlite3_io_methods object name */ + 1, /* shared memory is disabled */ nolockClose, /* xClose method */ nolockLock, /* xLock method */ nolockUnlock, /* xUnlock method */ @@ -3480,6 +3795,7 @@ IOMETHODS( IOMETHODS( dotlockIoFinder, /* Finder function name */ dotlockIoMethods, /* sqlite3_io_methods object name */ + 1, /* shared memory is disabled */ dotlockClose, /* xClose method */ dotlockLock, /* xLock method */ dotlockUnlock, /* xUnlock method */ @@ -3490,6 +3806,7 @@ IOMETHODS( IOMETHODS( flockIoFinder, /* Finder function name */ flockIoMethods, /* sqlite3_io_methods object name */ + 1, /* shared memory is disabled */ flockClose, /* xClose method */ flockLock, /* xLock method */ flockUnlock, /* xUnlock method */ @@ -3501,6 +3818,7 @@ IOMETHODS( IOMETHODS( semIoFinder, /* Finder function name */ semIoMethods, /* sqlite3_io_methods object name */ + 1, /* shared memory is disabled */ semClose, /* xClose method */ semLock, /* xLock method */ semUnlock, /* xUnlock method */ @@ -3512,6 +3830,7 @@ IOMETHODS( IOMETHODS( afpIoFinder, /* Finder function name */ afpIoMethods, /* sqlite3_io_methods object name */ + 1, /* shared memory is disabled */ afpClose, /* xClose method */ afpLock, /* xLock method */ afpUnlock, /* xUnlock method */ @@ -3536,6 +3855,7 @@ static int proxyCheckReservedLock(sqlite3_file*, int*); IOMETHODS( proxyIoFinder, /* Finder function name */ proxyIoMethods, /* sqlite3_io_methods object name */ + 1, /* shared memory is disabled */ proxyClose, /* xClose method */ proxyLock, /* xLock method */ proxyUnlock, /* xUnlock method */ @@ -3548,6 +3868,7 @@ IOMETHODS( IOMETHODS( nfsIoFinder, /* Finder function name */ nfsIoMethods, /* sqlite3_io_methods object name */ + 1, /* shared memory is disabled */ unixClose, /* xClose method */ unixLock, /* xLock method */ nfsUnlock, /* xUnlock method */ @@ -3688,19 +4009,19 @@ static int fillInUnixFile( unixFile *pNew = (unixFile *)pId; int rc = SQLITE_OK; - assert( pNew->pLock==NULL ); - assert( pNew->pOpen==NULL ); + assert( pNew->pInode==NULL ); /* Parameter isDelete is only used on vxworks. Express this explicitly ** here to prevent compiler warnings about unused parameters. 
*/ UNUSED_PARAMETER(isDelete); - OSTRACE3("OPEN %-3d %s\n", h, zFilename); + OSTRACE(("OPEN %-3d %s\n", h, zFilename)); pNew->h = h; pNew->dirfd = dirfd; - SET_THREADID(pNew); pNew->fileFlags = 0; + assert( zFilename==0 || zFilename[0]=='/' ); /* Never a relative pathname */ + pNew->zPath = zFilename; #if OS_VXWORKS pNew->pId = vxworksFindFileId(zFilename); @@ -3728,10 +4049,10 @@ static int fillInUnixFile( #endif ){ unixEnterMutex(); - rc = findLockInfo(pNew, &pNew->pLock, &pNew->pOpen); + rc = findInodeInfo(pNew, &pNew->pInode); if( rc!=SQLITE_OK ){ - /* If an error occured in findLockInfo(), close the file descriptor - ** immediately, before releasing the mutex. findLockInfo() may fail + /* If an error occured in findInodeInfo(), close the file descriptor + ** immediately, before releasing the mutex. findInodeInfo() may fail ** in two scenarios: ** ** (a) A call to fstat() failed. @@ -3740,7 +4061,7 @@ static int fillInUnixFile( ** Scenario (b) may only occur if the process is holding no other ** file descriptors open on the same file. If there were other file ** descriptors on this file, then no malloc would be required by - ** findLockInfo(). If this is the case, it is quite safe to close + ** findInodeInfo(). If this is the case, it is quite safe to close ** handle h - as it is guaranteed that no posix locks will be released ** by doing so. ** @@ -3771,7 +4092,7 @@ static int fillInUnixFile( pCtx->reserved = 0; srandomdev(); unixEnterMutex(); - rc = findLockInfo(pNew, &pNew->pLock, &pNew->pOpen); + rc = findInodeInfo(pNew, &pNew->pInode); if( rc!=SQLITE_OK ){ sqlite3_free(pNew->lockingContext); close(h); @@ -3804,18 +4125,18 @@ static int fillInUnixFile( ** included in the semLockingContext */ unixEnterMutex(); - rc = findLockInfo(pNew, &pNew->pLock, &pNew->pOpen); - if( (rc==SQLITE_OK) && (pNew->pOpen->pSem==NULL) ){ - char *zSemName = pNew->pOpen->aSemName; + rc = findInodeInfo(pNew, &pNew->pInode); + if( (rc==SQLITE_OK) && (pNew->pInode->pSem==NULL) ){ + char *zSemName = pNew->pInode->aSemName; int n; sqlite3_snprintf(MAX_PATHNAME, zSemName, "/%s.sem", pNew->pId->zCanonicalName); for( n=1; zSemName[n]; n++ ) if( zSemName[n]=='/' ) zSemName[n] = '_'; - pNew->pOpen->pSem = sem_open(zSemName, O_CREAT, 0666, 1); - if( pNew->pOpen->pSem == SEM_FAILED ){ + pNew->pInode->pSem = sem_open(zSemName, O_CREAT, 0666, 1); + if( pNew->pInode->pSem == SEM_FAILED ){ rc = SQLITE_NOMEM; - pNew->pOpen->aSemName[0] = '\0'; + pNew->pInode->aSemName[0] = '\0'; } } unixLeaveMutex(); @@ -3866,7 +4187,7 @@ static int openDirectory(const char *zFilename, int *pFd){ #ifdef FD_CLOEXEC fcntl(fd, F_SETFD, fcntl(fd, F_GETFD, 0) | FD_CLOEXEC); #endif - OSTRACE3("OPENDIR %-3d %s\n", fd, zDirname); + OSTRACE(("OPENDIR %-3d %s\n", fd, zDirname)); } } *pFd = fd; @@ -3874,26 +4195,46 @@ static int openDirectory(const char *zFilename, int *pFd){ } /* -** Create a temporary file name in zBuf. zBuf must be allocated -** by the calling process and must be big enough to hold at least -** pVfs->mxPathname bytes. +** Return the name of a directory in which to put temporary files. +** If no suitable temporary file directory can be found, return NULL. */ -static int getTempname(int nBuf, char *zBuf){ +static const char *unixTempFileDir(void){ static const char *azDirs[] = { 0, 0, "/var/tmp", "/usr/tmp", "/tmp", - ".", + 0 /* List terminator */ }; + unsigned int i; + struct stat buf; + const char *zDir = 0; + + azDirs[0] = sqlite3_temp_directory; + if( !azDirs[1] ) azDirs[1] = getenv("TMPDIR"); + for(i=0; imxPathname bytes. 
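unixTempFileDir() above picks a writable directory; unixGetTempname(), defined next, then appends a random suffix drawn from its zChars alphabet. A rough standalone sketch of that scheme (the "sketch_" prefix and the 15-character suffix length are illustrative assumptions, not SQLite's actual values):

/* Sketch: build a temp-file name from a directory plus a random suffix. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static void makeTempName(const char *zDir, char *zBuf, int nBuf){
  static const char zChars[] =
    "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
  int i, n;
  n = snprintf(zBuf, (size_t)nBuf, "%s/sketch_", zDir);
  if( n<0 || n>nBuf-17 ) return;       /* buffer too small: keep prefix only */
  for(i=0; i<15; i++){
    zBuf[n+i] = zChars[rand()%(int)(sizeof(zChars)-1)];
  }
  zBuf[n+15] = 0;
}

int main(void){
  char zName[512];
  srand((unsigned)time(0));
  makeTempName("/tmp", zName, (int)sizeof(zName));
  printf("%s\n", zName);
  return 0;
}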
+*/ +static int unixGetTempname(int nBuf, char *zBuf){ static const unsigned char zChars[] = "abcdefghijklmnopqrstuvwxyz" "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "0123456789"; unsigned int i, j; - struct stat buf; - const char *zDir = "."; + const char *zDir; /* It's odd to simulate an io-error here, but really this is just ** using the io-error infrastructure to test that SQLite handles this @@ -3901,19 +4242,8 @@ static int getTempname(int nBuf, char *zBuf){ */ SimulateIOError( return SQLITE_IOERR ); - azDirs[0] = sqlite3_temp_directory; - if (NULL == azDirs[1]) { - azDirs[1] = getenv("TMPDIR"); - } - - for(i=0; ifileId.dev!=sStat.st_dev - || pOpen->fileId.ino!=sStat.st_ino) ){ - pOpen = pOpen->pNext; + pInode = inodeList; + while( pInode && (pInode->fileId.dev!=sStat.st_dev + || pInode->fileId.ino!=sStat.st_ino) ){ + pInode = pInode->pNext; } - if( pOpen ){ + if( pInode ){ UnixUnusedFd **pp; - for(pp=&pOpen->pUnused; *pp && (*pp)->flags!=flags; pp=&((*pp)->pNext)); + for(pp=&pInode->pUnused; *pp && (*pp)->flags!=flags; pp=&((*pp)->pNext)); pUnused = *pp; if( pUnused ){ *pp = pUnused->pNext; @@ -4001,6 +4331,51 @@ static UnixUnusedFd *findReusableFd(const char *zPath, int flags){ return pUnused; } +/* +** This function is called by unixOpen() to determine the unix permissions +** to create new files with. If no error occurs, then SQLITE_OK is returned +** and a value suitable for passing as the third argument to open(2) is +** written to *pMode. If an IO error occurs, an SQLite error code is +** returned and the value of *pMode is not modified. +** +** If the file being opened is a temporary file, it is always created with +** the octal permissions 0600 (read/writable by owner only). If the file +** is a database or master journal file, it is created with the permissions +** mask SQLITE_DEFAULT_FILE_PERMISSIONS. +** +** Finally, if the file being opened is a WAL or regular journal file, then +** this function queries the file-system for the permissions on the +** corresponding database file and sets *pMode to this value. Whenever +** possible, WAL and journal files are created using the same permissions +** as the associated database file. +*/ +static int findCreateFileMode( + const char *zPath, /* Path of file (possibly) being created */ + int flags, /* Flags passed as 4th argument to xOpen() */ + mode_t *pMode /* OUT: Permissions to open file with */ +){ + int rc = SQLITE_OK; /* Return Code */ + if( flags & (SQLITE_OPEN_WAL|SQLITE_OPEN_MAIN_JOURNAL) ){ + char zDb[MAX_PATHNAME+1]; /* Database file path */ + int nDb; /* Number of valid bytes in zDb */ + struct stat sStat; /* Output of stat() on database file */ + + nDb = sqlite3Strlen30(zPath) - ((flags & SQLITE_OPEN_WAL) ? 4 : 8); + memcpy(zDb, zPath, nDb); + zDb[nDb] = '\0'; + if( 0==stat(zDb, &sStat) ){ + *pMode = sStat.st_mode & 0777; + }else{ + rc = SQLITE_IOERR_FSTAT; + } + }else if( flags & SQLITE_OPEN_DELETEONCLOSE ){ + *pMode = 0600; + }else{ + *pMode = SQLITE_DEFAULT_FILE_PERMISSIONS; + } + return rc; +} + /* ** Open the file zPath. ** @@ -4051,9 +4426,11 @@ static int unixOpen( ** a file-descriptor on the directory too. The first time unixSync() ** is called the directory file descriptor will be fsync()ed and close()d. 
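The directory file descriptor mentioned above exists so that unixSync() can fsync() the containing directory after a journal file is first created, making the new directory entry itself durable and not just the file's contents. A minimal standalone sketch of that pattern (illustrative only):

/* Sketch: make a newly created file durable by fsync()ing both the file
** and its containing directory.  Error handling kept minimal. */
#include <fcntl.h>
#include <unistd.h>

static int syncFileAndDir(int fd, const char *zDirName){
  int dirfd, rc = 0;
  if( fsync(fd) ) return -1;             /* flush the file contents */
  dirfd = open(zDirName, O_RDONLY);      /* handle on the directory */
  if( dirfd<0 ) return -1;
  if( fsync(dirfd) ) rc = -1;            /* flush the directory entry */
  close(dirfd);
  return rc;
}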
*/ - int isOpenDirectory = (isCreate && - (eType==SQLITE_OPEN_MASTER_JOURNAL || eType==SQLITE_OPEN_MAIN_JOURNAL) - ); + int isOpenDirectory = (isCreate && ( + eType==SQLITE_OPEN_MASTER_JOURNAL + || eType==SQLITE_OPEN_MAIN_JOURNAL + || eType==SQLITE_OPEN_WAL + )); /* If argument zPath is a NULL pointer, this function is required to open ** a temporary file. Use this buffer to store the file name in. @@ -4073,17 +4450,18 @@ static int unixOpen( assert(isExclusive==0 || isCreate); assert(isDelete==0 || isCreate); - /* The main DB, main journal, and master journal are never automatically - ** deleted. Nor are they ever temporary files. */ + /* The main DB, main journal, WAL file and master journal are never + ** automatically deleted. Nor are they ever temporary files. */ assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MAIN_DB ); assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MAIN_JOURNAL ); assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MASTER_JOURNAL ); + assert( (!isDelete && zName) || eType!=SQLITE_OPEN_WAL ); /* Assert that the upper layer has set one of the "file-type" flags. */ assert( eType==SQLITE_OPEN_MAIN_DB || eType==SQLITE_OPEN_TEMP_DB || eType==SQLITE_OPEN_MAIN_JOURNAL || eType==SQLITE_OPEN_TEMP_JOURNAL || eType==SQLITE_OPEN_SUBJOURNAL || eType==SQLITE_OPEN_MASTER_JOURNAL - || eType==SQLITE_OPEN_TRANSIENT_DB + || eType==SQLITE_OPEN_TRANSIENT_DB || eType==SQLITE_OPEN_WAL ); memset(p, 0, sizeof(unixFile)); @@ -4103,7 +4481,7 @@ static int unixOpen( }else if( !zName ){ /* If zName is NULL, the upper layer is requesting a temp file. */ assert(isDelete && !isOpenDirectory); - rc = getTempname(MAX_PATHNAME+1, zTmpname); + rc = unixGetTempname(MAX_PATHNAME+1, zTmpname); if( rc!=SQLITE_OK ){ return rc; } @@ -4121,9 +4499,15 @@ static int unixOpen( openFlags |= (O_LARGEFILE|O_BINARY); if( fd<0 ){ - mode_t openMode = (isDelete?0600:SQLITE_DEFAULT_FILE_PERMISSIONS); + mode_t openMode; /* Permissions to create file with */ + rc = findCreateFileMode(zName, flags, &openMode); + if( rc!=SQLITE_OK ){ + assert( !p->pUnused ); + assert( eType==SQLITE_OPEN_WAL || eType==SQLITE_OPEN_MAIN_JOURNAL ); + return rc; + } fd = open(zName, openFlags, openMode); - OSTRACE4("OPENX %-3d %s 0%o\n", fd, zName, openFlags); + OSTRACE(("OPENX %-3d %s 0%o\n", fd, zName, openFlags)); if( fd<0 && errno!=EISDIR && isReadWrite && !isExclusive ){ /* Failed to open the file for read/write access. Try read-only. */ flags &= ~(SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE); @@ -4264,7 +4648,9 @@ static int unixDelete( int rc = SQLITE_OK; UNUSED_PARAMETER(NotUsed); SimulateIOError(return SQLITE_IOERR_DELETE); - unlink(zPath); + if( unlink(zPath)==(-1) && errno!=ENOENT ){ + return SQLITE_IOERR_DELETE; + } #ifndef SQLITE_DISABLE_DIRSYNC if( dirSync ){ int fd; @@ -4321,6 +4707,12 @@ static int unixAccess( assert(!"Invalid flags argument"); } *pResOut = (access(zPath, amode)==0); + if( flags==SQLITE_ACCESS_EXISTS && *pResOut ){ + struct stat buf; + if( 0==stat(zPath, &buf) && buf.st_size==0 ){ + *pResOut = 0; + } + } return SQLITE_OK; } @@ -4508,36 +4900,50 @@ static int unixSleep(sqlite3_vfs *NotUsed, int microseconds){ int sqlite3_current_time = 0; /* Fake system time in seconds since 1970. */ #endif +/* +** Find the current time (in Universal Coordinated Time). Write into *piNow +** the current time and date as a Julian Day number times 86_400_000. 
In +** other words, write into *piNow the number of milliseconds since the Julian +** epoch of noon in Greenwich on November 24, 4714 B.C according to the +** proleptic Gregorian calendar. +** +** On success, return 0. Return 1 if the time and date cannot be found. +*/ +static int unixCurrentTimeInt64(sqlite3_vfs *NotUsed, sqlite3_int64 *piNow){ + static const sqlite3_int64 unixEpoch = 24405875*(sqlite3_int64)8640000; +#if defined(NO_GETTOD) + time_t t; + time(&t); + *piNow = ((sqlite3_int64)i)*1000 + unixEpoch; +#elif OS_VXWORKS + struct timespec sNow; + clock_gettime(CLOCK_REALTIME, &sNow); + *piNow = unixEpoch + 1000*(sqlite3_int64)sNow.tv_sec + sNow.tv_nsec/1000000; +#else + struct timeval sNow; + gettimeofday(&sNow, 0); + *piNow = unixEpoch + 1000*(sqlite3_int64)sNow.tv_sec + sNow.tv_usec/1000; +#endif + +#ifdef SQLITE_TEST + if( sqlite3_current_time ){ + *piNow = 1000*(sqlite3_int64)sqlite3_current_time + unixEpoch; + } +#endif + UNUSED_PARAMETER(NotUsed); + return 0; +} + /* ** Find the current time (in Universal Coordinated Time). Write the ** current time and date as a Julian Day number into *prNow and ** return 0. Return 1 if the time and date cannot be found. */ static int unixCurrentTime(sqlite3_vfs *NotUsed, double *prNow){ -#if defined(SQLITE_OMIT_FLOATING_POINT) - time_t t; - time(&t); - *prNow = (((sqlite3_int64)t)/8640 + 24405875)/10; -#elif defined(NO_GETTOD) - time_t t; - time(&t); - *prNow = t/86400.0 + 2440587.5; -#elif OS_VXWORKS - struct timespec sNow; - clock_gettime(CLOCK_REALTIME, &sNow); - *prNow = 2440587.5 + sNow.tv_sec/86400.0 + sNow.tv_nsec/86400000000000.0; -#else - struct timeval sNow; - gettimeofday(&sNow, 0); - *prNow = 2440587.5 + sNow.tv_sec/86400.0 + sNow.tv_usec/86400000000.0; -#endif - -#ifdef SQLITE_TEST - if( sqlite3_current_time ){ - *prNow = sqlite3_current_time/86400.0 + 2440587.5; - } -#endif + sqlite3_int64 i; UNUSED_PARAMETER(NotUsed); + unixCurrentTimeInt64(0, &i); + *prNow = i/86400000.0; return 0; } @@ -4555,6 +4961,7 @@ static int unixGetLastError(sqlite3_vfs *NotUsed, int NotUsed2, char *NotUsed3){ return 0; } + /* ************************ End of sqlite3_vfs methods *************************** ******************************************************************************/ @@ -4745,8 +5152,8 @@ static int proxyGetLockPath(const char *dbPath, char *lPath, size_t maxLen){ # ifdef _CS_DARWIN_USER_TEMP_DIR { if( !confstr(_CS_DARWIN_USER_TEMP_DIR, lPath, maxLen) ){ - OSTRACE4("GETLOCKPATH failed %s errno=%d pid=%d\n", - lPath, errno, getpid()); + OSTRACE(("GETLOCKPATH failed %s errno=%d pid=%d\n", + lPath, errno, getpid())); return SQLITE_IOERR_LOCK; } len = strlcat(lPath, "sqliteplocks", maxLen); @@ -4762,13 +5169,13 @@ static int proxyGetLockPath(const char *dbPath, char *lPath, size_t maxLen){ /* transform the db path to a unique cache name */ dbLen = (int)strlen(dbPath); - for( i=0; ih, - (pCtx->lockProxyPath ? pCtx->lockProxyPath : ":auto:"), getpid()); + OSTRACE(("TAKECONCH %d for %s pid=%d\n", conchFile->h, + (pCtx->lockProxyPath ? pCtx->lockProxyPath : ":auto:"), getpid())); rc = proxyGetHostID(myHostID, &pError); if( (rc&0xff)==SQLITE_IOERR ){ @@ -5177,7 +5588,7 @@ static int proxyTakeConch(unixFile *pFile){ */ futimes(conchFile->h, NULL); if( hostIdMatch && !createConch ){ - if( conchFile->pLock && conchFile->pLock->cnt>1 ){ + if( conchFile->pInode && conchFile->pInode->nShared>1 ){ /* We are trying for an exclusive lock but another thread in this ** same process is still holding a shared lock. 
*/ rc = SQLITE_BUSY; @@ -5233,7 +5644,7 @@ static int proxyTakeConch(unixFile *pFile){ conchFile->pMethod->xUnlock((sqlite3_file*)conchFile, SHARED_LOCK); end_takeconch: - OSTRACE2("TRANSPROXY: CLOSE %d\n", pFile->h); + OSTRACE(("TRANSPROXY: CLOSE %d\n", pFile->h)); if( rc==SQLITE_OK && pFile->openFlags ){ if( pFile->h>=0 ){ #ifdef STRICT_CLOSE_ERROR @@ -5248,7 +5659,7 @@ static int proxyTakeConch(unixFile *pFile){ pFile->h = -1; int fd = open(pCtx->dbPath, pFile->openFlags, SQLITE_DEFAULT_FILE_PERMISSIONS); - OSTRACE2("TRANSPROXY: OPEN %d\n", fd); + OSTRACE(("TRANSPROXY: OPEN %d\n", fd)); if( fd>=0 ){ pFile->h = fd; }else{ @@ -5290,9 +5701,11 @@ static int proxyTakeConch(unixFile *pFile){ } else { conchFile->pMethod->xUnlock((sqlite3_file*)conchFile, NO_LOCK); } - OSTRACE3("TAKECONCH %d %s\n", conchFile->h, rc==SQLITE_OK?"ok":"failed"); + OSTRACE(("TAKECONCH %d %s\n", conchFile->h, + rc==SQLITE_OK?"ok":"failed")); return rc; - } while (1); /* in case we need to retry the :auto: lock file - we should never get here except via the 'continue' call. */ + } while (1); /* in case we need to retry the :auto: lock file - + ** we should never get here except via the 'continue' call. */ } } @@ -5300,21 +5713,21 @@ static int proxyTakeConch(unixFile *pFile){ ** If pFile holds a lock on a conch file, then release that lock. */ static int proxyReleaseConch(unixFile *pFile){ - int rc; /* Subroutine return code */ + int rc = SQLITE_OK; /* Subroutine return code */ proxyLockingContext *pCtx; /* The locking context for the proxy lock */ unixFile *conchFile; /* Name of the conch file */ pCtx = (proxyLockingContext *)pFile->lockingContext; conchFile = pCtx->conchFile; - OSTRACE4("RELEASECONCH %d for %s pid=%d\n", conchFile->h, + OSTRACE(("RELEASECONCH %d for %s pid=%d\n", conchFile->h, (pCtx->lockProxyPath ? pCtx->lockProxyPath : ":auto:"), - getpid()); + getpid())); if( pCtx->conchHeld>0 ){ rc = conchFile->pMethod->xUnlock((sqlite3_file*)conchFile, NO_LOCK); } pCtx->conchHeld = 0; - OSTRACE3("RELEASECONCH %d %s\n", conchFile->h, - (rc==SQLITE_OK ? "ok" : "failed")); + OSTRACE(("RELEASECONCH %d %s\n", conchFile->h, + (rc==SQLITE_OK ? "ok" : "failed"))); return rc; } @@ -5371,7 +5784,7 @@ static int switchLockProxyPath(unixFile *pFile, const char *path) { char *oldPath = pCtx->lockProxyPath; int rc = SQLITE_OK; - if( pFile->locktype!=NO_LOCK ){ + if( pFile->eFileLock!=NO_LOCK ){ return SQLITE_BUSY; } @@ -5438,7 +5851,7 @@ static int proxyTransformUnixFile(unixFile *pFile, const char *path) { char *lockPath=NULL; int rc = SQLITE_OK; - if( pFile->locktype!=NO_LOCK ){ + if( pFile->eFileLock!=NO_LOCK ){ return SQLITE_BUSY; } proxyGetDbPathForUnixFile(pFile, dbPath); @@ -5448,8 +5861,8 @@ static int proxyTransformUnixFile(unixFile *pFile, const char *path) { lockPath=(char *)path; } - OSTRACE4("TRANSPROXY %d for %s pid=%d\n", pFile->h, - (lockPath ? lockPath : ":auto:"), getpid()); + OSTRACE(("TRANSPROXY %d for %s pid=%d\n", pFile->h, + (lockPath ? lockPath : ":auto:"), getpid())); pCtx = sqlite3_malloc( sizeof(*pCtx) ); if( pCtx==0 ){ @@ -5509,8 +5922,8 @@ static int proxyTransformUnixFile(unixFile *pFile, const char *path) { sqlite3_free(pCtx->conchFilePath); sqlite3_free(pCtx); } - OSTRACE3("TRANSPROXY %d %s\n", pFile->h, - (rc==SQLITE_OK ? "ok" : "failed")); + OSTRACE(("TRANSPROXY %d %s\n", pFile->h, + (rc==SQLITE_OK ? 
"ok" : "failed"))); return rc; } @@ -5605,7 +6018,7 @@ static int proxyCheckReservedLock(sqlite3_file *id, int *pResOut) { } /* -** Lock the file with the lock specified by parameter locktype - one +** Lock the file with the lock specified by parameter eFileLock - one ** of the following: ** ** (1) SHARED_LOCK @@ -5628,15 +6041,15 @@ static int proxyCheckReservedLock(sqlite3_file *id, int *pResOut) { ** This routine will only increase a lock. Use the sqlite3OsUnlock() ** routine to lower a locking level. */ -static int proxyLock(sqlite3_file *id, int locktype) { +static int proxyLock(sqlite3_file *id, int eFileLock) { unixFile *pFile = (unixFile*)id; int rc = proxyTakeConch(pFile); if( rc==SQLITE_OK ){ proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; if( pCtx->conchHeld>0 ){ unixFile *proxy = pCtx->lockProxy; - rc = proxy->pMethod->xLock((sqlite3_file*)proxy, locktype); - pFile->locktype = proxy->locktype; + rc = proxy->pMethod->xLock((sqlite3_file*)proxy, eFileLock); + pFile->eFileLock = proxy->eFileLock; }else{ /* conchHeld < 0 is lockless */ } @@ -5646,21 +6059,21 @@ static int proxyLock(sqlite3_file *id, int locktype) { /* -** Lower the locking level on file descriptor pFile to locktype. locktype +** Lower the locking level on file descriptor pFile to eFileLock. eFileLock ** must be either NO_LOCK or SHARED_LOCK. ** ** If the locking level of the file descriptor is already at or below ** the requested locking level, this routine is a no-op. */ -static int proxyUnlock(sqlite3_file *id, int locktype) { +static int proxyUnlock(sqlite3_file *id, int eFileLock) { unixFile *pFile = (unixFile*)id; int rc = proxyTakeConch(pFile); if( rc==SQLITE_OK ){ proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; if( pCtx->conchHeld>0 ){ unixFile *proxy = pCtx->lockProxy; - rc = proxy->pMethod->xUnlock((sqlite3_file*)proxy, locktype); - pFile->locktype = proxy->locktype; + rc = proxy->pMethod->xUnlock((sqlite3_file*)proxy, eFileLock); + pFile->eFileLock = proxy->eFileLock; }else{ /* conchHeld < 0 is lockless */ } @@ -5755,7 +6168,7 @@ int sqlite3_os_init(void){ ** that filesystem time. */ #define UNIXVFS(VFSNAME, FINDER) { \ - 1, /* iVersion */ \ + 2, /* iVersion */ \ sizeof(unixFile), /* szOsFile */ \ MAX_PATHNAME, /* mxPathname */ \ 0, /* pNext */ \ @@ -5772,7 +6185,8 @@ int sqlite3_os_init(void){ unixRandomness, /* xRandomness */ \ unixSleep, /* xSleep */ \ unixCurrentTime, /* xCurrentTime */ \ - unixGetLastError /* xGetLastError */ \ + unixGetLastError, /* xGetLastError */ \ + unixCurrentTimeInt64, /* xCurrentTimeInt64 */ \ } /* diff --git a/src/os_win.c b/src/os_win.c index 4721ead..0951312 100644 --- a/src/os_win.c +++ b/src/os_win.c @@ -76,6 +76,10 @@ # define FormatMessageW(a,b,c,d,e,f,g) 0 #endif +/* Forward references */ +typedef struct winShm winShm; /* A connection to shared-memory */ +typedef struct winShmNode winShmNode; /* A region of shared-memory */ + /* ** WinCE lacks native support for file locking so we have to fake it ** with some code of our own. 
@@ -95,12 +99,15 @@ typedef struct winceLock { */ typedef struct winFile winFile; struct winFile { - const sqlite3_io_methods *pMethod;/* Must be first */ + const sqlite3_io_methods *pMethod; /*** Must be first ***/ + sqlite3_vfs *pVfs; /* The VFS used to open this file */ HANDLE h; /* Handle for accessing the file */ unsigned char locktype; /* Type of lock currently held on this file */ short sharedLockByte; /* Randomly chosen byte used as a shared lock */ DWORD lastErrno; /* The Windows errno from the last I/O error */ DWORD sectorSize; /* Sector size of the device file is on */ + winShm *pShm; /* Instance of shared memory on this file */ + const char *zPath; /* Full pathname of this file */ #if SQLITE_OS_WINCE WCHAR *zDeleteOnClose; /* Name of file to delete when closing */ HANDLE hMutex; /* Mutex used to control access to shared lock */ @@ -629,9 +636,11 @@ static int winClose(sqlite3_file *id){ winFile *pFile = (winFile*)id; assert( id!=0 ); - OSTRACE2("CLOSE %d\n", pFile->h); + assert( pFile->pShm==0 ); + OSTRACE(("CLOSE %d\n", pFile->h)); do{ rc = CloseHandle(pFile->h); + /* SimulateIOError( rc=0; cnt=MX_CLOSE_ATTEMPT; ); */ }while( rc==0 && ++cnt < MX_CLOSE_ATTEMPT && (Sleep(100), 1) ); #if SQLITE_OS_WINCE #define WINCE_DELETION_ATTEMPTS 3 @@ -648,6 +657,7 @@ static int winClose(sqlite3_file *id){ free(pFile->zDeleteOnClose); } #endif + OSTRACE(("CLOSE %d %s\n", pFile->h, rc ? "ok" : "failed")); OpenCounter(-1); return rc ? SQLITE_OK : SQLITE_IOERR; } @@ -679,7 +689,7 @@ static int winRead( assert( id!=0 ); SimulateIOError(return SQLITE_IOERR_READ); - OSTRACE3("READ %d lock=%d\n", pFile->h, pFile->locktype); + OSTRACE(("READ %d lock=%d\n", pFile->h, pFile->locktype)); rc = SetFilePointer(pFile->h, lowerBits, &upperBits, FILE_BEGIN); if( rc==INVALID_SET_FILE_POINTER && (error=GetLastError())!=NO_ERROR ){ pFile->lastErrno = error; @@ -718,11 +728,15 @@ static int winWrite( assert( id!=0 ); SimulateIOError(return SQLITE_IOERR_WRITE); SimulateDiskfullError(return SQLITE_FULL); - OSTRACE3("WRITE %d lock=%d\n", pFile->h, pFile->locktype); + OSTRACE(("WRITE %d lock=%d\n", pFile->h, pFile->locktype)); rc = SetFilePointer(pFile->h, lowerBits, &upperBits, FILE_BEGIN); if( rc==INVALID_SET_FILE_POINTER && (error=GetLastError())!=NO_ERROR ){ pFile->lastErrno = error; - return SQLITE_FULL; + if( pFile->lastErrno==ERROR_HANDLE_DISK_FULL ){ + return SQLITE_FULL; + }else{ + return SQLITE_IOERR_WRITE; + } } assert( amt>0 ); while( @@ -735,7 +749,11 @@ static int winWrite( } if( !rc || amt>(int)wrote ){ pFile->lastErrno = GetLastError(); - return SQLITE_FULL; + if( pFile->lastErrno==ERROR_HANDLE_DISK_FULL ){ + return SQLITE_FULL; + }else{ + return SQLITE_IOERR_WRITE; + } } return SQLITE_OK; } @@ -746,24 +764,25 @@ static int winWrite( static int winTruncate(sqlite3_file *id, sqlite3_int64 nByte){ LONG upperBits = (LONG)((nByte>>32) & 0x7fffffff); LONG lowerBits = (LONG)(nByte & 0xffffffff); - DWORD rc; + DWORD dwRet; winFile *pFile = (winFile*)id; DWORD error; + int rc = SQLITE_OK; assert( id!=0 ); - OSTRACE3("TRUNCATE %d %lld\n", pFile->h, nByte); + OSTRACE(("TRUNCATE %d %lld\n", pFile->h, nByte)); SimulateIOError(return SQLITE_IOERR_TRUNCATE); - rc = SetFilePointer(pFile->h, lowerBits, &upperBits, FILE_BEGIN); - if( rc==INVALID_SET_FILE_POINTER && (error=GetLastError())!=NO_ERROR ){ + dwRet = SetFilePointer(pFile->h, lowerBits, &upperBits, FILE_BEGIN); + if( dwRet==INVALID_SET_FILE_POINTER && (error=GetLastError())!=NO_ERROR ){ pFile->lastErrno = error; - return SQLITE_IOERR_TRUNCATE; - } + rc = 
SQLITE_IOERR_TRUNCATE; /* SetEndOfFile will fail if nByte is negative */ - if( !SetEndOfFile(pFile->h) ){ + }else if( !SetEndOfFile(pFile->h) ){ pFile->lastErrno = GetLastError(); - return SQLITE_IOERR_TRUNCATE; + rc = SQLITE_IOERR_TRUNCATE; } - return SQLITE_OK; + OSTRACE(("TRUNCATE %d %lld %s\n", pFile->h, nByte, rc==SQLITE_OK ? "ok" : "failed")); + return rc; } #ifdef SQLITE_TEST @@ -779,14 +798,20 @@ int sqlite3_fullsync_count = 0; ** Make sure all writes to a particular file are committed to disk. */ static int winSync(sqlite3_file *id, int flags){ -#ifndef SQLITE_NO_SYNC +#if !defined(NDEBUG) || !defined(SQLITE_NO_SYNC) || defined(SQLITE_DEBUG) winFile *pFile = (winFile*)id; - - assert( id!=0 ); - OSTRACE3("SYNC %d lock=%d\n", pFile->h, pFile->locktype); #else UNUSED_PARAMETER(id); #endif + + assert( pFile ); + /* Check that one of SQLITE_SYNC_NORMAL or FULL was passed */ + assert((flags&0x0F)==SQLITE_SYNC_NORMAL + || (flags&0x0F)==SQLITE_SYNC_FULL + ); + + OSTRACE(("SYNC %d lock=%d\n", pFile->h, pFile->locktype)); + #ifndef SQLITE_TEST UNUSED_PARAMETER(flags); #else @@ -795,11 +820,18 @@ static int winSync(sqlite3_file *id, int flags){ } sqlite3_sync_count++; #endif + + /* Unix cannot, but some systems may return SQLITE_FULL from here. This + ** line is to test that doing so does not cause any problems. + */ + SimulateDiskfullError( return SQLITE_FULL ); + SimulateIOError( return SQLITE_IOERR; ); + /* If we compiled with the SQLITE_NO_SYNC flag, then syncing is a ** no-op */ #ifdef SQLITE_NO_SYNC - return SQLITE_OK; + return SQLITE_OK; #else if( FlushFileBuffers(pFile->h) ){ return SQLITE_OK; @@ -924,8 +956,8 @@ static int winLock(sqlite3_file *id, int locktype){ DWORD error = NO_ERROR; assert( id!=0 ); - OSTRACE5("LOCK %d %d was %d(%d)\n", - pFile->h, locktype, pFile->locktype, pFile->sharedLockByte); + OSTRACE(("LOCK %d %d was %d(%d)\n", + pFile->h, locktype, pFile->locktype, pFile->sharedLockByte)); /* If there is already a lock of this type or more restrictive on the ** OsFile, do nothing. Don't use the end_lock: exit path, as @@ -955,7 +987,7 @@ static int winLock(sqlite3_file *id, int locktype){ /* Try 3 times to get the pending lock. The pending lock might be ** held by another reader process who will release it momentarily. */ - OSTRACE2("could not get a PENDING lock. cnt=%d\n", cnt); + OSTRACE(("could not get a PENDING lock. 
cnt=%d\n", cnt)); Sleep(1); } gotPendingLock = res; @@ -1000,13 +1032,13 @@ static int winLock(sqlite3_file *id, int locktype){ if( locktype==EXCLUSIVE_LOCK && res ){ assert( pFile->locktype>=SHARED_LOCK ); res = unlockReadLock(pFile); - OSTRACE2("unreadlock = %d\n", res); + OSTRACE(("unreadlock = %d\n", res)); res = LockFile(pFile->h, SHARED_FIRST, 0, SHARED_SIZE, 0); if( res ){ newLocktype = EXCLUSIVE_LOCK; }else{ error = GetLastError(); - OSTRACE2("error-code = %d\n", error); + OSTRACE(("error-code = %d\n", error)); getReadLock(pFile); } } @@ -1024,8 +1056,8 @@ static int winLock(sqlite3_file *id, int locktype){ if( res ){ rc = SQLITE_OK; }else{ - OSTRACE4("LOCK FAILED %d trying for %d but got %d\n", pFile->h, - locktype, newLocktype); + OSTRACE(("LOCK FAILED %d trying for %d but got %d\n", pFile->h, + locktype, newLocktype)); pFile->lastErrno = error; rc = SQLITE_BUSY; } @@ -1042,17 +1074,19 @@ static int winCheckReservedLock(sqlite3_file *id, int *pResOut){ int rc; winFile *pFile = (winFile*)id; + SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); + assert( id!=0 ); if( pFile->locktype>=RESERVED_LOCK ){ rc = 1; - OSTRACE3("TEST WR-LOCK %d %d (local)\n", pFile->h, rc); + OSTRACE(("TEST WR-LOCK %d %d (local)\n", pFile->h, rc)); }else{ rc = LockFile(pFile->h, RESERVED_BYTE, 0, 1, 0); if( rc ){ UnlockFile(pFile->h, RESERVED_BYTE, 0, 1, 0); } rc = !rc; - OSTRACE3("TEST WR-LOCK %d %d (remote)\n", pFile->h, rc); + OSTRACE(("TEST WR-LOCK %d %d (remote)\n", pFile->h, rc)); } *pResOut = rc; return SQLITE_OK; @@ -1075,8 +1109,8 @@ static int winUnlock(sqlite3_file *id, int locktype){ int rc = SQLITE_OK; assert( pFile!=0 ); assert( locktype<=SHARED_LOCK ); - OSTRACE5("UNLOCK %d to %d was %d(%d)\n", pFile->h, locktype, - pFile->locktype, pFile->sharedLockByte); + OSTRACE(("UNLOCK %d to %d was %d(%d)\n", pFile->h, locktype, + pFile->locktype, pFile->sharedLockByte)); type = pFile->locktype; if( type>=EXCLUSIVE_LOCK ){ UnlockFile(pFile->h, SHARED_FIRST, 0, SHARED_SIZE, 0); @@ -1112,6 +1146,13 @@ static int winFileControl(sqlite3_file *id, int op, void *pArg){ *(int*)pArg = (int)((winFile*)id)->lastErrno; return SQLITE_OK; } + case SQLITE_FCNTL_SIZE_HINT: { + sqlite3_int64 sz = *(sqlite3_int64*)pArg; + SimulateIOErrorBenign(1); + winTruncate(id, sz); + SimulateIOErrorBenign(0); + return SQLITE_OK; + } } return SQLITE_ERROR; } @@ -1136,34 +1177,647 @@ static int winSectorSize(sqlite3_file *id){ */ static int winDeviceCharacteristics(sqlite3_file *id){ UNUSED_PARAMETER(id); - return 0; + return SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN; } +#ifndef SQLITE_OMIT_WAL + +/* +** Helper functions to obtain and relinquish the global mutex. The +** global mutex is used to protect the winLockInfo objects used by +** this file, all of which may be shared by multiple threads. +** +** Function winShmMutexHeld() is used to assert() that the global mutex +** is held when required. This function is only used as part of assert() +** statements. e.g. +** +** winShmEnterMutex() +** assert( winShmMutexHeld() ); +** winShmLeaveMutex() +*/ +static void winShmEnterMutex(void){ + sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); +} +static void winShmLeaveMutex(void){ + sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); +} +#ifdef SQLITE_DEBUG +static int winShmMutexHeld(void) { + return sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); +} +#endif + +/* +** Object used to represent a single file opened and mmapped to provide +** shared memory. 
When multiple threads all reference the same +** log-summary, each thread has its own winFile object, but they all +** point to a single instance of this object. In other words, each +** log-summary is opened only once per process. +** +** winShmMutexHeld() must be true when creating or destroying +** this object or while reading or writing the following fields: +** +** nRef +** pNext +** +** The following fields are read-only after the object is created: +** +** fid +** zFilename +** +** Either winShmNode.mutex must be held or winShmNode.nRef==0 and +** winShmMutexHeld() is true when reading or writing any other field +** in this structure. +** +*/ +struct winShmNode { + sqlite3_mutex *mutex; /* Mutex to access this object */ + char *zFilename; /* Name of the file */ + winFile hFile; /* File handle from winOpen */ + + int szRegion; /* Size of shared-memory regions */ + int nRegion; /* Size of array apRegion */ + struct ShmRegion { + HANDLE hMap; /* File handle from CreateFileMapping */ + void *pMap; + } *aRegion; + DWORD lastErrno; /* The Windows errno from the last I/O error */ + + int nRef; /* Number of winShm objects pointing to this */ + winShm *pFirst; /* All winShm objects pointing to this */ + winShmNode *pNext; /* Next in list of all winShmNode objects */ +#ifdef SQLITE_DEBUG + u8 nextShmId; /* Next available winShm.id value */ +#endif +}; + +/* +** A global array of all winShmNode objects. +** +** The winShmMutexHeld() must be true while reading or writing this list. +*/ +static winShmNode *winShmNodeList = 0; + +/* +** Structure used internally by this VFS to record the state of an +** open shared memory connection. +** +** The following fields are initialized when this object is created and +** are read-only thereafter: +** +** winShm.pShmNode +** winShm.id +** +** All other fields are read/write. The winShm.pShmNode->mutex must be held +** while accessing any read/write fields. +*/ +struct winShm { + winShmNode *pShmNode; /* The underlying winShmNode object */ + winShm *pNext; /* Next winShm with the same winShmNode */ + u8 hasMutex; /* True if holding the winShmNode mutex */ + u16 sharedMask; /* Mask of shared locks held */ + u16 exclMask; /* Mask of exclusive locks held */ +#ifdef SQLITE_DEBUG + u8 id; /* Id of this connection with its winShmNode */ +#endif +}; + +/* +** Constants used for locking +*/ +#define WIN_SHM_BASE ((22+SQLITE_SHM_NLOCK)*4) /* first lock byte */ +#define WIN_SHM_DMS (WIN_SHM_BASE+SQLITE_SHM_NLOCK) /* deadman switch */ + +/* +** Apply advisory locks for all n bytes beginning at ofst. 
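+**
+** As a worked illustration (assuming SQLITE_SHM_NLOCK is 8, its value in
+** sqlite.h.in), the constants above evaluate to WIN_SHM_BASE = (22+8)*4
+** = 120, so the lock bytes occupy offsets 120 through 127 and the
+** dead-man switch byte WIN_SHM_DMS sits at offset 128.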
+*/ +#define _SHM_UNLCK 1 +#define _SHM_RDLCK 2 +#define _SHM_WRLCK 3 +static int winShmSystemLock( + winShmNode *pFile, /* Apply locks to this open shared-memory segment */ + int lockType, /* _SHM_UNLCK, _SHM_RDLCK, or _SHM_WRLCK */ + int ofst, /* Offset to first byte to be locked/unlocked */ + int nByte /* Number of bytes to lock or unlock */ +){ + OVERLAPPED ovlp; + DWORD dwFlags; + int rc = 0; /* Result code form Lock/UnlockFileEx() */ + + /* Access to the winShmNode object is serialized by the caller */ + assert( sqlite3_mutex_held(pFile->mutex) || pFile->nRef==0 ); + + /* Initialize the locking parameters */ + dwFlags = LOCKFILE_FAIL_IMMEDIATELY; + if( lockType == _SHM_WRLCK ) dwFlags |= LOCKFILE_EXCLUSIVE_LOCK; + + memset(&ovlp, 0, sizeof(OVERLAPPED)); + ovlp.Offset = ofst; + + /* Release/Acquire the system-level lock */ + if( lockType==_SHM_UNLCK ){ + rc = UnlockFileEx(pFile->hFile.h, 0, nByte, 0, &ovlp); + }else{ + rc = LockFileEx(pFile->hFile.h, dwFlags, 0, nByte, 0, &ovlp); + } + + if( rc!= 0 ){ + rc = SQLITE_OK; + }else{ + pFile->lastErrno = GetLastError(); + rc = SQLITE_BUSY; + } + + OSTRACE(("SHM-LOCK %d %s %s 0x%08lx\n", + pFile->hFile.h, + rc==SQLITE_OK ? "ok" : "failed", + lockType==_SHM_UNLCK ? "UnlockFileEx" : "LockFileEx", + pFile->lastErrno)); + + return rc; +} + +/* Forward references to VFS methods */ +static int winOpen(sqlite3_vfs*,const char*,sqlite3_file*,int,int*); +static int winDelete(sqlite3_vfs *,const char*,int); + +/* +** Purge the winShmNodeList list of all entries with winShmNode.nRef==0. +** +** This is not a VFS shared-memory method; it is a utility function called +** by VFS shared-memory methods. +*/ +static void winShmPurge(sqlite3_vfs *pVfs, int deleteFlag){ + winShmNode **pp; + winShmNode *p; + assert( winShmMutexHeld() ); + pp = &winShmNodeList; + while( (p = *pp)!=0 ){ + if( p->nRef==0 ){ + int i; + if( p->mutex ) sqlite3_mutex_free(p->mutex); + for(i=0; inRegion; i++){ + UnmapViewOfFile(p->aRegion[i].pMap); + CloseHandle(p->aRegion[i].hMap); + } + if( p->hFile.h != INVALID_HANDLE_VALUE ){ + SimulateIOErrorBenign(1); + winClose((sqlite3_file *)&p->hFile); + SimulateIOErrorBenign(0); + } + if( deleteFlag ){ + SimulateIOErrorBenign(1); + winDelete(pVfs, p->zFilename, 0); + SimulateIOErrorBenign(0); + } + *pp = p->pNext; + sqlite3_free(p->aRegion); + sqlite3_free(p); + }else{ + pp = &p->pNext; + } + } +} + +/* +** Open the shared-memory area associated with database file pDbFd. +** +** When opening a new shared-memory file, if no other instances of that +** file are currently open, in this process or in other processes, then +** the file must be truncated to zero length or have its header cleared. +*/ +static int winOpenSharedMemory(winFile *pDbFd){ + struct winShm *p; /* The connection to be opened */ + struct winShmNode *pShmNode = 0; /* The underlying mmapped file */ + int rc; /* Result code */ + struct winShmNode *pNew; /* Newly allocated winShmNode */ + int nName; /* Size of zName in bytes */ + + assert( pDbFd->pShm==0 ); /* Not previously opened */ + + /* Allocate space for the new sqlite3_shm object. Also speculatively + ** allocate space for a new winShmNode and filename. 
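+** (The nName+15 bytes requested below are assumed to leave room for the
+** "-shm" suffix and nul terminator appended by sqlite3_snprintf(), with a
+** few bytes of slack.)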
+ */ + p = sqlite3_malloc( sizeof(*p) ); + if( p==0 ) return SQLITE_NOMEM; + memset(p, 0, sizeof(*p)); + nName = sqlite3Strlen30(pDbFd->zPath); + pNew = sqlite3_malloc( sizeof(*pShmNode) + nName + 15 ); + if( pNew==0 ){ + sqlite3_free(p); + return SQLITE_NOMEM; + } + memset(pNew, 0, sizeof(*pNew)); + pNew->zFilename = (char*)&pNew[1]; + sqlite3_snprintf(nName+15, pNew->zFilename, "%s-shm", pDbFd->zPath); + + /* Look to see if there is an existing winShmNode that can be used. + ** If no matching winShmNode currently exists, create a new one. + */ + winShmEnterMutex(); + for(pShmNode = winShmNodeList; pShmNode; pShmNode=pShmNode->pNext){ + /* TBD need to come up with better match here. Perhaps + ** use FILE_ID_BOTH_DIR_INFO Structure. + */ + if( sqlite3StrICmp(pShmNode->zFilename, pNew->zFilename)==0 ) break; + } + if( pShmNode ){ + sqlite3_free(pNew); + }else{ + pShmNode = pNew; + pNew = 0; + ((winFile*)(&pShmNode->hFile))->h = INVALID_HANDLE_VALUE; + pShmNode->pNext = winShmNodeList; + winShmNodeList = pShmNode; + + pShmNode->mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); + if( pShmNode->mutex==0 ){ + rc = SQLITE_NOMEM; + goto shm_open_err; + } + rc = winOpen(pDbFd->pVfs, + pShmNode->zFilename, /* Name of the file (UTF-8) */ + (sqlite3_file*)&pShmNode->hFile, /* File handle here */ + SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, /* Mode flags */ + 0); + if( SQLITE_OK!=rc ){ + rc = SQLITE_CANTOPEN_BKPT; + goto shm_open_err; + } + + /* Check to see if another process is holding the dead-man switch. + ** If not, truncate the file to zero length. + */ + if( winShmSystemLock(pShmNode, _SHM_WRLCK, WIN_SHM_DMS, 1)==SQLITE_OK ){ + rc = winTruncate((sqlite3_file *)&pShmNode->hFile, 0); + if( rc!=SQLITE_OK ){ + rc = SQLITE_IOERR_SHMOPEN; + } + } + if( rc==SQLITE_OK ){ + winShmSystemLock(pShmNode, _SHM_UNLCK, WIN_SHM_DMS, 1); + rc = winShmSystemLock(pShmNode, _SHM_RDLCK, WIN_SHM_DMS, 1); + } + if( rc ) goto shm_open_err; + } + + /* Make the new connection a child of the winShmNode */ + p->pShmNode = pShmNode; +#ifdef SQLITE_DEBUG + p->id = pShmNode->nextShmId++; +#endif + pShmNode->nRef++; + pDbFd->pShm = p; + winShmLeaveMutex(); + + /* The reference count on pShmNode has already been incremented under + ** the cover of the winShmEnterMutex() mutex and the pointer from the + ** new (struct winShm) object to the pShmNode has been set. All that is + ** left to do is to link the new object into the linked list starting + ** at pShmNode->pFirst. This must be done while holding the pShmNode->mutex + ** mutex. + */ + sqlite3_mutex_enter(pShmNode->mutex); + p->pNext = pShmNode->pFirst; + pShmNode->pFirst = p; + sqlite3_mutex_leave(pShmNode->mutex); + return SQLITE_OK; + + /* Jump here on any error */ +shm_open_err: + winShmSystemLock(pShmNode, _SHM_UNLCK, WIN_SHM_DMS, 1); + winShmPurge(pDbFd->pVfs, 0); /* This call frees pShmNode if required */ + sqlite3_free(p); + sqlite3_free(pNew); + winShmLeaveMutex(); + return rc; +} + +/* +** Close a connection to shared-memory. Delete the underlying +** storage if deleteFlag is true. 
+*/ +static int winShmUnmap( + sqlite3_file *fd, /* Database holding shared memory */ + int deleteFlag /* Delete after closing if true */ +){ + winFile *pDbFd; /* Database holding shared-memory */ + winShm *p; /* The connection to be closed */ + winShmNode *pShmNode; /* The underlying shared-memory file */ + winShm **pp; /* For looping over sibling connections */ + + pDbFd = (winFile*)fd; + p = pDbFd->pShm; + if( p==0 ) return SQLITE_OK; + pShmNode = p->pShmNode; + + /* Remove connection p from the set of connections associated + ** with pShmNode */ + sqlite3_mutex_enter(pShmNode->mutex); + for(pp=&pShmNode->pFirst; (*pp)!=p; pp = &(*pp)->pNext){} + *pp = p->pNext; + + /* Free the connection p */ + sqlite3_free(p); + pDbFd->pShm = 0; + sqlite3_mutex_leave(pShmNode->mutex); + + /* If pShmNode->nRef has reached 0, then close the underlying + ** shared-memory file, too */ + winShmEnterMutex(); + assert( pShmNode->nRef>0 ); + pShmNode->nRef--; + if( pShmNode->nRef==0 ){ + winShmPurge(pDbFd->pVfs, deleteFlag); + } + winShmLeaveMutex(); + + return SQLITE_OK; +} + +/* +** Change the lock state for a shared-memory segment. +*/ +static int winShmLock( + sqlite3_file *fd, /* Database file holding the shared memory */ + int ofst, /* First lock to acquire or release */ + int n, /* Number of locks to acquire or release */ + int flags /* What to do with the lock */ +){ + winFile *pDbFd = (winFile*)fd; /* Connection holding shared memory */ + winShm *p = pDbFd->pShm; /* The shared memory being locked */ + winShm *pX; /* For looping over all siblings */ + winShmNode *pShmNode = p->pShmNode; + int rc = SQLITE_OK; /* Result code */ + u16 mask; /* Mask of locks to take or release */ + + assert( ofst>=0 && ofst+n<=SQLITE_SHM_NLOCK ); + assert( n>=1 ); + assert( flags==(SQLITE_SHM_LOCK | SQLITE_SHM_SHARED) + || flags==(SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE) + || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED) + || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE) ); + assert( n==1 || (flags & SQLITE_SHM_EXCLUSIVE)!=0 ); + + mask = (u16)((1U<<(ofst+n)) - (1U<1 || mask==(1<mutex); + if( flags & SQLITE_SHM_UNLOCK ){ + u16 allMask = 0; /* Mask of locks held by siblings */ + + /* See if any siblings hold this same lock */ + for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ + if( pX==p ) continue; + assert( (pX->exclMask & (p->exclMask|p->sharedMask))==0 ); + allMask |= pX->sharedMask; + } + + /* Unlock the system-level locks */ + if( (mask & allMask)==0 ){ + rc = winShmSystemLock(pShmNode, _SHM_UNLCK, ofst+WIN_SHM_BASE, n); + }else{ + rc = SQLITE_OK; + } + + /* Undo the local locks */ + if( rc==SQLITE_OK ){ + p->exclMask &= ~mask; + p->sharedMask &= ~mask; + } + }else if( flags & SQLITE_SHM_SHARED ){ + u16 allShared = 0; /* Union of locks held by connections other than "p" */ + + /* Find out which shared locks are already held by sibling connections. + ** If any sibling already holds an exclusive lock, go ahead and return + ** SQLITE_BUSY. + */ + for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ + if( (pX->exclMask & mask)!=0 ){ + rc = SQLITE_BUSY; + break; + } + allShared |= pX->sharedMask; + } + + /* Get shared locks at the system level, if necessary */ + if( rc==SQLITE_OK ){ + if( (allShared & mask)==0 ){ + rc = winShmSystemLock(pShmNode, _SHM_RDLCK, ofst+WIN_SHM_BASE, n); + }else{ + rc = SQLITE_OK; + } + } + + /* Get the local shared locks */ + if( rc==SQLITE_OK ){ + p->sharedMask |= mask; + } + }else{ + /* Make sure no sibling connections hold locks that will block this + ** lock. If any do, return SQLITE_BUSY right away. 
+ */ + for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ + if( (pX->exclMask & mask)!=0 || (pX->sharedMask & mask)!=0 ){ + rc = SQLITE_BUSY; + break; + } + } + + /* Get the exclusive locks at the system level. Then if successful + ** also mark the local connection as being locked. + */ + if( rc==SQLITE_OK ){ + rc = winShmSystemLock(pShmNode, _SHM_WRLCK, ofst+WIN_SHM_BASE, n); + if( rc==SQLITE_OK ){ + assert( (p->sharedMask & mask)==0 ); + p->exclMask |= mask; + } + } + } + sqlite3_mutex_leave(pShmNode->mutex); + OSTRACE(("SHM-LOCK shmid-%d, pid-%d got %03x,%03x %s\n", + p->id, (int)GetCurrentProcessId(), p->sharedMask, p->exclMask, + rc ? "failed" : "ok")); + return rc; +} + +/* +** Implement a memory barrier or memory fence on shared memory. +** +** All loads and stores begun before the barrier must complete before +** any load or store begun after the barrier. +*/ +static void winShmBarrier( + sqlite3_file *fd /* Database holding the shared memory */ +){ + UNUSED_PARAMETER(fd); + /* MemoryBarrier(); // does not work -- do not know why not */ + winShmEnterMutex(); + winShmLeaveMutex(); +} + +/* +** This function is called to obtain a pointer to region iRegion of the +** shared-memory associated with the database file fd. Shared-memory regions +** are numbered starting from zero. Each shared-memory region is szRegion +** bytes in size. +** +** If an error occurs, an error code is returned and *pp is set to NULL. +** +** Otherwise, if the isWrite parameter is 0 and the requested shared-memory +** region has not been allocated (by any client, including one running in a +** separate process), then *pp is set to NULL and SQLITE_OK returned. If +** isWrite is non-zero and the requested shared-memory region has not yet +** been allocated, it is allocated by this function. +** +** If the shared-memory region has already been allocated or is allocated by +** this call as described above, then it is mapped into this processes +** address space (if it is not already), *pp is set to point to the mapped +** memory and SQLITE_OK returned. +*/ +static int winShmMap( + sqlite3_file *fd, /* Handle open on database file */ + int iRegion, /* Region to retrieve */ + int szRegion, /* Size of regions */ + int isWrite, /* True to extend file if necessary */ + void volatile **pp /* OUT: Mapped memory */ +){ + winFile *pDbFd = (winFile*)fd; + winShm *p = pDbFd->pShm; + winShmNode *pShmNode; + int rc = SQLITE_OK; + + if( !p ){ + rc = winOpenSharedMemory(pDbFd); + if( rc!=SQLITE_OK ) return rc; + p = pDbFd->pShm; + } + pShmNode = p->pShmNode; + + sqlite3_mutex_enter(pShmNode->mutex); + assert( szRegion==pShmNode->szRegion || pShmNode->nRegion==0 ); + + if( pShmNode->nRegion<=iRegion ){ + struct ShmRegion *apNew; /* New aRegion[] array */ + int nByte = (iRegion+1)*szRegion; /* Minimum required file size */ + sqlite3_int64 sz; /* Current size of wal-index file */ + + pShmNode->szRegion = szRegion; + + /* The requested region is not mapped into this processes address space. + ** Check to see if it has been allocated (i.e. if the wal-index file is + ** large enough to contain the requested region). + */ + rc = winFileSize((sqlite3_file *)&pShmNode->hFile, &sz); + if( rc!=SQLITE_OK ){ + rc = SQLITE_IOERR_SHMSIZE; + goto shmpage_out; + } + + if( szhFile, nByte); + if( rc!=SQLITE_OK ){ + rc = SQLITE_IOERR_SHMSIZE; + goto shmpage_out; + } + } + + /* Map the requested memory region into this processes address space. 
*/ + apNew = (struct ShmRegion *)sqlite3_realloc( + pShmNode->aRegion, (iRegion+1)*sizeof(apNew[0]) + ); + if( !apNew ){ + rc = SQLITE_IOERR_NOMEM; + goto shmpage_out; + } + pShmNode->aRegion = apNew; + + while( pShmNode->nRegion<=iRegion ){ + HANDLE hMap; /* file-mapping handle */ + void *pMap = 0; /* Mapped memory region */ + + hMap = CreateFileMapping(pShmNode->hFile.h, + NULL, PAGE_READWRITE, 0, nByte, NULL + ); + if( hMap ){ + pMap = MapViewOfFile(hMap, FILE_MAP_WRITE | FILE_MAP_READ, + 0, 0, nByte + ); + } + if( !pMap ){ + pShmNode->lastErrno = GetLastError(); + rc = SQLITE_IOERR; + if( hMap ) CloseHandle(hMap); + goto shmpage_out; + } + + pShmNode->aRegion[pShmNode->nRegion].pMap = pMap; + pShmNode->aRegion[pShmNode->nRegion].hMap = hMap; + pShmNode->nRegion++; + } + } + +shmpage_out: + if( pShmNode->nRegion>iRegion ){ + char *p = (char *)pShmNode->aRegion[iRegion].pMap; + *pp = (void *)&p[iRegion*szRegion]; + }else{ + *pp = 0; + } + sqlite3_mutex_leave(pShmNode->mutex); + return rc; +} + +#else +# define winShmMap 0 +# define winShmLock 0 +# define winShmBarrier 0 +# define winShmUnmap 0 +#endif /* #ifndef SQLITE_OMIT_WAL */ + +/* +** Here ends the implementation of all sqlite3_file methods. +** +********************** End sqlite3_file Methods ******************************* +******************************************************************************/ + /* ** This vector defines all the methods that can operate on an ** sqlite3_file for win32. */ static const sqlite3_io_methods winIoMethod = { - 1, /* iVersion */ - winClose, - winRead, - winWrite, - winTruncate, - winSync, - winFileSize, - winLock, - winUnlock, - winCheckReservedLock, - winFileControl, - winSectorSize, - winDeviceCharacteristics + 2, /* iVersion */ + winClose, /* xClose */ + winRead, /* xRead */ + winWrite, /* xWrite */ + winTruncate, /* xTruncate */ + winSync, /* xSync */ + winFileSize, /* xFileSize */ + winLock, /* xLock */ + winUnlock, /* xUnlock */ + winCheckReservedLock, /* xCheckReservedLock */ + winFileControl, /* xFileControl */ + winSectorSize, /* xSectorSize */ + winDeviceCharacteristics, /* xDeviceCharacteristics */ + winShmMap, /* xShmMap */ + winShmLock, /* xShmLock */ + winShmBarrier, /* xShmBarrier */ + winShmUnmap /* xShmUnmap */ }; -/*************************************************************************** -** Here ends the I/O methods that form the sqlite3_io_methods object. +/**************************************************************************** +**************************** sqlite3_vfs methods **************************** ** -** The next block of code implements the VFS methods. -****************************************************************************/ +** This division contains the implementation of methods on the +** sqlite3_vfs object. +*/ /* ** Convert a UTF-8 filename into whatever form the underlying @@ -1197,6 +1851,13 @@ static int getTempname(int nBuf, char *zBuf){ "0123456789"; size_t i, j; char zTempPath[MAX_PATH+1]; + + /* It's odd to simulate an io-error here, but really this is just + ** using the io-error infrastructure to test that SQLite handles this + ** function failing. + */ + SimulateIOError( return SQLITE_IOERR ); + if( sqlite3_temp_directory ){ sqlite3_snprintf(MAX_PATH-30, zTempPath, "%s", sqlite3_temp_directory); }else if( isNT() ){ @@ -1228,17 +1889,27 @@ static int getTempname(int nBuf, char *zBuf){ } #endif } + + /* Check that the output buffer is large enough for the temporary file + ** name. If it is not, return SQLITE_ERROR. 
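+** (The 17 extra bytes in this check cover the '\' path separator, the 15
+** random suffix characters appended below, and the nul terminator.)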
+ */ + if( (sqlite3Strlen30(zTempPath) + sqlite3Strlen30(SQLITE_TEMP_FILE_PREFIX) + 17) >= nBuf ){ + return SQLITE_ERROR; + } + for(i=sqlite3Strlen30(zTempPath); i>0 && zTempPath[i-1]=='\\'; i--){} zTempPath[i] = 0; - sqlite3_snprintf(nBuf-30, zBuf, + + sqlite3_snprintf(nBuf-17, zBuf, "%s\\"SQLITE_TEMP_FILE_PREFIX, zTempPath); j = sqlite3Strlen30(zBuf); - sqlite3_randomness(20, &zBuf[j]); - for(i=0; i<20; i++, j++){ + sqlite3_randomness(15, &zBuf[j]); + for(i=0; i<15; i++, j++){ zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ]; } zBuf[j] = 0; - OSTRACE2("TEMP FILENAME: %s\n", zBuf); + + OSTRACE(("TEMP FILENAME: %s\n", zBuf)); return SQLITE_OK; } @@ -1330,6 +2001,8 @@ static int winOpen( assert( id!=0 ); UNUSED_PARAMETER(pVfs); + pFile->h = INVALID_HANDLE_VALUE; + /* If the second argument to this function is NULL, generate a ** temporary file name to use */ @@ -1411,7 +2084,11 @@ static int winOpen( ); #endif } + OSTRACE(("OPEN %d %s 0x%lx %s\n", + h, zName, dwDesiredAccess, + h==INVALID_HANDLE_VALUE ? "failed" : "ok")); if( h==INVALID_HANDLE_VALUE ){ + pFile->lastErrno = GetLastError(); free(zConverted); if( flags & SQLITE_OPEN_READWRITE ){ return winOpen(pVfs, zName, id, @@ -1431,6 +2108,9 @@ static int winOpen( pFile->pMethod = &winIoMethod; pFile->h = h; pFile->lastErrno = NO_ERROR; + pFile->pVfs = pVfs; + pFile->pShm = 0; + pFile->zPath = zName; pFile->sectorSize = getSectorSize(pVfs, zUtf8Name); #if SQLITE_OS_WINCE if( (flags & (SQLITE_OPEN_READWRITE|SQLITE_OPEN_MAIN_DB)) == @@ -1473,13 +2153,15 @@ static int winDelete( int cnt = 0; DWORD rc; DWORD error = 0; - void *zConverted = convertUtf8Filename(zFilename); + void *zConverted; UNUSED_PARAMETER(pVfs); UNUSED_PARAMETER(syncDir); + + SimulateIOError(return SQLITE_IOERR_DELETE); + zConverted = convertUtf8Filename(zFilename); if( zConverted==0 ){ return SQLITE_NOMEM; } - SimulateIOError(return SQLITE_IOERR_DELETE); if( isNT() ){ do{ DeleteFileW(zConverted); @@ -1502,7 +2184,10 @@ static int winDelete( #endif } free(zConverted); - OSTRACE2("DELETE \"%s\"\n", zFilename); + OSTRACE(("DELETE \"%s\" %s\n", zFilename, + ( (rc==INVALID_FILE_ATTRIBUTES) && (error==ERROR_FILE_NOT_FOUND)) ? + "ok" : "failed" )); + return ( (rc == INVALID_FILE_ATTRIBUTES) && (error == ERROR_FILE_NOT_FOUND)) ? SQLITE_OK : SQLITE_IOERR_DELETE; } @@ -1518,13 +2203,38 @@ static int winAccess( ){ DWORD attr; int rc = 0; - void *zConverted = convertUtf8Filename(zFilename); + void *zConverted; UNUSED_PARAMETER(pVfs); + + SimulateIOError( return SQLITE_IOERR_ACCESS; ); + zConverted = convertUtf8Filename(zFilename); if( zConverted==0 ){ return SQLITE_NOMEM; } if( isNT() ){ - attr = GetFileAttributesW((WCHAR*)zConverted); + WIN32_FILE_ATTRIBUTE_DATA sAttrData; + memset(&sAttrData, 0, sizeof(sAttrData)); + if( GetFileAttributesExW((WCHAR*)zConverted, + GetFileExInfoStandard, + &sAttrData) ){ + /* For an SQLITE_ACCESS_EXISTS query, treat a zero-length file + ** as if it does not exist. + */ + if( flags==SQLITE_ACCESS_EXISTS + && sAttrData.nFileSizeHigh==0 + && sAttrData.nFileSizeLow==0 ){ + attr = INVALID_FILE_ATTRIBUTES; + }else{ + attr = sAttrData.dwFileAttributes; + } + }else{ + if( GetLastError()!=ERROR_FILE_NOT_FOUND ){ + free(zConverted); + return SQLITE_IOERR_ACCESS; + }else{ + attr = INVALID_FILE_ATTRIBUTES; + } + } /* isNT() is 1 if SQLITE_OS_WINCE==1, so this else is never executed. ** Since the ASCII version of these Windows API do not exist for WINCE, ** it's important to not reference them for WINCE builds. 
@@ -1564,12 +2274,14 @@ static int winFullPathname( ){ #if defined(__CYGWIN__) + SimulateIOError( return SQLITE_ERROR ); UNUSED_PARAMETER(nFull); cygwin_conv_to_full_win32_path(zRelative, zFull); return SQLITE_OK; #endif #if SQLITE_OS_WINCE + SimulateIOError( return SQLITE_ERROR ); UNUSED_PARAMETER(nFull); /* WinCE has no concept of a relative pathname, or so I am told. */ sqlite3_snprintf(pVfs->mxPathname, zFull, "%s", zRelative); @@ -1580,6 +2292,13 @@ static int winFullPathname( int nByte; void *zConverted; char *zOut; + + /* It's odd to simulate an io-error here, but really this is just + ** using the io-error infrastructure to test that SQLite handles this + ** function failing. This function could fail if, for example, the + ** current working directory has been unlinked. + */ + SimulateIOError( return SQLITE_ERROR ); UNUSED_PARAMETER(nFull); zConverted = convertUtf8Filename(zRelative); if( isNT() ){ @@ -1647,7 +2366,9 @@ static int getSectorSize( ** to get the drive letter to look up the sector ** size. */ + SimulateIOErrorBenign(1); rc = winFullPathname(pVfs, zRelative, MAX_PATH, zFullpath); + SimulateIOErrorBenign(0); if( rc == SQLITE_OK ) { void *zConverted = convertUtf8Filename(zFullpath); @@ -1795,34 +2516,32 @@ static int winSleep(sqlite3_vfs *pVfs, int microsec){ } /* -** The following variable, if set to a non-zero value, becomes the result -** returned from sqlite3OsCurrentTime(). This is used for testing. +** The following variable, if set to a non-zero value, is interpreted as +** the number of seconds since 1970 and is used to set the result of +** sqlite3OsCurrentTime() during testing. */ #ifdef SQLITE_TEST -int sqlite3_current_time = 0; +int sqlite3_current_time = 0; /* Fake system time in seconds since 1970. */ #endif /* -** Find the current time (in Universal Coordinated Time). Write the -** current time and date as a Julian Day number into *prNow and -** return 0. Return 1 if the time and date cannot be found. +** Find the current time (in Universal Coordinated Time). Write into *piNow +** the current time and date as a Julian Day number times 86_400_000. In +** other words, write into *piNow the number of milliseconds since the Julian +** epoch of noon in Greenwich on November 24, 4714 B.C according to the +** proleptic Gregorian calendar. +** +** On success, return 0. Return 1 if the time and date cannot be found. */ -int winCurrentTime(sqlite3_vfs *pVfs, double *prNow){ - FILETIME ft; +static int winCurrentTimeInt64(sqlite3_vfs *pVfs, sqlite3_int64 *piNow){ /* FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601 (= JD 2305813.5). 
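**
** A worked sketch of the constants used below: the FILETIME epoch of
** JD 2305813.5 days is 2305813.5 * 86400000 ms, i.e. the value
** 23058135 * 8640000 assigned to winFiletimeEpoch, and the Unix epoch of
** JD 2440587.5 days gives 24405875 * 8640000 for unixEpoch. Dividing a
** FILETIME value by 10000 converts 100-nanosecond ticks to milliseconds.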
*/ - sqlite3_int64 timeW; /* Whole days */ - sqlite3_int64 timeF; /* Fractional Days */ - - /* Number of 100-nanosecond intervals in a single day */ - static const sqlite3_int64 ntuPerDay = - 10000000*(sqlite3_int64)86400; - - /* Number of 100-nanosecond intervals in half of a day */ - static const sqlite3_int64 ntuPerHalfDay = - 10000000*(sqlite3_int64)43200; - + FILETIME ft; + static const sqlite3_int64 winFiletimeEpoch = 23058135*(sqlite3_int64)8640000; +#ifdef SQLITE_TEST + static const sqlite3_int64 unixEpoch = 24405875*(sqlite3_int64)8640000; +#endif /* 2^32 - to avoid use of LL and warnings in gcc */ static const sqlite3_int64 max32BitValue = (sqlite3_int64)2000000000 + (sqlite3_int64)2000000000 + (sqlite3_int64)294967296; @@ -1837,23 +2556,35 @@ int winCurrentTime(sqlite3_vfs *pVfs, double *prNow){ #else GetSystemTimeAsFileTime( &ft ); #endif - UNUSED_PARAMETER(pVfs); - timeW = (((sqlite3_int64)ft.dwHighDateTime)*max32BitValue) + (sqlite3_int64)ft.dwLowDateTime; - timeF = timeW % ntuPerDay; /* fractional days (100-nanoseconds) */ - timeW = timeW / ntuPerDay; /* whole days */ - timeW = timeW + 2305813; /* add whole days (from 2305813.5) */ - timeF = timeF + ntuPerHalfDay; /* add half a day (from 2305813.5) */ - timeW = timeW + (timeF/ntuPerDay); /* add whole day if half day made one */ - timeF = timeF % ntuPerDay; /* compute new fractional days */ - *prNow = (double)timeW + ((double)timeF / (double)ntuPerDay); + + *piNow = winFiletimeEpoch + + ((((sqlite3_int64)ft.dwHighDateTime)*max32BitValue) + + (sqlite3_int64)ft.dwLowDateTime)/(sqlite3_int64)10000; + #ifdef SQLITE_TEST if( sqlite3_current_time ){ - *prNow = ((double)sqlite3_current_time + (double)43200) / (double)86400 + (double)2440587; + *piNow = 1000*(sqlite3_int64)sqlite3_current_time + unixEpoch; } #endif + UNUSED_PARAMETER(pVfs); return 0; } +/* +** Find the current time (in Universal Coordinated Time). Write the +** current time and date as a Julian Day number into *prNow and +** return 0. Return 1 if the time and date cannot be found. +*/ +int winCurrentTime(sqlite3_vfs *pVfs, double *prNow){ + int rc; + sqlite3_int64 i; + rc = winCurrentTimeInt64(pVfs, &i); + if( !rc ){ + *prNow = i/86400000.0; + } + return rc; +} + /* ** The idea is that this function works like a combination of ** GetLastError() and FormatMessage() on windows (or errno and @@ -1889,30 +2620,32 @@ static int winGetLastError(sqlite3_vfs *pVfs, int nBuf, char *zBuf){ return getLastErrorMsg(nBuf, zBuf); } + + /* ** Initialize and deinitialize the operating system interface. 
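**
** A minimal usage sketch (assuming no other VFS is registered as the
** default afterwards): once this routine has run, the "win32" VFS defined
** below is the default, so both of the following return a pointer to the
** same winVfs object:
**
**     sqlite3_vfs *pDefault = sqlite3_vfs_find(0);       /* default VFS */
**     sqlite3_vfs *pWin32   = sqlite3_vfs_find("win32"); /* by name     */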
*/ int sqlite3_os_init(void){ static sqlite3_vfs winVfs = { - 1, /* iVersion */ - sizeof(winFile), /* szOsFile */ - MAX_PATH, /* mxPathname */ - 0, /* pNext */ - "win32", /* zName */ - 0, /* pAppData */ - - winOpen, /* xOpen */ - winDelete, /* xDelete */ - winAccess, /* xAccess */ - winFullPathname, /* xFullPathname */ - winDlOpen, /* xDlOpen */ - winDlError, /* xDlError */ - winDlSym, /* xDlSym */ - winDlClose, /* xDlClose */ - winRandomness, /* xRandomness */ - winSleep, /* xSleep */ - winCurrentTime, /* xCurrentTime */ - winGetLastError /* xGetLastError */ + 2, /* iVersion */ + sizeof(winFile), /* szOsFile */ + MAX_PATH, /* mxPathname */ + 0, /* pNext */ + "win32", /* zName */ + 0, /* pAppData */ + winOpen, /* xOpen */ + winDelete, /* xDelete */ + winAccess, /* xAccess */ + winFullPathname, /* xFullPathname */ + winDlOpen, /* xDlOpen */ + winDlError, /* xDlError */ + winDlSym, /* xDlSym */ + winDlClose, /* xDlClose */ + winRandomness, /* xRandomness */ + winSleep, /* xSleep */ + winCurrentTime, /* xCurrentTime */ + winGetLastError, /* xGetLastError */ + winCurrentTimeInt64, /* xCurrentTimeInt64 */ }; sqlite3_vfs_register(&winVfs, 1); diff --git a/src/pager.c b/src/pager.c index 180b3c7..dfb29e3 100644 --- a/src/pager.c +++ b/src/pager.c @@ -20,6 +20,88 @@ */ #ifndef SQLITE_OMIT_DISKIO #include "sqliteInt.h" +#include "wal.h" + +/* +******************** NOTES ON THE DESIGN OF THE PAGER ************************ +** +** Within this comment block, a page is deemed to have been synced +** automatically as soon as it is written when PRAGMA synchronous=OFF. +** Otherwise, the page is not synced until the xSync method of the VFS +** is called successfully on the file containing the page. +** +** Definition: A page of the database file is said to be "overwriteable" if +** one or more of the following are true about the page: +** +** (a) The original content of the page as it was at the beginning of +** the transaction has been written into the rollback journal and +** synced. +** +** (b) The page was a freelist leaf page at the start of the transaction. +** +** (c) The page number is greater than the largest page that existed in +** the database file at the start of the transaction. +** +** (1) A page of the database file is never overwritten unless one of the +** following are true: +** +** (a) The page and all other pages on the same sector are overwriteable. +** +** (b) The atomic page write optimization is enabled, and the entire +** transaction other than the update of the transaction sequence +** number consists of a single page change. +** +** (2) The content of a page written into the rollback journal exactly matches +** both the content in the database when the rollback journal was written +** and the content in the database at the beginning of the current +** transaction. +** +** (3) Writes to the database file are an integer multiple of the page size +** in length and are aligned to a page boundary. +** +** (4) Reads from the database file are either aligned on a page boundary and +** an integer multiple of the page size in length or are taken from the +** first 100 bytes of the database file. +** +** (5) All writes to the database file are synced prior to the rollback journal +** being deleted, truncated, or zeroed. +** +** (6) If a master journal file is used, then all writes to the database file +** are synced prior to the master journal being deleted. 
+** +** Definition: Two databases (or the same database at two points in time) +** are said to be "logically equivalent" if they give the same answer to +** all queries. Note in particular that the content of freelist leaf +** pages can be changed arbitrarily without affecting the logical equivalence +** of the database. +** +** (7) At any time, if any subset, including the empty set and the total set, +** of the unsynced changes to a rollback journal are removed and the +** journal is rolled back, the resulting database file will be logically +** equivalent to the database file at the beginning of the transaction. +** +** (8) When a transaction is rolled back, the xTruncate method of the VFS +** is called to restore the database file to the same size it was at +** the beginning of the transaction. (In some VFSes, the xTruncate +** method is a no-op, but that does not change the fact that SQLite will +** invoke it.) +** +** (9) Whenever the database file is modified, at least one bit in the range +** of bytes from 24 through 39 inclusive will be changed prior to releasing +** the EXCLUSIVE lock. +** +** (10) The pattern of bits in bytes 24 through 39 shall not repeat in less +** than one billion transactions. +** +** (11) A database file is well-formed at the beginning and at the conclusion +** of every transaction. +** +** (12) An EXCLUSIVE lock is held on the database file when writing to +** the database file. +** +** (13) A SHARED lock is held on the database file while reading any +** content out of the database file. +*/ /* ** Macros for troubleshooting. Normally turned off @@ -139,6 +221,9 @@ struct PagerSavepoint { Bitvec *pInSavepoint; /* Set of pages in this savepoint */ Pgno nOrig; /* Original number of pages in file */ Pgno iSubRec; /* Index of first record in sub-journal */ +#ifndef SQLITE_OMIT_WAL + u32 aWalData[WAL_SAVEPOINT_NDATA]; /* WAL savepoint context */ +#endif }; /* @@ -197,7 +282,8 @@ struct PagerSavepoint { ** ** journalStarted ** -** This flag is set whenever the the main journal is synced. +** This flag is set whenever the main journal is opened and +** initialized. ** ** The point of this flag is that it must be set after the ** first journal header in a journal file has been synced to disk. @@ -221,9 +307,15 @@ struct PagerSavepoint { ** master journal name is only written to the journal file the first ** time CommitPhaseOne() is called. ** -** doNotSync +** doNotSpill, doNotSyncSpill ** -** This variable is set and cleared by sqlite3PagerWrite(). +** When enabled, cache spills are prohibited. The doNotSpill variable +** inhibits all cache spills and doNotSyncSpill inhibits those spills that +** would require a journal sync. The doNotSyncSpill is set and cleared +** by sqlite3PagerWrite() in order to prevent a journal sync from happening +** in between the journalling of two pages on the same sector. The +** doNotSpill value is set to prevent pagerStress() from trying to use +** the journal during a rollback. ** ** needSync ** @@ -267,7 +359,8 @@ struct Pager { u8 journalStarted; /* True if header of journal is synced */ u8 changeCountDone; /* Set after incrementing the change-counter */ u8 setMaster; /* True if a m-j name has been written to jrnl */ - u8 doNotSync; /* Boolean.
While true, do not spill the cache */ + u8 doNotSpill; /* Do not spill the cache when non-zero */ + u8 doNotSyncSpill; /* Do not do a spill that requires jrnl sync */ u8 dbSizeValid; /* Set when dbSize is correct */ u8 subjInMemory; /* True to use in-memory sub-journals */ Pgno dbSize; /* Number of pages in the database */ @@ -283,6 +376,7 @@ struct Pager { sqlite3_file *sjfd; /* File descriptor for sub-journal */ i64 journalOff; /* Current write offset in the journal file */ i64 journalHdr; /* Byte offset to previous journal header */ + i64 journalSizeLimit; /* Size limit for persistent journal files */ PagerSavepoint *aSavepoint; /* Array of active savepoints */ int nSavepoint; /* Number of elements in aSavepoint[] */ char dbFileVers[16]; /* Changes whenever database file changes */ @@ -309,9 +403,12 @@ struct Pager { void *pCodec; /* First argument to xCodec... methods */ #endif char *pTmpSpace; /* Pager.pageSize bytes of space for tmp use */ - i64 journalSizeLimit; /* Size limit for persistent journal files */ PCache *pPCache; /* Pointer to page cache object */ sqlite3_backup *pBackup; /* Pointer to list of ongoing backup processes */ +#ifndef SQLITE_OMIT_WAL + Wal *pWal; /* Write-ahead log used by "journal_mode=wal" */ + char *zWal; /* File name for write-ahead log */ +#endif }; /* @@ -825,6 +922,7 @@ static int writeJournalHdr(Pager *pPager){ for(nWrite=0; rc==SQLITE_OK&&nWritejournalHdr, nHeader)) rc = sqlite3OsWrite(pPager->jfd, zHeader, nHeader, pPager->journalOff); + assert( pPager->journalHdr <= pPager->journalOff ); pPager->journalOff += nHeader; } @@ -983,6 +1081,7 @@ static int writeMasterJournal(Pager *pPager, const char *zMaster){ } pPager->setMaster = 1; assert( isOpen(pPager->jfd) ); + assert( pPager->journalHdr <= pPager->journalOff ); /* Calculate the length in bytes and the checksum of zMaster */ for(nMaster=0; zMaster[nMaster]; nMaster++){ @@ -1098,6 +1197,22 @@ static int addToSavepointBitvecs(Pager *pPager, Pgno pgno){ return rc; } +/* +** Return true if this pager uses a write-ahead log instead of the usual +** rollback journal. Otherwise false. +*/ +#ifndef SQLITE_OMIT_WAL +static int pagerUseWal(Pager *pPager){ + return (pPager->pWal!=0); +} +#else +# define pagerUseWal(x) 0 +# define pagerRollbackWal(x) 0 +# define pagerWalFrames(v,w,x,y,z) 0 +# define pagerOpenWalIfPresent(z) SQLITE_OK +# define pagerBeginReadTransaction(z) SQLITE_OK +#endif + /* ** Unlock the database file. This function is a no-op if the pager ** is in exclusive mode. @@ -1110,25 +1225,43 @@ static int addToSavepointBitvecs(Pager *pPager, Pgno pgno){ */ static void pager_unlock(Pager *pPager){ if( !pPager->exclusiveMode ){ - int rc; /* Return code */ + int rc = SQLITE_OK; /* Return code */ + int iDc = isOpen(pPager->fd)?sqlite3OsDeviceCharacteristics(pPager->fd):0; - /* Always close the journal file when dropping the database lock. - ** Otherwise, another connection with journal_mode=delete might - ** delete the file out from under us. + /* If the operating system support deletion of open files, then + ** close the journal file when dropping the database lock. Otherwise + ** another connection with journal_mode=delete might delete the file + ** out from under us. 
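+ ** (The "& 5" tests in the asserts that follow rely on the usual
+ ** journal-mode values from pager.h, namely DELETE=0, PERSIST=1, OFF=2,
+ ** TRUNCATE=3, MEMORY=4 and WAL=5, under which (journalMode & 5)==1 holds
+ ** exactly for TRUNCATE and PERSIST, the journal modes in which the
+ ** journal file persists between transactions.)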
*/ - sqlite3OsClose(pPager->jfd); + assert( (PAGER_JOURNALMODE_MEMORY & 5)!=1 ); + assert( (PAGER_JOURNALMODE_OFF & 5)!=1 ); + assert( (PAGER_JOURNALMODE_WAL & 5)!=1 ); + assert( (PAGER_JOURNALMODE_DELETE & 5)!=1 ); + assert( (PAGER_JOURNALMODE_TRUNCATE & 5)==1 ); + assert( (PAGER_JOURNALMODE_PERSIST & 5)==1 ); + if( 0==(iDc & SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN) + || 1!=(pPager->journalMode & 5) + ){ + sqlite3OsClose(pPager->jfd); + } + sqlite3BitvecDestroy(pPager->pInJournal); pPager->pInJournal = 0; releaseAllSavepoints(pPager); /* If the file is unlocked, somebody else might change it. The ** values stored in Pager.dbSize etc. might become invalid if - ** this happens. TODO: Really, this doesn't need to be cleared + ** this happens. One can argue that this doesn't need to be cleared ** until the change-counter check fails in PagerSharedLock(). + ** Clearing the page size cache here is being conservative. */ pPager->dbSizeValid = 0; - rc = osUnlock(pPager->fd, NO_LOCK); + if( pagerUseWal(pPager) ){ + sqlite3WalEndReadTransaction(pPager->pWal); + }else{ + rc = osUnlock(pPager->fd, NO_LOCK); + } if( rc ){ pPager->errCode = rc; } @@ -1159,7 +1292,7 @@ static void pager_unlock(Pager *pPager){ ** to this function. ** ** If the second argument is SQLITE_IOERR, SQLITE_CORRUPT, or SQLITE_FULL -** the error becomes persistent. Until the persisten error is cleared, +** the error becomes persistent. Until the persistent error is cleared, ** subsequent API calls on this Pager will immediately return the same ** error code. ** @@ -1277,6 +1410,7 @@ static int pager_end_transaction(Pager *pPager, int hasMaster){ assert( isOpen(pPager->jfd) || pPager->pInJournal==0 ); if( isOpen(pPager->jfd) ){ + assert( !pagerUseWal(pPager) ); /* Finalize the journal file. */ if( sqlite3IsMemJournal(pPager->jfd) ){ @@ -1290,8 +1424,8 @@ static int pager_end_transaction(Pager *pPager, int hasMaster){ } pPager->journalOff = 0; pPager->journalStarted = 0; - }else if( pPager->exclusiveMode - || pPager->journalMode==PAGER_JOURNALMODE_PERSIST + }else if( pPager->journalMode==PAGER_JOURNALMODE_PERSIST + || (pPager->exclusiveMode && pPager->journalMode!=PAGER_JOURNALMODE_WAL) ){ rc = zeroJournalHdr(pPager, hasMaster); pager_error(pPager, rc); @@ -1301,9 +1435,11 @@ static int pager_end_transaction(Pager *pPager, int hasMaster){ /* This branch may be executed with Pager.journalMode==MEMORY if ** a hot-journal was just rolled back. In this case the journal ** file should be closed and deleted. If this connection writes to - ** the database file, it will do so using an in-memory journal. */ + ** the database file, it will do so using an in-memory journal. 
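The block of assert() statements added above encodes a small bit trick: assuming the journal-mode constants take the values used in pager.h at this time (DELETE=0, PERSIST=1, OFF=2, TRUNCATE=3, MEMORY=4, WAL=5), the test (mode & 5)==1 holds exactly for PERSIST and TRUNCATE, the two modes that deliberately leave a journal file on disk and therefore keep it open across an unlock on VFSes that cannot delete open files. A minimal standalone sketch of the trick (the enum values are an assumption, not part of this patch):

#include <assert.h>
#include <stdio.h>

/* Assumed values; intended to mirror the PAGER_JOURNALMODE_* constants. */
enum { JM_DELETE=0, JM_PERSIST=1, JM_OFF=2, JM_TRUNCATE=3, JM_MEMORY=4, JM_WAL=5 };

int main(void){
  int m;
  for(m=JM_DELETE; m<=JM_WAL; m++){
    /* (m & 5)==1 is true exactly for PERSIST and TRUNCATE, the two modes
    ** that intentionally leave a journal file behind after commit. */
    printf("mode %d keeps journal open on unlock: %s\n", m, (m & 5)==1 ? "yes" : "no");
  }
  assert( (JM_PERSIST  & 5)==1 );
  assert( (JM_TRUNCATE & 5)==1 );
  assert( (JM_DELETE & 5)!=1 && (JM_OFF & 5)!=1 && (JM_MEMORY & 5)!=1 && (JM_WAL & 5)!=1 );
  return 0;
}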
+ */ assert( pPager->journalMode==PAGER_JOURNALMODE_DELETE || pPager->journalMode==PAGER_JOURNALMODE_MEMORY + || pPager->journalMode==PAGER_JOURNALMODE_WAL ); sqlite3OsClose(pPager->jfd); if( !pPager->tempFile ){ @@ -1314,14 +1450,24 @@ static int pager_end_transaction(Pager *pPager, int hasMaster){ #ifdef SQLITE_CHECK_PAGES sqlite3PcacheIterateDirty(pPager->pPCache, pager_set_pagehash); #endif - - sqlite3PcacheCleanAll(pPager->pPCache); - sqlite3BitvecDestroy(pPager->pInJournal); - pPager->pInJournal = 0; - pPager->nRec = 0; } + sqlite3BitvecDestroy(pPager->pInJournal); + pPager->pInJournal = 0; + pPager->nRec = 0; + sqlite3PcacheCleanAll(pPager->pPCache); - if( !pPager->exclusiveMode ){ + if( pagerUseWal(pPager) ){ + rc2 = sqlite3WalEndWriteTransaction(pPager->pWal); + assert( rc2==SQLITE_OK ); + pPager->state = PAGER_SHARED; + + /* If the connection was in locking_mode=exclusive mode but is no longer, + ** drop the EXCLUSIVE lock held on the database file. + */ + if( !pPager->exclusiveMode && sqlite3WalExclusiveMode(pPager->pWal, 0) ){ + rc2 = osUnlock(pPager->fd, SHARED_LOCK); + } + }else if( !pPager->exclusiveMode ){ rc2 = osUnlock(pPager->fd, SHARED_LOCK); pPager->state = PAGER_SHARED; pPager->changeCountDone = 0; @@ -1372,6 +1518,21 @@ static u32 pager_cksum(Pager *pPager, const u8 *aData){ return cksum; } +/* +** Report the current page size and number of reserved bytes back +** to the codec. +*/ +#ifdef SQLITE_HAS_CODEC +static void pagerReportSize(Pager *pPager){ + if( pPager->xCodecSizeChng ){ + pPager->xCodecSizeChng(pPager->pCodec, pPager->pageSize, + (int)pPager->nReserve); + } +} +#else +# define pagerReportSize(X) /* No-op if we do not support a codec */ +#endif + /* ** Read a single page from either the journal file (if isMainJrnl==1) or ** from the sub-journal (if isMainJrnl==0) and playback that page. @@ -1412,11 +1573,10 @@ static u32 pager_cksum(Pager *pPager, const u8 *aData){ */ static int pager_playback_one_page( Pager *pPager, /* The pager being played back */ - int isMainJrnl, /* 1 -> main journal. 0 -> sub-journal. */ - int isUnsync, /* True if reading from unsynced main journal */ i64 *pOffset, /* Offset of record to playback */ - int isSavepnt, /* True for a savepoint rollback */ - Bitvec *pDone /* Bitvec of pages already played back */ + Bitvec *pDone, /* Bitvec of pages already played back */ + int isMainJrnl, /* 1 -> main journal. 0 -> sub-journal. */ + int isSavepnt /* True for a savepoint rollback */ ){ int rc; PgHdr *pPg; /* An existing page in the cache */ @@ -1424,6 +1584,7 @@ static int pager_playback_one_page( u32 cksum; /* Checksum used for sanity checking */ char *aData; /* Temporary storage for the page */ sqlite3_file *jfd; /* The file descriptor for the journal file */ + int isSynced; /* True if journal page is synced */ assert( (isMainJrnl&~1)==0 ); /* isMainJrnl is 0 or 1 */ assert( (isSavepnt&~1)==0 ); /* isSavepnt is 0 or 1 */ @@ -1432,6 +1593,7 @@ static int pager_playback_one_page( aData = pPager->pTmpSpace; assert( aData ); /* Temp storage must have already been allocated */ + assert( pagerUseWal(pPager)==0 || (!isMainJrnl && isSavepnt) ); /* Read the page number and page data from the journal or sub-journal ** file. Return an error code to the caller if an IO error occurs. @@ -1463,12 +1625,21 @@ static int pager_playback_one_page( } } + /* If this page has already been played by before during the current + ** rollback, then don't bother to play it back again. 
+ */ if( pDone && (rc = sqlite3BitvecSet(pDone, pgno))!=SQLITE_OK ){ return rc; } - assert( pPager->state==PAGER_RESERVED || pPager->state>=PAGER_EXCLUSIVE ); + /* When playing back page 1, restore the nReserve setting + */ + if( pgno==1 && pPager->nReserve!=((u8*)aData)[20] ){ + pPager->nReserve = ((u8*)aData)[20]; + pagerReportSize(pPager); + } + /* If the pager is in RESERVED state, then there must be a copy of this ** page in the pager cache. In this case just update the pager cache, ** not the database file. The page is left marked dirty in this case. @@ -1501,18 +1672,28 @@ static int pager_playback_one_page( ** is possible to fail a statement on a database that does not yet exist. ** Do not attempt to write if database file has never been opened. */ - pPg = pager_lookup(pPager, pgno); + if( pagerUseWal(pPager) ){ + pPg = 0; + }else{ + pPg = pager_lookup(pPager, pgno); + } assert( pPg || !MEMDB ); PAGERTRACE(("PLAYBACK %d page %d hash(%08x) %s\n", PAGERID(pPager), pgno, pager_datahash(pPager->pageSize, (u8*)aData), (isMainJrnl?"main-journal":"sub-journal") )); + if( isMainJrnl ){ + isSynced = pPager->noSync || (*pOffset <= pPager->journalHdr); + }else{ + isSynced = (pPg==0 || 0==(pPg->flags & PGHDR_NEED_SYNC)); + } if( (pPager->state>=PAGER_EXCLUSIVE) - && (pPg==0 || 0==(pPg->flags&PGHDR_NEED_SYNC)) && isOpen(pPager->fd) - && !isUnsync + && isSynced ){ i64 ofst = (pgno-1)*(i64)pPager->pageSize; + testcase( !isSavepnt && pPg!=0 && (pPg->flags&PGHDR_NEED_SYNC)!=0 ); + assert( !pagerUseWal(pPager) ); rc = sqlite3OsWrite(pPager->fd, (u8*)aData, pPager->pageSize, ofst); if( pgno>pPager->dbFileSize ){ pPager->dbFileSize = pgno; @@ -1540,9 +1721,12 @@ static int pager_playback_one_page( ** requiring a journal-sync before it is written. */ assert( isSavepnt ); - if( (rc = sqlite3PagerAcquire(pPager, pgno, &pPg, 1))!=SQLITE_OK ){ - return rc; - } + assert( pPager->doNotSpill==0 ); + pPager->doNotSpill++; + rc = sqlite3PagerAcquire(pPager, pgno, &pPg, 1); + assert( pPager->doNotSpill==1 ); + pPager->doNotSpill--; + if( rc!=SQLITE_OK ) return rc; pPg->flags &= ~PGHDR_NEED_READ; sqlite3PcacheMakeDirty(pPg); } @@ -1561,7 +1745,8 @@ static int pager_playback_one_page( /* If the contents of this page were just restored from the main ** journal file, then its content must be as they were when the ** transaction was first opened. In this case we can mark the page - ** as clean, since there will be no need to write it out to the. + ** as clean, since there will be no need to write it out to the + ** database. ** ** There is one exception to this rule. If the page is being rolled ** back as part of a savepoint (or statement) rollback from an @@ -1576,6 +1761,7 @@ static int pager_playback_one_page( ** segment is synced. If a crash occurs during or following this, ** database corruption may ensue. */ + assert( !pagerUseWal(pPager) ); sqlite3PcacheMakeClean(pPg); } #ifdef SQLITE_CHECK_PAGES @@ -1644,6 +1830,9 @@ static int pager_delmaster(Pager *pPager, const char *zMaster){ sqlite3_file *pJournal; /* Malloc'd child-journal file descriptor */ char *zMasterJournal = 0; /* Contents of master journal file */ i64 nMasterJournal; /* Size of master journal file */ + char *zJournal; /* Pointer to one journal within MJ file */ + char *zMasterPtr; /* Space to hold MJ filename from a journal file */ + int nMasterPtr; /* Amount of space allocated to zMasterPtr[] */ /* Allocate space for both the pJournal and pMaster file descriptors. ** If successful, open the master journal file for reading. 
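The isSynced computation introduced in pager_playback_one_page() decides whether a played-back record may be written directly into the database file: a main-journal record must lie within the synced prefix of the journal (or syncing must be disabled), while a sub-journal record requires that no cached copy of the page still carries PGHDR_NEED_SYNC. A compilable sketch of that decision, using stand-in structs and an illustrative flag value rather than the real pager types:

#include <stdio.h>
#include <stdint.h>

#define PGHDR_NEED_SYNC 0x08   /* illustrative value; the real flag differs */

typedef struct { int noSync; int64_t journalHdr; } FakePager;  /* stand-in */
typedef struct { unsigned flags; } FakePage;                   /* stand-in */

/* May a played-back record be written straight into the database file? */
static int playbackIsSynced(FakePager *p, FakePage *pPg,
                            int isMainJrnl, int64_t offset){
  if( isMainJrnl ){
    /* Safe if syncing is disabled, or the record lies in the synced prefix
    ** of the journal (at or before the last synced journal header). */
    return p->noSync || offset <= p->journalHdr;
  }
  /* Sub-journal record: safe unless a cached copy still needs a sync. */
  return pPg==0 || (pPg->flags & PGHDR_NEED_SYNC)==0;
}

int main(void){
  FakePager pager = { 0, 512 };           /* journal synced through offset 512 */
  FakePage  page  = { PGHDR_NEED_SYNC };
  printf("%d %d %d\n",
    playbackIsSynced(&pager, 0, 1, 100),  /* 1: inside the synced prefix      */
    playbackIsSynced(&pager, 0, 1, 900),  /* 0: beyond the last synced header */
    playbackIsSynced(&pager, &page, 0, 0) /* 0: cached page still needs sync  */
  );
  return 0;
}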
@@ -1658,73 +1847,68 @@ static int pager_delmaster(Pager *pPager, const char *zMaster){ } if( rc!=SQLITE_OK ) goto delmaster_out; + /* Load the entire master journal file into space obtained from + ** sqlite3_malloc() and pointed to by zMasterJournal. Also obtain + ** sufficient space (in zMasterPtr) to hold the names of master + ** journal files extracted from regular rollback-journals. + */ rc = sqlite3OsFileSize(pMaster, &nMasterJournal); if( rc!=SQLITE_OK ) goto delmaster_out; + nMasterPtr = pVfs->mxPathname+1; + zMasterJournal = sqlite3Malloc((int)nMasterJournal + nMasterPtr + 1); + if( !zMasterJournal ){ + rc = SQLITE_NOMEM; + goto delmaster_out; + } + zMasterPtr = &zMasterJournal[nMasterJournal+1]; + rc = sqlite3OsRead(pMaster, zMasterJournal, (int)nMasterJournal, 0); + if( rc!=SQLITE_OK ) goto delmaster_out; + zMasterJournal[nMasterJournal] = 0; - if( nMasterJournal>0 ){ - char *zJournal; - char *zMasterPtr = 0; - int nMasterPtr = pVfs->mxPathname+1; - - /* Load the entire master journal file into space obtained from - ** sqlite3_malloc() and pointed to by zMasterJournal. - */ - zMasterJournal = sqlite3Malloc((int)nMasterJournal + nMasterPtr + 1); - if( !zMasterJournal ){ - rc = SQLITE_NOMEM; + zJournal = zMasterJournal; + while( (zJournal-zMasterJournal)journalHdr+JOURNAL_HDR_SZ(pPager)==pPager->journalOff ){ nRec = (int)((szJ - pPager->journalOff) / JOURNAL_PG_SZ(pPager)); - isUnsync = 1; } /* If this is the first header read from the journal, truncate the @@ -1972,12 +2153,20 @@ static int pager_playback(Pager *pPager, int isHot){ pager_reset(pPager); needPagerReset = 0; } - rc = pager_playback_one_page(pPager,1,isUnsync,&pPager->journalOff,0,0); + rc = pager_playback_one_page(pPager,&pPager->journalOff,0,1,0); if( rc!=SQLITE_OK ){ if( rc==SQLITE_DONE ){ rc = SQLITE_OK; pPager->journalOff = szJ; break; + }else if( rc==SQLITE_IOERR_SHORT_READ ){ + /* If the journal has been truncated, simply stop reading and + ** processing the journal. This might happen if the journal was + ** not completely written and synced prior to a crash. In that + ** case, the database should have never been written in the + ** first place so it is OK to simply abandon the rollback. */ + rc = SQLITE_OK; + goto end_playback; }else{ /* If we are unable to rollback, quit and return the error ** code. This will cause the pager to enter the error state @@ -2022,6 +2211,9 @@ end_playback: if( rc==SQLITE_OK && pPager->noSync==0 && pPager->state>=PAGER_EXCLUSIVE ){ rc = sqlite3OsSync(pPager->fd, pPager->sync_flags); } + if( rc==SQLITE_OK && pPager->noSync==0 && pPager->state>=PAGER_EXCLUSIVE ){ + rc = sqlite3OsSync(pPager->fd, pPager->sync_flags); + } if( rc==SQLITE_OK ){ rc = pager_end_transaction(pPager, zMaster[0]!='\0'); testcase( rc!=SQLITE_OK ); @@ -2042,6 +2234,262 @@ end_playback: return rc; } + +/* +** Read the content for page pPg out of the database file and into +** pPg->pData. A shared lock or greater must be held on the database +** file before this function is called. +** +** If page 1 is read, then the value of Pager.dbFileVers[] is set to +** the value read from the database file. +** +** If an IO error occurs, then the IO error is returned to the caller. +** Otherwise, SQLITE_OK is returned. 
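The rewritten pager_delmaster() loads the whole master journal into a single allocation and then walks it as a sequence of NUL-terminated child-journal names, reserving nMasterPtr extra bytes at the end of the same buffer for names read back out of those child journals. The walk itself is ordinary pointer arithmetic; a self-contained demonstration:

#include <stdio.h>
#include <string.h>

int main(void){
  /* A master journal is a concatenation of NUL-terminated journal names. */
  const char buf[] = "one.db-journal\0two.db-journal\0";
  size_t n = sizeof(buf) - 1;          /* total bytes, including the NULs */
  const char *z = buf;
  while( (size_t)(z - buf) < n ){
    printf("child journal: %s\n", z);  /* here SQLite would check/delete it */
    z += strlen(z) + 1;                /* advance past the name and its NUL */
  }
  return 0;
}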
+*/ +static int readDbPage(PgHdr *pPg){ + Pager *pPager = pPg->pPager; /* Pager object associated with page pPg */ + Pgno pgno = pPg->pgno; /* Page number to read */ + int rc = SQLITE_OK; /* Return code */ + int isInWal = 0; /* True if page is in log file */ + int pgsz = pPager->pageSize; /* Number of bytes to read */ + + assert( pPager->state>=PAGER_SHARED && !MEMDB ); + assert( isOpen(pPager->fd) ); + + if( NEVER(!isOpen(pPager->fd)) ){ + assert( pPager->tempFile ); + memset(pPg->pData, 0, pPager->pageSize); + return SQLITE_OK; + } + + if( pagerUseWal(pPager) ){ + /* Try to pull the page from the write-ahead log. */ + rc = sqlite3WalRead(pPager->pWal, pgno, &isInWal, pgsz, pPg->pData); + } + if( rc==SQLITE_OK && !isInWal ){ + i64 iOffset = (pgno-1)*(i64)pPager->pageSize; + rc = sqlite3OsRead(pPager->fd, pPg->pData, pgsz, iOffset); + if( rc==SQLITE_IOERR_SHORT_READ ){ + rc = SQLITE_OK; + } + } + + if( pgno==1 ){ + if( rc ){ + /* If the read is unsuccessful, set the dbFileVers[] to something + ** that will never be a valid file version. dbFileVers[] is a copy + ** of bytes 24..39 of the database. Bytes 28..31 should always be + ** zero or the size of the database in page. Bytes 32..35 and 35..39 + ** should be page numbers which are never 0xffffffff. So filling + ** pPager->dbFileVers[] with all 0xff bytes should suffice. + ** + ** For an encrypted database, the situation is more complex: bytes + ** 24..39 of the database are white noise. But the probability of + ** white noising equaling 16 bytes of 0xff is vanishingly small so + ** we should still be ok. + */ + memset(pPager->dbFileVers, 0xff, sizeof(pPager->dbFileVers)); + }else{ + u8 *dbFileVers = &((u8*)pPg->pData)[24]; + memcpy(&pPager->dbFileVers, dbFileVers, sizeof(pPager->dbFileVers)); + } + } + CODEC1(pPager, pPg->pData, pgno, 3, rc = SQLITE_NOMEM); + + PAGER_INCR(sqlite3_pager_readdb_count); + PAGER_INCR(pPager->nRead); + IOTRACE(("PGIN %p %d\n", pPager, pgno)); + PAGERTRACE(("FETCH %d page %d hash(%08x)\n", + PAGERID(pPager), pgno, pager_pagehash(pPg))); + + return rc; +} + +#ifndef SQLITE_OMIT_WAL +/* +** This function is invoked once for each page that has already been +** written into the log file when a WAL transaction is rolled back. +** Parameter iPg is the page number of said page. The pCtx argument +** is actually a pointer to the Pager structure. +** +** If page iPg is present in the cache, and has no outstanding references, +** it is discarded. Otherwise, if there are one or more outstanding +** references, the page content is reloaded from the database. If the +** attempt to reload content from the database is required and fails, +** return an SQLite error code. Otherwise, SQLITE_OK. +*/ +static int pagerUndoCallback(void *pCtx, Pgno iPg){ + int rc = SQLITE_OK; + Pager *pPager = (Pager *)pCtx; + PgHdr *pPg; + + pPg = sqlite3PagerLookup(pPager, iPg); + if( pPg ){ + if( sqlite3PcachePageRefcount(pPg)==1 ){ + sqlite3PcacheDrop(pPg); + }else{ + rc = readDbPage(pPg); + if( rc==SQLITE_OK ){ + pPager->xReiniter(pPg); + } + sqlite3PagerUnref(pPg); + } + } + + /* Normally, if a transaction is rolled back, any backup processes are + ** updated as data is copied out of the rollback journal and into the + ** database. This is not generally possible with a WAL database, as + ** rollback involves simply truncating the log file. Therefore, if one + ** or more frames have already been written to the log (and therefore + ** also copied into the backup databases) as part of this transaction, + ** the backups must be restarted. 
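The new readDbPage() is WAL-aware: it consults the write-ahead log first and only falls back to the database file when the page is not in the log, treating a short read as an all-zero page. The following sketch compresses that control flow into a standalone program; walRead() and fileRead() are hypothetical stand-ins for sqlite3WalRead() and sqlite3OsRead(), not real SQLite APIs:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define PGSZ 1024                       /* illustrative page size */

/* Stand-ins for the WAL and file reads: here the WAL is empty and the
** "file" is only one page long, so page 2 comes back as a short read. */
static int walRead(uint32_t pgno, int *pInWal, void *pBuf){ (void)pgno; (void)pBuf; *pInWal = 0; return 0; }
static int fileRead(void *pBuf, int64_t iOfst){ memset(pBuf, iOfst==0 ? 0x11 : 0, PGSZ); return iOfst==0 ? 0 : 1; }

/* WAL-first page read with zero-fill on short read, as in readDbPage(). */
static int readPage(uint32_t pgno, void *pBuf){
  int inWal = 0;
  int rc = walRead(pgno, &inWal, pBuf);            /* try the write-ahead log first */
  if( rc==0 && !inWal ){
    int64_t iOfst = (int64_t)(pgno-1) * PGSZ;      /* byte offset in the db file    */
    rc = fileRead(pBuf, iOfst);
    if( rc==1 ){ memset(pBuf, 0, PGSZ); rc = 0; }  /* short read => all-zero page   */
  }
  return rc;
}

int main(void){
  unsigned char aPage[PGSZ];
  printf("page 1: rc=%d first-byte=%02x\n", readPage(1, aPage), (unsigned)aPage[0]);
  printf("page 2: rc=%d first-byte=%02x\n", readPage(2, aPage), (unsigned)aPage[0]);
  return 0;
}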
+ */ + sqlite3BackupRestart(pPager->pBackup); + + return rc; +} + +/* +** This function is called to rollback a transaction on a WAL database. +*/ +static int pagerRollbackWal(Pager *pPager){ + int rc; /* Return Code */ + PgHdr *pList; /* List of dirty pages to revert */ + + /* For all pages in the cache that are currently dirty or have already + ** been written (but not committed) to the log file, do one of the + ** following: + ** + ** + Discard the cached page (if refcount==0), or + ** + Reload page content from the database (if refcount>0). + */ + pPager->dbSize = pPager->dbOrigSize; + rc = sqlite3WalUndo(pPager->pWal, pagerUndoCallback, (void *)pPager); + pList = sqlite3PcacheDirtyList(pPager->pPCache); + while( pList && rc==SQLITE_OK ){ + PgHdr *pNext = pList->pDirty; + rc = pagerUndoCallback((void *)pPager, pList->pgno); + pList = pNext; + } + + return rc; +} + +/* +** This function is a wrapper around sqlite3WalFrames(). As well as logging +** the contents of the list of pages headed by pList (connected by pDirty), +** this function notifies any active backup processes that the pages have +** changed. +*/ +static int pagerWalFrames( + Pager *pPager, /* Pager object */ + PgHdr *pList, /* List of frames to log */ + Pgno nTruncate, /* Database size after this commit */ + int isCommit, /* True if this is a commit */ + int sync_flags /* Flags to pass to OsSync() (or 0) */ +){ + int rc; /* Return code */ + + assert( pPager->pWal ); + rc = sqlite3WalFrames(pPager->pWal, + pPager->pageSize, pList, nTruncate, isCommit, sync_flags + ); + if( rc==SQLITE_OK && pPager->pBackup ){ + PgHdr *p; + for(p=pList; p; p=p->pDirty){ + sqlite3BackupUpdate(pPager->pBackup, p->pgno, (u8 *)p->pData); + } + } + return rc; +} + +/* +** Begin a read transaction on the WAL. +** +** This routine used to be called "pagerOpenSnapshot()" because it essentially +** makes a snapshot of the database at the current point in time and preserves +** that snapshot for use by the reader in spite of concurrently changes by +** other writers or checkpointers. +*/ +static int pagerBeginReadTransaction(Pager *pPager){ + int rc; /* Return code */ + int changed = 0; /* True if cache must be reset */ + + assert( pagerUseWal(pPager) ); + + /* sqlite3WalEndReadTransaction() was not called for the previous + ** transaction in locking_mode=EXCLUSIVE. So call it now. If we + ** are in locking_mode=NORMAL and EndRead() was previously called, + ** the duplicate call is harmless. + */ + sqlite3WalEndReadTransaction(pPager->pWal); + + rc = sqlite3WalBeginReadTransaction(pPager->pWal, &changed); + if( rc==SQLITE_OK ){ + int dummy; + if( changed ){ + pager_reset(pPager); + assert( pPager->errCode || pPager->dbSizeValid==0 ); + } + rc = sqlite3PagerPagecount(pPager, &dummy); + } + pPager->state = PAGER_SHARED; + + return rc; +} + +/* +** Check if the *-wal file that corresponds to the database opened by pPager +** exists if the database is not empy, or verify that the *-wal file does +** not exist (by deleting it) if the database file is empty. +** +** If the database is not empty and the *-wal file exists, open the pager +** in WAL mode. If the database is empty or if no *-wal file exists and +** if no error occurs, make sure Pager.journalMode is not set to +** PAGER_JOURNALMODE_WAL. +** +** Return SQLITE_OK or an error code. +** +** If the WAL file is opened, also open a snapshot (read transaction). +** +** The caller must hold a SHARED lock on the database file to call this +** function. 
Because an EXCLUSIVE lock on the db file is required to delete +** a WAL on a none-empty database, this ensures there is no race condition +** between the xAccess() below and an xDelete() being executed by some +** other connection. +*/ +static int pagerOpenWalIfPresent(Pager *pPager){ + int rc = SQLITE_OK; + if( !pPager->tempFile ){ + int isWal; /* True if WAL file exists */ + int nPage; /* Size of the database file */ + assert( pPager->state>=SHARED_LOCK ); + rc = sqlite3PagerPagecount(pPager, &nPage); + if( rc ) return rc; + if( nPage==0 ){ + rc = sqlite3OsDelete(pPager->pVfs, pPager->zWal, 0); + isWal = 0; + }else{ + rc = sqlite3OsAccess( + pPager->pVfs, pPager->zWal, SQLITE_ACCESS_EXISTS, &isWal + ); + } + if( rc==SQLITE_OK ){ + if( isWal ){ + pager_reset(pPager); + rc = sqlite3PagerOpenWal(pPager, 0); + if( rc==SQLITE_OK ){ + rc = pagerBeginReadTransaction(pPager); + } + }else if( pPager->journalMode==PAGER_JOURNALMODE_WAL ){ + pPager->journalMode = PAGER_JOURNALMODE_DELETE; + } + } + } + return rc; +} +#endif + /* ** Playback savepoint pSavepoint. Or, if pSavepoint==NULL, then playback ** the entire master journal file. The case pSavepoint==NULL occurs when @@ -2098,6 +2546,11 @@ static int pagerPlaybackSavepoint(Pager *pPager, PagerSavepoint *pSavepoint){ ** being reverted was opened. */ pPager->dbSize = pSavepoint ? pSavepoint->nOrig : pPager->dbOrigSize; + pPager->changeCountDone = pPager->tempFile; + + if( !pSavepoint && pagerUseWal(pPager) ){ + return pagerRollbackWal(pPager); + } /* Use pPager->journalOff as the effective size of the main rollback ** journal. The actual file might be larger than this in @@ -2105,6 +2558,7 @@ static int pagerPlaybackSavepoint(Pager *pPager, PagerSavepoint *pSavepoint){ ** past pPager->journalOff is off-limits to us. */ szJ = pPager->journalOff; + assert( pagerUseWal(pPager)==0 || szJ==0 ); /* Begin by rolling back records from the main journal starting at ** PagerSavepoint.iOffset and continuing to the next journal header. @@ -2113,11 +2567,11 @@ static int pagerPlaybackSavepoint(Pager *pPager, PagerSavepoint *pSavepoint){ ** will be skipped automatically. Pages are added to pDone as they ** are played back. */ - if( pSavepoint ){ + if( pSavepoint && !pagerUseWal(pPager) ){ iHdrOff = pSavepoint->iHdrOffset ? pSavepoint->iHdrOffset : szJ; pPager->journalOff = pSavepoint->iOffset; while( rc==SQLITE_OK && pPager->journalOffjournalOff, 1, pDone); + rc = pager_playback_one_page(pPager, &pPager->journalOff, pDone, 1, 1); } assert( rc!=SQLITE_DONE ); }else{ @@ -2147,11 +2601,11 @@ static int pagerPlaybackSavepoint(Pager *pPager, PagerSavepoint *pSavepoint){ nJRec = (u32)((szJ - pPager->journalOff)/JOURNAL_PG_SZ(pPager)); } for(ii=0; rc==SQLITE_OK && iijournalOffjournalOff, 1, pDone); + rc = pager_playback_one_page(pPager, &pPager->journalOff, pDone, 1, 1); } assert( rc!=SQLITE_DONE ); } - assert( rc!=SQLITE_OK || pPager->journalOff==szJ ); + assert( rc!=SQLITE_OK || pPager->journalOff>=szJ ); /* Finally, rollback pages from the sub-journal. 
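pagerOpenWalIfPresent() reduces to a small decision: temporary databases never use a WAL, an empty database means any leftover *-wal file is stale and is deleted, an existing *-wal file forces the connection into WAL mode, and a missing *-wal file downgrades journal_mode=wal back to delete. A simplified, self-contained rendering of that decision table (the outcome strings are illustrative):

#include <stdio.h>

static const char *walDecision(int isTempFile, int nPage, int walExists, int journalModeIsWal){
  if( isTempFile )  return "leave alone (temp databases never use a WAL)";
  if( nPage==0 )    return "delete any leftover *-wal file; stay in rollback mode";
  if( walExists )   return "open the *-wal file and begin reading in WAL mode";
  if( journalModeIsWal ) return "no *-wal file: fall back from WAL to DELETE mode";
  return "no *-wal file: keep the current rollback journal mode";
}

int main(void){
  printf("%s\n", walDecision(0, 10, 1, 0));   /* existing db with a WAL      */
  printf("%s\n", walDecision(0, 0,  1, 1));   /* empty db: stale WAL removed */
  printf("%s\n", walDecision(0, 10, 0, 1));   /* journal_mode=wal but no WAL */
  return 0;
}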
Page that were ** previously rolled back out of the main journal (and are hence in pDone) @@ -2160,9 +2614,13 @@ static int pagerPlaybackSavepoint(Pager *pPager, PagerSavepoint *pSavepoint){ if( pSavepoint ){ u32 ii; /* Loop counter */ i64 offset = pSavepoint->iSubRec*(4+pPager->pageSize); + + if( pagerUseWal(pPager) ){ + rc = sqlite3WalSavepointUndo(pPager->pWal, pSavepoint->aWalData); + } for(ii=pSavepoint->iSubRec; rc==SQLITE_OK && iinSubRec; ii++){ assert( offset==ii*(4+pPager->pageSize) ); - rc = pager_playback_one_page(pPager, 0, 0, &offset, 1, pDone); + rc = pager_playback_one_page(pPager, &offset, pDone, 0, 1); } assert( rc!=SQLITE_DONE ); } @@ -2171,6 +2629,7 @@ static int pagerPlaybackSavepoint(Pager *pPager, PagerSavepoint *pSavepoint){ if( rc==SQLITE_OK ){ pPager->journalOff = szJ; } + return rc; } @@ -2288,21 +2747,6 @@ void sqlite3PagerSetBusyhandler( pPager->pBusyHandlerArg = pBusyHandlerArg; } -/* -** Report the current page size and number of reserved bytes back -** to the codec. -*/ -#ifdef SQLITE_HAS_CODEC -static void pagerReportSize(Pager *pPager){ - if( pPager->xCodecSizeChng ){ - pPager->xCodecSizeChng(pPager->pCodec, pPager->pageSize, - (int)pPager->nReserve); - } -} -#else -# define pagerReportSize(X) /* No-op if we do not support a codec */ -#endif - /* ** Change the page size used by the Pager object. The new page size ** is passed in *pPageSize. @@ -2383,10 +2827,14 @@ void *sqlite3PagerTempSpace(Pager *pPager){ ** Regardless of mxPage, return the current maximum page count. */ int sqlite3PagerMaxPageCount(Pager *pPager, int mxPage){ + int nPage; if( mxPage>0 ){ pPager->mxPgno = mxPage; } - sqlite3PagerPagecount(pPager, 0); + if( pPager->state!=PAGER_UNLOCK ){ + sqlite3PagerPagecount(pPager, &nPage); + assert( (int)pPager->mxPgno>=nPage ); + } return pPager->mxPgno; } @@ -2432,6 +2880,13 @@ int sqlite3PagerReadFileheader(Pager *pPager, int N, unsigned char *pDest){ int rc = SQLITE_OK; memset(pDest, 0, N); assert( isOpen(pPager->fd) || pPager->tempFile ); + + /* This routine is only called by btree immediately after creating + ** the Pager object. There has not been an opportunity to transition + ** to WAL mode yet. + */ + assert( !pagerUseWal(pPager) ); + if( isOpen(pPager->fd) ){ IOTRACE(("DBHDR %p 0 %d\n", pPager, N)) rc = sqlite3OsRead(pPager->fd, pDest, N, 0); @@ -2458,12 +2913,7 @@ int sqlite3PagerReadFileheader(Pager *pPager, int N, unsigned char *pDest){ ** and *pnPage is set to the number of pages in the database. */ int sqlite3PagerPagecount(Pager *pPager, int *pnPage){ - Pgno nPage; /* Value to return via *pnPage */ - - /* If the pager is already in the error state, return the error code. */ - if( pPager->errCode ){ - return pPager->errCode; - } + Pgno nPage = 0; /* Value to return via *pnPage */ /* Determine the number of pages in the file. Store this in nPage. 
*/ if( pPager->dbSizeValid ){ @@ -2472,15 +2922,23 @@ int sqlite3PagerPagecount(Pager *pPager, int *pnPage){ int rc; /* Error returned by OsFileSize() */ i64 n = 0; /* File size in bytes returned by OsFileSize() */ - assert( isOpen(pPager->fd) || pPager->tempFile ); - if( isOpen(pPager->fd) && (0 != (rc = sqlite3OsFileSize(pPager->fd, &n))) ){ - pager_error(pPager, rc); - return rc; + if( pagerUseWal(pPager) && pPager->state!=PAGER_UNLOCK ){ + sqlite3WalDbsize(pPager->pWal, &nPage); } - if( n>0 && npageSize ){ - nPage = 1; - }else{ - nPage = (Pgno)(n / pPager->pageSize); + + if( nPage==0 ){ + assert( isOpen(pPager->fd) || pPager->tempFile ); + if( isOpen(pPager->fd) ){ + if( SQLITE_OK!=(rc = sqlite3OsFileSize(pPager->fd, &n)) ){ + pager_error(pPager, rc); + return rc; + } + } + if( n>0 && npageSize ){ + nPage = 1; + }else{ + nPage = (Pgno)(n / pPager->pageSize); + } } if( pPager->state!=PAGER_UNLOCK ){ pPager->dbSize = nPage; @@ -2498,9 +2956,7 @@ int sqlite3PagerPagecount(Pager *pPager, int *pnPage){ } /* Set the output variable and return SQLITE_OK */ - if( pnPage ){ - *pnPage = nPage; - } + *pnPage = nPage; return SQLITE_OK; } @@ -2605,6 +3061,32 @@ void sqlite3PagerTruncateImage(Pager *pPager, Pgno nPage){ assertTruncateConstraint(pPager); } + +/* +** This function is called before attempting a hot-journal rollback. It +** syncs the journal file to disk, then sets pPager->journalHdr to the +** size of the journal file so that the pager_playback() routine knows +** that the entire journal file has been synced. +** +** Syncing a hot-journal to disk before attempting to roll it back ensures +** that if a power-failure occurs during the rollback, the process that +** attempts rollback following system recovery sees the same journal +** content as this process. +** +** If everything goes as planned, SQLITE_OK is returned. Otherwise, +** an SQLite error code. +*/ +static int pagerSyncHotJournal(Pager *pPager){ + int rc = SQLITE_OK; + if( !pPager->noSync ){ + rc = sqlite3OsSync(pPager->jfd, SQLITE_SYNC_NORMAL); + } + if( rc==SQLITE_OK ){ + rc = sqlite3OsFileSize(pPager->jfd, &pPager->journalHdr); + } + return rc; +} + /* ** Shutdown the page cache. Free all memory and close all files. ** @@ -2620,10 +3102,19 @@ void sqlite3PagerTruncateImage(Pager *pPager, Pgno nPage){ ** to the caller. */ int sqlite3PagerClose(Pager *pPager){ + u8 *pTmp = (u8 *)pPager->pTmpSpace; + disable_simulated_io_errors(); sqlite3BeginBenignMalloc(); pPager->errCode = 0; pPager->exclusiveMode = 0; +#ifndef SQLITE_OMIT_WAL + sqlite3WalClose(pPager->pWal, + (pPager->noSync ? 0 : pPager->sync_flags), + pPager->pageSize, pTmp + ); + pPager->pWal = 0; +#endif pager_reset(pPager); if( MEMDB ){ pager_unlock(pPager); @@ -2634,15 +3125,18 @@ int sqlite3PagerClose(Pager *pPager){ ** be played back into the database. If a power failure occurs while ** this is happening, the database may become corrupt. */ - pPager->journalHdr = -1; + if( isOpen(pPager->jfd) ){ + pPager->errCode = pagerSyncHotJournal(pPager); + } pagerUnlockAndRollback(pPager); } sqlite3EndBenignMalloc(); enable_simulated_io_errors(); PAGERTRACE(("CLOSE %d\n", PAGERID(pPager))); IOTRACE(("CLOSE %p\n", pPager)) + sqlite3OsClose(pPager->jfd); sqlite3OsClose(pPager->fd); - sqlite3PageFree(pPager->pTmpSpace); + sqlite3PageFree(pTmp); sqlite3PcacheClose(pPager->pPCache); #ifdef SQLITE_HAS_CODEC @@ -2724,7 +3218,7 @@ static int syncJournal(Pager *pPager){ ** mode, then the journal file may at this point actually be larger ** than Pager.journalOff bytes. 
If the next thing in the journal ** file happens to be a journal-header (written as part of the - ** previous connections transaction), and a crash or power-failure + ** previous connection's transaction), and a crash or power-failure ** occurs after nRec is updated but before this connection writes ** anything else to the journal file (or commits/rolls back its ** transaction), then SQLite may become confused when doing the @@ -2743,10 +3237,10 @@ static int syncJournal(Pager *pPager){ */ i64 iNextHdrOffset; u8 aMagic[8]; - u8 zHeader[sizeof(aJournalMagic)+4]; + u8 zHeader[sizeof(aJournalMagic)+4]; - memcpy(zHeader, aJournalMagic, sizeof(aJournalMagic)); - put32bits(&zHeader[sizeof(aJournalMagic)], pPager->nRec); + memcpy(zHeader, aJournalMagic, sizeof(aJournalMagic)); + put32bits(&zHeader[sizeof(aJournalMagic)], pPager->nRec); iNextHdrOffset = journalHdrOffset(pPager); rc = sqlite3OsRead(pPager->jfd, aMagic, 8, iNextHdrOffset); @@ -2778,7 +3272,7 @@ static int syncJournal(Pager *pPager){ IOTRACE(("JHDR %p %lld\n", pPager, pPager->journalHdr)); rc = sqlite3OsWrite( pPager->jfd, zHeader, sizeof(zHeader), pPager->journalHdr - ); + ); if( rc!=SQLITE_OK ) return rc; } if( 0==(iDc&SQLITE_IOCAP_SEQUENTIAL) ){ @@ -2796,6 +3290,7 @@ static int syncJournal(Pager *pPager){ */ pPager->needSync = 0; pPager->journalStarted = 1; + pPager->journalHdr = pPager->journalOff; sqlite3PcacheClearSyncFlags(pPager->pPCache); } @@ -2834,13 +3329,9 @@ static int syncJournal(Pager *pPager){ ** occurs, an IO error code is returned. Or, if the EXCLUSIVE lock cannot ** be obtained, SQLITE_BUSY is returned. */ -static int pager_write_pagelist(PgHdr *pList){ - Pager *pPager; /* Pager object */ +static int pager_write_pagelist(Pager *pPager, PgHdr *pList){ int rc; /* Return code */ - if( NEVER(pList==0) ) return SQLITE_OK; - pPager = pList->pPager; - /* At this point there may be either a RESERVED or EXCLUSIVE lock on the ** database file. If there is already an EXCLUSIVE lock, the following ** call is a no-op. @@ -2857,6 +3348,7 @@ static int pager_write_pagelist(PgHdr *pList){ ** EXCLUSIVE, it means the database file has been changed and any rollback ** will require a journal playback. */ + assert( !pagerUseWal(pPager) ); assert( pPager->state>=PAGER_RESERVED ); rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK); @@ -2869,6 +3361,15 @@ static int pager_write_pagelist(PgHdr *pList){ rc = pagerOpentemp(pPager, pPager->fd, pPager->vfsFlags); } + /* Before the first write, give the VFS a hint of what the final + ** file size will be. + */ + assert( rc!=SQLITE_OK || isOpen(pPager->fd) ); + if( rc==SQLITE_OK && pPager->dbSize>(pPager->dbOrigSize+1) ){ + sqlite3_int64 szFile = pPager->pageSize * (sqlite3_int64)pPager->dbSize; + sqlite3OsFileControl(pPager->fd, SQLITE_FCNTL_SIZE_HINT, &szFile); + } + while( rc==SQLITE_OK && pList ){ Pgno pgno = pList->pgno; @@ -2921,6 +3422,26 @@ static int pager_write_pagelist(PgHdr *pList){ return rc; } +/* +** Ensure that the sub-journal file is open. If it is already open, this +** function is a no-op. +** +** SQLITE_OK is returned if everything goes according to plan. An +** SQLITE_IOERR_XXX error code is returned if a call to sqlite3OsOpen() +** fails. 
+*/ +static int openSubJournal(Pager *pPager){ + int rc = SQLITE_OK; + if( !isOpen(pPager->sjfd) ){ + if( pPager->journalMode==PAGER_JOURNALMODE_MEMORY || pPager->subjInMemory ){ + sqlite3MemJournalOpen(pPager->sjfd); + }else{ + rc = pagerOpentemp(pPager, pPager->sjfd, SQLITE_OPEN_SUBJOURNAL); + } + } + return rc; +} + /* ** Append a record of the current state of page pPg to the sub-journal. ** It is the callers responsibility to use subjRequiresPage() to check @@ -2937,18 +3458,31 @@ static int pager_write_pagelist(PgHdr *pList){ static int subjournalPage(PgHdr *pPg){ int rc = SQLITE_OK; Pager *pPager = pPg->pPager; - if( isOpen(pPager->sjfd) ){ - void *pData = pPg->pData; - i64 offset = pPager->nSubRec*(4+pPager->pageSize); - char *pData2; + if( pPager->journalMode!=PAGER_JOURNALMODE_OFF ){ - CODEC2(pPager, pData, pPg->pgno, 7, return SQLITE_NOMEM, pData2); - PAGERTRACE(("STMT-JOURNAL %d page %d\n", PAGERID(pPager), pPg->pgno)); - - assert( pageInJournal(pPg) || pPg->pgno>pPager->dbOrigSize ); - rc = write32bits(pPager->sjfd, offset, pPg->pgno); + /* Open the sub-journal, if it has not already been opened */ + assert( pPager->useJournal ); + assert( isOpen(pPager->jfd) || pagerUseWal(pPager) ); + assert( isOpen(pPager->sjfd) || pPager->nSubRec==0 ); + assert( pagerUseWal(pPager) + || pageInJournal(pPg) + || pPg->pgno>pPager->dbOrigSize + ); + rc = openSubJournal(pPager); + + /* If the sub-journal was opened successfully (or was already open), + ** write the journal record into the file. */ if( rc==SQLITE_OK ){ - rc = sqlite3OsWrite(pPager->sjfd, pData2, pPager->pageSize, offset+4); + void *pData = pPg->pData; + i64 offset = pPager->nSubRec*(4+pPager->pageSize); + char *pData2; + + CODEC2(pPager, pData, pPg->pgno, 7, return SQLITE_NOMEM, pData2); + PAGERTRACE(("STMT-JOURNAL %d page %d\n", PAGERID(pPager), pPg->pgno)); + rc = write32bits(pPager->sjfd, offset, pPg->pgno); + if( rc==SQLITE_OK ){ + rc = sqlite3OsWrite(pPager->sjfd, pData2, pPager->pageSize, offset+4); + } } } if( rc==SQLITE_OK ){ @@ -2959,7 +3493,6 @@ static int subjournalPage(PgHdr *pPg){ return rc; } - /* ** This function is called by the pcache layer when it has reached some ** soft memory limit. The first argument is a pointer to a Pager object @@ -2986,74 +3519,83 @@ static int pagerStress(void *p, PgHdr *pPg){ assert( pPg->pPager==pPager ); assert( pPg->flags&PGHDR_DIRTY ); - /* The doNotSync flag is set by the sqlite3PagerWrite() function while it - ** is journalling a set of two or more database pages that are stored - ** on the same disk sector. Syncing the journal is not allowed while - ** this is happening as it is important that all members of such a - ** set of pages are synced to disk together. So, if the page this function - ** is trying to make clean will require a journal sync and the doNotSync - ** flag is set, return without doing anything. The pcache layer will - ** just have to go ahead and allocate a new page buffer instead of - ** reusing pPg. + /* The doNotSyncSpill flag is set during times when doing a sync of + ** journal (and adding a new header) is not allowed. This occurs + ** during calls to sqlite3PagerWrite() while trying to journal multiple + ** pages belonging to the same sector. ** - ** Similarly, if the pager has already entered the error state, do not - ** try to write the contents of pPg to disk. + ** The doNotSpill flag inhibits all cache spilling regardless of whether + ** or not a sync is required. This is set during a rollback. 
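Each sub-journal record written by subjournalPage() is a 4-byte big-endian page number followed by the page image, so record i begins at byte offset i*(4+pageSize). A small standalone example of the offset arithmetic and the big-endian encoding (equivalent in effect to the pager's write32bits()/put32bits() helpers):

#include <stdio.h>
#include <stdint.h>

/* Big-endian 32-bit encoder, as used for the page number that prefixes
** each sub-journal record. */
static void put32be(unsigned char *p, uint32_t v){
  p[0] = (unsigned char)(v>>24);
  p[1] = (unsigned char)(v>>16);
  p[2] = (unsigned char)(v>>8);
  p[3] = (unsigned char)(v);
}

int main(void){
  int pageSize = 1024;
  uint32_t nSubRec = 2, pgno = 7;
  long long offset = (long long)nSubRec * (4 + pageSize);  /* where record 2 starts */
  unsigned char hdr[4];
  put32be(hdr, pgno);
  printf("record offset %lld, header %02x %02x %02x %02x\n",
         offset, (unsigned)hdr[0], (unsigned)hdr[1], (unsigned)hdr[2], (unsigned)hdr[3]);
  return 0;
}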
+ ** + ** Spilling is also inhibited when in an error state. */ - if( NEVER(pPager->errCode) - || (pPager->doNotSync && pPg->flags&PGHDR_NEED_SYNC) - ){ + if( pPager->errCode ) return SQLITE_OK; + if( pPager->doNotSpill ) return SQLITE_OK; + if( pPager->doNotSyncSpill && (pPg->flags & PGHDR_NEED_SYNC)!=0 ){ return SQLITE_OK; } - /* Sync the journal file if required. */ - if( pPg->flags&PGHDR_NEED_SYNC ){ - rc = syncJournal(pPager); - if( rc==SQLITE_OK && pPager->fullSync && - !(pPager->journalMode==PAGER_JOURNALMODE_MEMORY) && - !(sqlite3OsDeviceCharacteristics(pPager->fd)&SQLITE_IOCAP_SAFE_APPEND) - ){ - pPager->nRec = 0; - rc = writeJournalHdr(pPager); + pPg->pDirty = 0; + if( pagerUseWal(pPager) ){ + /* Write a single frame for this page to the log. */ + if( subjRequiresPage(pPg) ){ + rc = subjournalPage(pPg); + } + if( rc==SQLITE_OK ){ + rc = pagerWalFrames(pPager, pPg, 0, 0, 0); + } + }else{ + + /* Sync the journal file if required. */ + if( pPg->flags&PGHDR_NEED_SYNC ){ + assert( !pPager->noSync ); + rc = syncJournal(pPager); + if( rc==SQLITE_OK && + !(pPager->journalMode==PAGER_JOURNALMODE_MEMORY) && + !(sqlite3OsDeviceCharacteristics(pPager->fd)&SQLITE_IOCAP_SAFE_APPEND) + ){ + pPager->nRec = 0; + rc = writeJournalHdr(pPager); + } + } + + /* If the page number of this page is larger than the current size of + ** the database image, it may need to be written to the sub-journal. + ** This is because the call to pager_write_pagelist() below will not + ** actually write data to the file in this case. + ** + ** Consider the following sequence of events: + ** + ** BEGIN; + ** + ** + ** SAVEPOINT sp; + ** + ** pagerStress(page X) + ** ROLLBACK TO sp; + ** + ** If (X>Y), then when pagerStress is called page X will not be written + ** out to the database file, but will be dropped from the cache. Then, + ** following the "ROLLBACK TO sp" statement, reading page X will read + ** data from the database file. This will be the copy of page X as it + ** was when the transaction started, not as it was when "SAVEPOINT sp" + ** was executed. + ** + ** The solution is to write the current data for page X into the + ** sub-journal file now (if it is not already there), so that it will + ** be restored to its current value when the "ROLLBACK TO sp" is + ** executed. + */ + if( NEVER( + rc==SQLITE_OK && pPg->pgno>pPager->dbSize && subjRequiresPage(pPg) + ) ){ + rc = subjournalPage(pPg); + } + + /* Write the contents of the page out to the database file. */ + if( rc==SQLITE_OK ){ + rc = pager_write_pagelist(pPager, pPg); } - } - - /* If the page number of this page is larger than the current size of - ** the database image, it may need to be written to the sub-journal. - ** This is because the call to pager_write_pagelist() below will not - ** actually write data to the file in this case. - ** - ** Consider the following sequence of events: - ** - ** BEGIN; - ** - ** - ** SAVEPOINT sp; - ** - ** pagerStress(page X) - ** ROLLBACK TO sp; - ** - ** If (X>Y), then when pagerStress is called page X will not be written - ** out to the database file, but will be dropped from the cache. Then, - ** following the "ROLLBACK TO sp" statement, reading page X will read - ** data from the database file. This will be the copy of page X as it - ** was when the transaction started, not as it was when "SAVEPOINT sp" - ** was executed. 
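The three guards at the top of pagerStress() described above can be read as a single predicate: no spilling in an error state, no spilling at all while doNotSpill is set (rollback), and no spill that would force a journal sync while doNotSyncSpill is set. Folded into one function for clarity (the flag value is illustrative, not the real PGHDR_NEED_SYNC):

#include <stdio.h>

#define NEED_SYNC 0x08   /* illustrative flag value */

static int okToSpill(int errCode, int doNotSpill, int doNotSyncSpill, unsigned pgFlags){
  if( errCode )        return 0;   /* never spill while in an error state     */
  if( doNotSpill )     return 0;   /* rollback in progress: no spills at all  */
  if( doNotSyncSpill && (pgFlags & NEED_SYNC) ) return 0; /* would force sync */
  return 1;
}

int main(void){
  printf("%d\n", okToSpill(0, 0, 1, NEED_SYNC));  /* 0: spill would require a sync */
  printf("%d\n", okToSpill(0, 0, 1, 0));          /* 1: clean spill is allowed     */
  return 0;
}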
- ** - ** The solution is to write the current data for page X into the - ** sub-journal file now (if it is not already there), so that it will - ** be restored to its current value when the "ROLLBACK TO sp" is - ** executed. - */ - if( NEVER( - rc==SQLITE_OK && pPg->pgno>pPager->dbSize && subjRequiresPage(pPg) - ) ){ - rc = subjournalPage(pPg); - } - - /* Write the contents of the page out to the database file. */ - if( rc==SQLITE_OK ){ - pPg->pDirty = 0; - rc = pager_write_pagelist(pPg); } /* Mark the page as clean. */ @@ -3192,6 +3734,9 @@ int sqlite3PagerOpen( journalFileSize * 2 + /* The two journal files */ nPathname + 1 + /* zFilename */ nPathname + 8 + 1 /* zJournal */ +#ifndef SQLITE_OMIT_WAL + + nPathname + 4 + 1 /* zWal */ +#endif ); assert( EIGHT_BYTE_ALIGNMENT(SQLITE_INT_TO_PTR(journalFileSize)) ); if( !pPtr ){ @@ -3212,7 +3757,16 @@ int sqlite3PagerOpen( memcpy(pPager->zFilename, zPathname, nPathname); memcpy(pPager->zJournal, zPathname, nPathname); memcpy(&pPager->zJournal[nPathname], "-journal", 8); - if( pPager->zFilename[0]==0 ) pPager->zJournal[0] = 0; + if( pPager->zFilename[0]==0 ){ + pPager->zJournal[0] = 0; + } +#ifndef SQLITE_OMIT_WAL + else{ + pPager->zWal = &pPager->zJournal[nPathname+8+1]; + memcpy(pPager->zWal, zPathname, nPathname); + memcpy(&pPager->zWal[nPathname], "-wal", 4); + } +#endif sqlite3_free(zPathname); } pPager->pVfs = pVfs; @@ -3382,17 +3936,22 @@ int sqlite3PagerOpen( */ static int hasHotJournal(Pager *pPager, int *pExists){ sqlite3_vfs * const pVfs = pPager->pVfs; - int rc; /* Return code */ - int exists; /* True if a journal file is present */ + int rc = SQLITE_OK; /* Return code */ + int exists = 1; /* True if a journal file is present */ + int jrnlOpen = !!isOpen(pPager->jfd); assert( pPager!=0 ); assert( pPager->useJournal ); assert( isOpen(pPager->fd) ); - assert( !isOpen(pPager->jfd) ); assert( pPager->state <= PAGER_SHARED ); + assert( jrnlOpen==0 || ( sqlite3OsDeviceCharacteristics(pPager->jfd) & + SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN + )); *pExists = 0; - rc = sqlite3OsAccess(pVfs, pPager->zJournal, SQLITE_ACCESS_EXISTS, &exists); + if( !jrnlOpen ){ + rc = sqlite3OsAccess(pVfs, pPager->zJournal, SQLITE_ACCESS_EXISTS, &exists); + } if( rc==SQLITE_OK && exists ){ int locked; /* True if some process holds a RESERVED lock */ @@ -3430,15 +3989,19 @@ static int hasHotJournal(Pager *pPager, int *pExists){ ** If there is, then we consider this journal to be hot. If not, ** it can be ignored. */ - int f = SQLITE_OPEN_READONLY|SQLITE_OPEN_MAIN_JOURNAL; - rc = sqlite3OsOpen(pVfs, pPager->zJournal, pPager->jfd, f, &f); + if( !jrnlOpen ){ + int f = SQLITE_OPEN_READONLY|SQLITE_OPEN_MAIN_JOURNAL; + rc = sqlite3OsOpen(pVfs, pPager->zJournal, pPager->jfd, f, &f); + } if( rc==SQLITE_OK ){ u8 first = 0; rc = sqlite3OsRead(pPager->jfd, (void *)&first, 1, 0); if( rc==SQLITE_IOERR_SHORT_READ ){ rc = SQLITE_OK; } - sqlite3OsClose(pPager->jfd); + if( !jrnlOpen ){ + sqlite3OsClose(pPager->jfd); + } *pExists = (first!=0); }else if( rc==SQLITE_CANTOPEN ){ /* If we cannot open the rollback journal file in order to see if @@ -3461,67 +4024,6 @@ static int hasHotJournal(Pager *pPager, int *pExists){ return rc; } -/* -** Read the content for page pPg out of the database file and into -** pPg->pData. A shared lock or greater must be held on the database -** file before this function is called. -** -** If page 1 is read, then the value of Pager.dbFileVers[] is set to -** the value read from the database file. 
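sqlite3PagerOpen() now carves three related names out of one allocation: the database path, the same path with "-journal" appended, and, when WAL support is compiled in, the path with "-wal" appended. A simplified standalone sketch of that derivation (separate allocations are used here purely for brevity):

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

int main(void){
  const char *zDb = "test.db";
  size_t n = strlen(zDb);
  char *zJournal = malloc(n + 8 + 1);       /* room for "-journal" + NUL */
  char *zWal     = malloc(n + 4 + 1);       /* room for "-wal" + NUL     */
  if( zJournal==0 || zWal==0 ) return 1;
  memcpy(zJournal, zDb, n); memcpy(&zJournal[n], "-journal", 9);
  memcpy(zWal,     zDb, n); memcpy(&zWal[n],     "-wal",     5);
  printf("%s\n%s\n", zJournal, zWal);       /* test.db-journal, test.db-wal */
  free(zJournal); free(zWal);
  return 0;
}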
-** -** If an IO error occurs, then the IO error is returned to the caller. -** Otherwise, SQLITE_OK is returned. -*/ -static int readDbPage(PgHdr *pPg){ - Pager *pPager = pPg->pPager; /* Pager object associated with page pPg */ - Pgno pgno = pPg->pgno; /* Page number to read */ - int rc; /* Return code */ - i64 iOffset; /* Byte offset of file to read from */ - - assert( pPager->state>=PAGER_SHARED && !MEMDB ); - assert( isOpen(pPager->fd) ); - - if( NEVER(!isOpen(pPager->fd)) ){ - assert( pPager->tempFile ); - memset(pPg->pData, 0, pPager->pageSize); - return SQLITE_OK; - } - iOffset = (pgno-1)*(i64)pPager->pageSize; - rc = sqlite3OsRead(pPager->fd, pPg->pData, pPager->pageSize, iOffset); - if( rc==SQLITE_IOERR_SHORT_READ ){ - rc = SQLITE_OK; - } - if( pgno==1 ){ - if( rc ){ - /* If the read is unsuccessful, set the dbFileVers[] to something - ** that will never be a valid file version. dbFileVers[] is a copy - ** of bytes 24..39 of the database. Bytes 28..31 should always be - ** zero. Bytes 32..35 and 35..39 should be page numbers which are - ** never 0xffffffff. So filling pPager->dbFileVers[] with all 0xff - ** bytes should suffice. - ** - ** For an encrypted database, the situation is more complex: bytes - ** 24..39 of the database are white noise. But the probability of - ** white noising equaling 16 bytes of 0xff is vanishingly small so - ** we should still be ok. - */ - memset(pPager->dbFileVers, 0xff, sizeof(pPager->dbFileVers)); - }else{ - u8 *dbFileVers = &((u8*)pPg->pData)[24]; - memcpy(&pPager->dbFileVers, dbFileVers, sizeof(pPager->dbFileVers)); - } - } - CODEC1(pPager, pPg->pData, pgno, 3, rc = SQLITE_NOMEM); - - PAGER_INCR(sqlite3_pager_readdb_count); - PAGER_INCR(pPager->nRead); - IOTRACE(("PGIN %p %d\n", pPager, pgno)); - PAGERTRACE(("FETCH %d page %d hash(%08x)\n", - PAGERID(pPager), pgno, pager_pagehash(pPg))); - - return rc; -} - /* ** This function is called to obtain a shared lock on the database file. ** It is illegal to call sqlite3PagerAcquire() until after this function @@ -3575,7 +4077,9 @@ int sqlite3PagerSharedLock(Pager *pPager){ pager_reset(pPager); } - if( pPager->state==PAGER_UNLOCK || isErrorReset ){ + if( pagerUseWal(pPager) ){ + rc = pagerBeginReadTransaction(pPager); + }else if( pPager->state==PAGER_UNLOCK || isErrorReset ){ sqlite3_vfs * const pVfs = pPager->pVfs; int isHotJournal = 0; assert( !MEMDB ); @@ -3658,19 +4162,28 @@ int sqlite3PagerSharedLock(Pager *pPager){ goto failed; } - /* TODO: Why are these cleared here? Is it necessary? */ + /* Reset the journal status fields to indicates that we have no + ** rollback journal at this time. */ pPager->journalStarted = 0; pPager->journalOff = 0; pPager->setMaster = 0; pPager->journalHdr = 0; + /* Make sure the journal file has been synced to disk. */ + /* Playback and delete the journal. Drop the database write ** lock and reacquire the read lock. Purge the cache before ** playing back the hot-journal so that we don't end up with - ** an inconsistent cache. + ** an inconsistent cache. Sync the hot journal before playing + ** it back since the process that crashed and left the hot journal + ** probably did not sync it and we are required to always sync + ** the journal before playing it back. */ if( isOpen(pPager->jfd) ){ - rc = pager_playback(pPager, 1); + rc = pagerSyncHotJournal(pPager); + if( rc==SQLITE_OK ){ + rc = pager_playback(pPager, 1); + } if( rc!=SQLITE_OK ){ rc = pager_error(pPager, rc); goto failed; @@ -3698,16 +4211,16 @@ int sqlite3PagerSharedLock(Pager *pPager){ ** detected. 
The chance of an undetected change is so small that ** it can be neglected. */ + int nPage = 0; char dbFileVers[sizeof(pPager->dbFileVers)]; - sqlite3PagerPagecount(pPager, 0); + sqlite3PagerPagecount(pPager, &nPage); if( pPager->errCode ){ rc = pPager->errCode; goto failed; } - assert( pPager->dbSizeValid ); - if( pPager->dbSize>0 ){ + if( nPage>0 ){ IOTRACE(("CKVERS %p %d\n", pPager, sizeof(dbFileVers))); rc = sqlite3OsRead(pPager->fd, &dbFileVers, sizeof(dbFileVers), 24); if( rc!=SQLITE_OK ){ @@ -3722,6 +4235,11 @@ int sqlite3PagerSharedLock(Pager *pPager){ } } assert( pPager->exclusiveMode || pPager->state==PAGER_SHARED ); + + /* If there is a WAL file in the file-system, open this database in WAL + ** mode. Otherwise, the following function call is a no-op. + */ + rc = pagerOpenWalIfPresent(pPager); } failed: @@ -3776,7 +4294,7 @@ static void pagerUnlockIfUnused(Pager *pPager){ ** a) When reading a free-list leaf page from the database, and ** ** b) When a savepoint is being rolled back and we need to load -** a new page into the cache to populate with the data read +** a new page into the cache to be filled with the data read ** from the savepoint journal. ** ** If noContent is true, then the data returned is zeroed instead of @@ -3832,7 +4350,7 @@ int sqlite3PagerAcquire( assert( (*ppPage)->pgno==pgno ); assert( (*ppPage)->pPager==pPager || (*ppPage)->pPager==0 ); - if( (*ppPage)->pPager ){ + if( (*ppPage)->pPager && !noContent ){ /* In this case the pcache already contains an initialized copy of ** the page. Return without further ado. */ assert( pgno<=PAGER_MAX_PGNO && pgno!=PAGER_MJ_PGNO(pPager) ); @@ -3862,8 +4380,8 @@ int sqlite3PagerAcquire( if( MEMDB || nMax<(int)pgno || noContent || !isOpen(pPager->fd) ){ if( pgno>pPager->mxPgno ){ - rc = SQLITE_FULL; - goto pager_acquire_err; + rc = SQLITE_FULL; + goto pager_acquire_err; } if( noContent ){ /* Failure to set the bits in the InJournal bit-vectors is benign. @@ -3947,27 +4465,6 @@ void sqlite3PagerUnref(DbPage *pPg){ } } -/* -** If the main journal file has already been opened, ensure that the -** sub-journal file is open too. If the main journal is not open, -** this function is a no-op. -** -** SQLITE_OK is returned if everything goes according to plan. -** An SQLITE_IOERR_XXX error code is returned if a call to -** sqlite3OsOpen() fails. -*/ -static int openSubJournal(Pager *pPager){ - int rc = SQLITE_OK; - if( isOpen(pPager->jfd) && !isOpen(pPager->sjfd) ){ - if( pPager->journalMode==PAGER_JOURNALMODE_MEMORY || pPager->subjInMemory ){ - sqlite3MemJournalOpen(pPager->sjfd); - }else{ - rc = pagerOpentemp(pPager, pPager->sjfd, SQLITE_OPEN_SUBJOURNAL); - } - } - return rc; -} - /* ** This function is called at the start of every write transaction. ** There must already be a RESERVED or EXCLUSIVE lock on the database @@ -3992,6 +4489,7 @@ static int openSubJournal(Pager *pPager){ */ static int pager_open_journal(Pager *pPager){ int rc = SQLITE_OK; /* Return code */ + int nPage; /* Size of database file */ sqlite3_vfs * const pVfs = pPager->pVfs; /* Local cache of vfs pointer */ assert( pPager->state>=PAGER_RESERVED ); @@ -4004,13 +4502,10 @@ static int pager_open_journal(Pager *pPager){ ** an error state. */ if( NEVER(pPager->errCode) ) return pPager->errCode; - /* TODO: Is it really possible to get here with dbSizeValid==0? If not, - ** the call to PagerPagecount() can be removed. 
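The change-counter check discussed above boils down to comparing the 16 bytes at offsets 24..39 of page 1 against the copy cached in Pager.dbFileVers and resetting the page cache on any mismatch. In isolation:

#include <stdio.h>
#include <string.h>

int main(void){
  /* Bytes 24..39 of page 1 act as a "file version": if they differ from the
  ** cached copy, another connection changed the database under us. */
  unsigned char cachedVers[16] = {0};          /* what this connection last saw */
  unsigned char onDiskVers[16] = {0};
  onDiskVers[3] = 1;                           /* pretend the counter advanced  */
  if( memcmp(cachedVers, onDiskVers, sizeof(cachedVers))!=0 ){
    printf("database changed by another connection: reset the page cache\n");
  }
  return 0;
}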
- */ testcase( pPager->dbSizeValid==0 ); - sqlite3PagerPagecount(pPager, 0); - - pPager->pInJournal = sqlite3BitvecCreate(pPager->dbSize); + rc = sqlite3PagerPagecount(pPager, &nPage); + if( rc ) return rc; + pPager->pInJournal = sqlite3BitvecCreate(nPage); if( pPager->pInJournal==0 ){ return SQLITE_NOMEM; } @@ -4052,9 +4547,6 @@ static int pager_open_journal(Pager *pPager){ pPager->journalHdr = 0; rc = writeJournalHdr(pPager); } - if( rc==SQLITE_OK && pPager->nSavepoint ){ - rc = openSubJournal(pPager); - } if( rc!=SQLITE_OK ){ sqlite3BitvecDestroy(pPager->pInJournal); @@ -4092,53 +4584,77 @@ int sqlite3PagerBegin(Pager *pPager, int exFlag, int subjInMemory){ int rc = SQLITE_OK; assert( pPager->state!=PAGER_UNLOCK ); pPager->subjInMemory = (u8)subjInMemory; + if( pPager->state==PAGER_SHARED ){ assert( pPager->pInJournal==0 ); assert( !MEMDB && !pPager->tempFile ); - /* Obtain a RESERVED lock on the database file. If the exFlag parameter - ** is true, then immediately upgrade this to an EXCLUSIVE lock. The - ** busy-handler callback can be used when upgrading to the EXCLUSIVE - ** lock, but not when obtaining the RESERVED lock. - */ - rc = sqlite3OsLock(pPager->fd, RESERVED_LOCK); - if( rc==SQLITE_OK ){ - pPager->state = PAGER_RESERVED; - if( exFlag ){ - rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK); + if( pagerUseWal(pPager) ){ + /* If the pager is configured to use locking_mode=exclusive, and an + ** exclusive lock on the database is not already held, obtain it now. + */ + if( pPager->exclusiveMode && sqlite3WalExclusiveMode(pPager->pWal, -1) ){ + rc = sqlite3OsLock(pPager->fd, EXCLUSIVE_LOCK); + pPager->state = PAGER_SHARED; + if( rc!=SQLITE_OK ){ + return rc; + } + sqlite3WalExclusiveMode(pPager->pWal, 1); + } + + /* Grab the write lock on the log file. If successful, upgrade to + ** PAGER_RESERVED state. Otherwise, return an error code to the caller. + ** The busy-handler is not invoked if another connection already + ** holds the write-lock. If possible, the upper layer will call it. + ** + ** WAL mode sets Pager.state to PAGER_RESERVED when it has an open + ** transaction, but never to PAGER_EXCLUSIVE. This is because in + ** PAGER_EXCLUSIVE state the code to roll back savepoint transactions + ** may copy data from the sub-journal into the database file as well + ** as into the page cache. Which would be incorrect in WAL mode. + */ + rc = sqlite3WalBeginWriteTransaction(pPager->pWal); + if( rc==SQLITE_OK ){ + pPager->dbOrigSize = pPager->dbSize; + pPager->state = PAGER_RESERVED; + pPager->journalOff = 0; + } + + assert( rc!=SQLITE_OK || pPager->state==PAGER_RESERVED ); + assert( rc==SQLITE_OK || pPager->state==PAGER_SHARED ); + }else{ + /* Obtain a RESERVED lock on the database file. If the exFlag parameter + ** is true, then immediately upgrade this to an EXCLUSIVE lock. The + ** busy-handler callback can be used when upgrading to the EXCLUSIVE + ** lock, but not when obtaining the RESERVED lock. + */ + rc = sqlite3OsLock(pPager->fd, RESERVED_LOCK); + if( rc==SQLITE_OK ){ + pPager->state = PAGER_RESERVED; + if( exFlag ){ + rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK); + } } } - /* If the required locks were successfully obtained, open the journal - ** file and write the first journal-header to it. + /* No need to open the journal file at this time. It will be + ** opened before it is written to. If we defer opening the journal, + ** we might save the work of creating a file if the transaction + ** ends up being a no-op. 
*/ - if( rc==SQLITE_OK && pPager->journalMode!=PAGER_JOURNALMODE_OFF ){ - rc = pager_open_journal(pPager); + + if( rc!=SQLITE_OK ){ + assert( !pPager->dbModified ); + /* Ignore any IO error that occurs within pager_end_transaction(). The + ** purpose of this call is to reset the internal state of the pager + ** sub-system. It doesn't matter if the journal-file is not properly + ** finalized at this point (since it is not a valid journal file anyway). + */ + pager_end_transaction(pPager, 0); } - }else if( isOpen(pPager->jfd) && pPager->journalOff==0 ){ - /* This happens when the pager was in exclusive-access mode the last - ** time a (read or write) transaction was successfully concluded - ** by this connection. Instead of deleting the journal file it was - ** kept open and either was truncated to 0 bytes or its header was - ** overwritten with zeros. - */ - assert( pPager->nRec==0 ); - assert( pPager->dbOrigSize==0 ); - assert( pPager->pInJournal==0 ); - rc = pager_open_journal(pPager); } PAGERTRACE(("TRANSACTION %d\n", PAGERID(pPager))); - assert( !isOpen(pPager->jfd) || pPager->journalOff>0 || rc!=SQLITE_OK ); - if( rc!=SQLITE_OK ){ - assert( !pPager->dbModified ); - /* Ignore any IO error that occurs within pager_end_transaction(). The - ** purpose of this call is to reset the internal state of the pager - ** sub-system. It doesn't matter if the journal-file is not properly - ** finalized at this point (since it is not a valid journal file anyway). - */ - pager_end_transaction(pPager, 0); - } return rc; } @@ -4159,8 +4675,8 @@ static int pager_write(PgHdr *pPg){ */ assert( pPager->state>=PAGER_RESERVED ); - /* If an error has been previously detected, we should not be - ** calling this routine. Repeat the error for robustness. + /* If an error has been previously detected, report the same error + ** again. */ if( NEVER(pPager->errCode) ) return pPager->errCode; @@ -4177,6 +4693,7 @@ static int pager_write(PgHdr *pPg){ */ sqlite3PcacheMakeDirty(pPg); if( pageInJournal(pPg) && !subjRequiresPage(pPg) ){ + assert( !pagerUseWal(pPager) ); pPager->dbModified = 1; }else{ @@ -4185,14 +4702,14 @@ static int pager_write(PgHdr *pPg){ ** or both. ** ** Higher level routines should have already started a transaction, - ** which means they have acquired the necessary locks and opened - ** a rollback journal. Double-check to makes sure this is the case. + ** which means they have acquired the necessary locks but the rollback + ** journal might not yet be open. */ - rc = sqlite3PagerBegin(pPager, 0, pPager->subjInMemory); - if( NEVER(rc!=SQLITE_OK) ){ - return rc; - } - if( !isOpen(pPager->jfd) && pPager->journalMode!=PAGER_JOURNALMODE_OFF ){ + assert( pPager->state>=RESERVED_LOCK ); + if( pPager->pInJournal==0 + && pPager->journalMode!=PAGER_JOURNALMODE_OFF + && !pagerUseWal(pPager) + ){ assert( pPager->useJournal ); rc = pager_open_journal(pPager); if( rc!=SQLITE_OK ) return rc; @@ -4204,6 +4721,7 @@ static int pager_write(PgHdr *pPg){ ** the transaction journal if it is not there already. */ if( !pageInJournal(pPg) && isOpen(pPager->jfd) ){ + assert( !pagerUseWal(pPager) ); if( pPg->pgno<=pPager->dbOrigSize ){ u32 cksum; char *pData2; @@ -4212,6 +4730,8 @@ static int pager_write(PgHdr *pPg){ ** contains the database locks. The following assert verifies ** that we do not. 
*/ assert( pPg->pgno!=PAGER_MJ_PGNO(pPager) ); + + assert( pPager->journalHdr <= pPager->journalOff ); CODEC2(pPager, pData, pPg->pgno, 7, return SQLITE_NOMEM, pData2); cksum = pager_cksum(pPager, (u8*)pData2); rc = write32bits(pPager->jfd, pPager->journalOff, pPg->pgno); @@ -4314,16 +4834,17 @@ int sqlite3PagerWrite(DbPage *pDbPage){ if( nPagePerSector>1 ){ Pgno nPageCount; /* Total number of pages in database file */ Pgno pg1; /* First page of the sector pPg is located on. */ - int nPage; /* Number of pages starting at pg1 to journal */ + int nPage = 0; /* Number of pages starting at pg1 to journal */ int ii; /* Loop counter */ int needSync = 0; /* True if any page has PGHDR_NEED_SYNC */ - /* Set the doNotSync flag to 1. This is because we cannot allow a journal - ** header to be written between the pages journaled by this function. + /* Set the doNotSyncSpill flag to 1. This is because we cannot allow + ** a journal header to be written between the pages journaled by + ** this function. */ assert( !MEMDB ); - assert( pPager->doNotSync==0 ); - pPager->doNotSync = 1; + assert( pPager->doNotSyncSpill==0 ); + pPager->doNotSyncSpill++; /* This trick assumes that both the page-size and sector-size are ** an integer power of 2. It sets variable pg1 to the identifier @@ -4331,17 +4852,19 @@ int sqlite3PagerWrite(DbPage *pDbPage){ */ pg1 = ((pPg->pgno-1) & ~(nPagePerSector-1)) + 1; - sqlite3PagerPagecount(pPager, (int *)&nPageCount); - if( pPg->pgno>nPageCount ){ - nPage = (pPg->pgno - pg1)+1; - }else if( (pg1+nPagePerSector-1)>nPageCount ){ - nPage = nPageCount+1-pg1; - }else{ - nPage = nPagePerSector; + rc = sqlite3PagerPagecount(pPager, (int *)&nPageCount); + if( rc==SQLITE_OK ){ + if( pPg->pgno>nPageCount ){ + nPage = (pPg->pgno - pg1)+1; + }else if( (pg1+nPagePerSector-1)>nPageCount ){ + nPage = nPageCount+1-pg1; + }else{ + nPage = nPagePerSector; + } + assert(nPage>0); + assert(pg1<=pPg->pgno); + assert((pg1+nPage)>pPg->pgno); } - assert(nPage>0); - assert(pg1<=pPg->pgno); - assert((pg1+nPage)>pPg->pgno); for(ii=0; iineedSync); } - assert( pPager->doNotSync==1 ); - pPager->doNotSync = 0; + assert( pPager->doNotSyncSpill==1 ); + pPager->doNotSyncSpill--; }else{ rc = pager_write(pDbPage); } @@ -4492,11 +5015,20 @@ static int pager_incr_changecounter(Pager *pPager, int isDirectMode){ change_counter++; put32bits(((char*)pPgHdr->pData)+24, change_counter); + /* Also store the SQLite version number in bytes 96..99 and in + ** bytes 92..95 store the change counter for which the version number + ** is valid. */ + put32bits(((char*)pPgHdr->pData)+92, change_counter); + put32bits(((char*)pPgHdr->pData)+96, SQLITE_VERSION_NUMBER); + /* If running in direct mode, write the contents of page 1 to the file. */ if( DIRECT_MODE ){ - const void *zBuf = pPgHdr->pData; + const void *zBuf; assert( pPager->dbFileSize>0 ); - rc = sqlite3OsWrite(pPager->fd, zBuf, pPager->pageSize, 0); + CODEC2(pPager, pPgHdr->pData, 1, 6, rc=SQLITE_NOMEM, zBuf); + if( rc==SQLITE_OK ){ + rc = sqlite3OsWrite(pPager->fd, zBuf, pPager->pageSize, 0); + } if( rc==SQLITE_OK ){ pPager->changeCountDone = 1; } @@ -4565,10 +5097,8 @@ int sqlite3PagerCommitPhaseOne( /* The dbOrigSize is never set if journal_mode=OFF */ assert( pPager->journalMode!=PAGER_JOURNALMODE_OFF || pPager->dbOrigSize==0 ); - /* If a prior error occurred, this routine should not be called. ROLLBACK - ** is the appropriate response to an error, not COMMIT. Guard against - ** coding errors by repeating the prior error. 
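The sector-spanning logic in sqlite3PagerWrite() relies on the power-of-two rounding shown above to find the first page of the sector containing a given page. The identity is easy to test on its own:

#include <stdio.h>
#include <assert.h>

/* First page of the disk sector containing page pgno (pages number from 1). */
static unsigned firstPageOfSector(unsigned pgno, unsigned nPagePerSector){
  assert( (nPagePerSector & (nPagePerSector-1))==0 );   /* must be a power of 2 */
  return ((pgno-1) & ~(nPagePerSector-1)) + 1;
}

int main(void){
  /* With 4 pages per sector, pages 1-4 share a sector, pages 5-8 the next. */
  printf("%u %u %u\n", firstPageOfSector(1,4), firstPageOfSector(4,4), firstPageOfSector(7,4));
  return 0;
}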
*/ - if( NEVER(pPager->errCode) ) return pPager->errCode; + /* If a prior error occurred, report that error again. */ + if( pPager->errCode ) return pPager->errCode; PAGERTRACE(("DATABASE SYNC: File=%s zMaster=%s nSize=%d\n", pPager->zFilename, zMaster, pPager->dbSize)); @@ -4580,129 +5110,143 @@ int sqlite3PagerCommitPhaseOne( */ sqlite3BackupRestart(pPager->pBackup); }else if( pPager->state!=PAGER_SYNCED && pPager->dbModified ){ - - /* The following block updates the change-counter. Exactly how it - ** does this depends on whether or not the atomic-update optimization - ** was enabled at compile time, and if this transaction meets the - ** runtime criteria to use the operation: - ** - ** * The file-system supports the atomic-write property for - ** blocks of size page-size, and - ** * This commit is not part of a multi-file transaction, and - ** * Exactly one page has been modified and store in the journal file. - ** - ** If the optimization was not enabled at compile time, then the - ** pager_incr_changecounter() function is called to update the change - ** counter in 'indirect-mode'. If the optimization is compiled in but - ** is not applicable to this transaction, call sqlite3JournalCreate() - ** to make sure the journal file has actually been created, then call - ** pager_incr_changecounter() to update the change-counter in indirect - ** mode. - ** - ** Otherwise, if the optimization is both enabled and applicable, - ** then call pager_incr_changecounter() to update the change-counter - ** in 'direct' mode. In this case the journal file will never be - ** created for this transaction. - */ -#ifdef SQLITE_ENABLE_ATOMIC_WRITE - PgHdr *pPg; - assert( isOpen(pPager->jfd) || pPager->journalMode==PAGER_JOURNALMODE_OFF ); - if( !zMaster && isOpen(pPager->jfd) - && pPager->journalOff==jrnlBufferSize(pPager) - && pPager->dbSize>=pPager->dbFileSize - && (0==(pPg = sqlite3PcacheDirtyList(pPager->pPCache)) || 0==pPg->pDirty) - ){ - /* Update the db file change counter via the direct-write method. The - ** following call will modify the in-memory representation of page 1 - ** to include the updated change counter and then write page 1 - ** directly to the database file. Because of the atomic-write - ** property of the host file-system, this is safe. - */ - rc = pager_incr_changecounter(pPager, 1); - }else{ - rc = sqlite3JournalCreate(pPager->jfd); - if( rc==SQLITE_OK ){ - rc = pager_incr_changecounter(pPager, 0); + if( pagerUseWal(pPager) ){ + PgHdr *pList = sqlite3PcacheDirtyList(pPager->pPCache); + if( pList ){ + rc = pagerWalFrames(pPager, pList, pPager->dbSize, 1, + (pPager->fullSync ? pPager->sync_flags : 0) + ); } - } -#else - rc = pager_incr_changecounter(pPager, 0); -#endif - if( rc!=SQLITE_OK ) goto commit_phase_one_exit; - - /* If this transaction has made the database smaller, then all pages - ** being discarded by the truncation must be written to the journal - ** file. This can only happen in auto-vacuum mode. - ** - ** Before reading the pages with page numbers larger than the - ** current value of Pager.dbSize, set dbSize back to the value - ** that it took at the start of the transaction. Otherwise, the - ** calls to sqlite3PagerGet() return zeroed pages instead of - ** reading data from the database file. - ** - ** When journal_mode==OFF the dbOrigSize is always zero, so this - ** block never runs if journal_mode=OFF. 
- */ -#ifndef SQLITE_OMIT_AUTOVACUUM - if( pPager->dbSizedbOrigSize - && ALWAYS(pPager->journalMode!=PAGER_JOURNALMODE_OFF) - ){ - Pgno i; /* Iterator variable */ - const Pgno iSkip = PAGER_MJ_PGNO(pPager); /* Pending lock page */ - const Pgno dbSize = pPager->dbSize; /* Database image size */ - pPager->dbSize = pPager->dbOrigSize; - for( i=dbSize+1; i<=pPager->dbOrigSize; i++ ){ - if( !sqlite3BitvecTest(pPager->pInJournal, i) && i!=iSkip ){ - PgHdr *pPage; /* Page to journal */ - rc = sqlite3PagerGet(pPager, i, &pPage); - if( rc!=SQLITE_OK ) goto commit_phase_one_exit; - rc = sqlite3PagerWrite(pPage); - sqlite3PagerUnref(pPage); - if( rc!=SQLITE_OK ) goto commit_phase_one_exit; + if( rc==SQLITE_OK ){ + sqlite3PcacheCleanAll(pPager->pPCache); + } + }else{ + /* The following block updates the change-counter. Exactly how it + ** does this depends on whether or not the atomic-update optimization + ** was enabled at compile time, and if this transaction meets the + ** runtime criteria to use the operation: + ** + ** * The file-system supports the atomic-write property for + ** blocks of size page-size, and + ** * This commit is not part of a multi-file transaction, and + ** * Exactly one page has been modified and store in the journal file. + ** + ** If the optimization was not enabled at compile time, then the + ** pager_incr_changecounter() function is called to update the change + ** counter in 'indirect-mode'. If the optimization is compiled in but + ** is not applicable to this transaction, call sqlite3JournalCreate() + ** to make sure the journal file has actually been created, then call + ** pager_incr_changecounter() to update the change-counter in indirect + ** mode. + ** + ** Otherwise, if the optimization is both enabled and applicable, + ** then call pager_incr_changecounter() to update the change-counter + ** in 'direct' mode. In this case the journal file will never be + ** created for this transaction. + */ + #ifdef SQLITE_ENABLE_ATOMIC_WRITE + PgHdr *pPg; + assert( isOpen(pPager->jfd) + || pPager->journalMode==PAGER_JOURNALMODE_OFF + || pPager->journalMode==PAGER_JOURNALMODE_WAL + ); + if( !zMaster && isOpen(pPager->jfd) + && pPager->journalOff==jrnlBufferSize(pPager) + && pPager->dbSize>=pPager->dbFileSize + && (0==(pPg = sqlite3PcacheDirtyList(pPager->pPCache)) || 0==pPg->pDirty) + ){ + /* Update the db file change counter via the direct-write method. The + ** following call will modify the in-memory representation of page 1 + ** to include the updated change counter and then write page 1 + ** directly to the database file. Because of the atomic-write + ** property of the host file-system, this is safe. + */ + rc = pager_incr_changecounter(pPager, 1); + }else{ + rc = sqlite3JournalCreate(pPager->jfd); + if( rc==SQLITE_OK ){ + rc = pager_incr_changecounter(pPager, 0); } - } - pPager->dbSize = dbSize; - } -#endif - - /* Write the master journal name into the journal file. If a master - ** journal file name has already been written to the journal file, - ** or if zMaster is NULL (no master journal), then this call is a no-op. - */ - rc = writeMasterJournal(pPager, zMaster); - if( rc!=SQLITE_OK ) goto commit_phase_one_exit; - - /* Sync the journal file. If the atomic-update optimization is being - ** used, this call will not create the journal file or perform any - ** real IO. - */ - rc = syncJournal(pPager); - if( rc!=SQLITE_OK ) goto commit_phase_one_exit; - - /* Write all dirty pages to the database file. 
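The atomic-write path described above is only usable when the VFS reports that blocks of exactly the page size are written atomically. A minimal sketch of that capability test, not part of the patch; it assumes the observation that SQLITE_IOCAP_ATOMIC512 through SQLITE_IOCAP_ATOMIC64K are numerically equal to (size>>8), and it deliberately ignores the other criteria (single modified page, no master journal) listed in the comment above.

    /* Illustration only: does the device report atomic writes for pgsz? */
    #include "sqlite3.h"

    static int deviceSupportsAtomicWrite(int devChar, int pgsz){
      if( devChar & SQLITE_IOCAP_ATOMIC ) return 1;     /* all sizes atomic */
      if( pgsz>=512 && pgsz<=65536 && (pgsz & (pgsz-1))==0 ){
        return (devChar & (pgsz>>8))!=0;                /* size-specific bit */
      }
      return 0;
    }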
*/ - rc = pager_write_pagelist(sqlite3PcacheDirtyList(pPager->pPCache)); - if( rc!=SQLITE_OK ){ - assert( rc!=SQLITE_IOERR_BLOCKED ); - goto commit_phase_one_exit; - } - sqlite3PcacheCleanAll(pPager->pPCache); - - /* If the file on disk is not the same size as the database image, - ** then use pager_truncate to grow or shrink the file here. - */ - if( pPager->dbSize!=pPager->dbFileSize ){ - Pgno nNew = pPager->dbSize - (pPager->dbSize==PAGER_MJ_PGNO(pPager)); - assert( pPager->state>=PAGER_EXCLUSIVE ); - rc = pager_truncate(pPager, nNew); + } + #else + rc = pager_incr_changecounter(pPager, 0); + #endif if( rc!=SQLITE_OK ) goto commit_phase_one_exit; + + /* If this transaction has made the database smaller, then all pages + ** being discarded by the truncation must be written to the journal + ** file. This can only happen in auto-vacuum mode. + ** + ** Before reading the pages with page numbers larger than the + ** current value of Pager.dbSize, set dbSize back to the value + ** that it took at the start of the transaction. Otherwise, the + ** calls to sqlite3PagerGet() return zeroed pages instead of + ** reading data from the database file. + ** + ** When journal_mode==OFF the dbOrigSize is always zero, so this + ** block never runs if journal_mode=OFF. + */ + #ifndef SQLITE_OMIT_AUTOVACUUM + if( pPager->dbSizedbOrigSize + && ALWAYS(pPager->journalMode!=PAGER_JOURNALMODE_OFF) + ){ + Pgno i; /* Iterator variable */ + const Pgno iSkip = PAGER_MJ_PGNO(pPager); /* Pending lock page */ + const Pgno dbSize = pPager->dbSize; /* Database image size */ + pPager->dbSize = pPager->dbOrigSize; + for( i=dbSize+1; i<=pPager->dbOrigSize; i++ ){ + if( !sqlite3BitvecTest(pPager->pInJournal, i) && i!=iSkip ){ + PgHdr *pPage; /* Page to journal */ + rc = sqlite3PagerGet(pPager, i, &pPage); + if( rc!=SQLITE_OK ) goto commit_phase_one_exit; + rc = sqlite3PagerWrite(pPage); + sqlite3PagerUnref(pPage); + if( rc!=SQLITE_OK ) goto commit_phase_one_exit; + } + } + pPager->dbSize = dbSize; + } + #endif + + /* Write the master journal name into the journal file. If a master + ** journal file name has already been written to the journal file, + ** or if zMaster is NULL (no master journal), then this call is a no-op. + */ + rc = writeMasterJournal(pPager, zMaster); + if( rc!=SQLITE_OK ) goto commit_phase_one_exit; + + /* Sync the journal file. If the atomic-update optimization is being + ** used, this call will not create the journal file or perform any + ** real IO. + */ + rc = syncJournal(pPager); + if( rc!=SQLITE_OK ) goto commit_phase_one_exit; + + /* Write all dirty pages to the database file. */ + rc = pager_write_pagelist(pPager,sqlite3PcacheDirtyList(pPager->pPCache)); + if( rc!=SQLITE_OK ){ + assert( rc!=SQLITE_IOERR_BLOCKED ); + goto commit_phase_one_exit; + } + sqlite3PcacheCleanAll(pPager->pPCache); + + /* If the file on disk is not the same size as the database image, + ** then use pager_truncate to grow or shrink the file here. + */ + if( pPager->dbSize!=pPager->dbFileSize ){ + Pgno nNew = pPager->dbSize - (pPager->dbSize==PAGER_MJ_PGNO(pPager)); + assert( pPager->state>=PAGER_EXCLUSIVE ); + rc = pager_truncate(pPager, nNew); + if( rc!=SQLITE_OK ) goto commit_phase_one_exit; + } + + /* Finally, sync the database file. */ + if( !pPager->noSync && !noSync ){ + rc = sqlite3OsSync(pPager->fd, pPager->sync_flags); + } + IOTRACE(("DBSYNC %p\n", pPager)) } - /* Finally, sync the database file. 
*/ - if( !pPager->noSync && !noSync ){ - rc = sqlite3OsSync(pPager->fd, pPager->sync_flags); - } - IOTRACE(("DBSYNC %p\n", pPager)) - pPager->state = PAGER_SYNCED; } @@ -4735,8 +5279,9 @@ int sqlite3PagerCommitPhaseTwo(Pager *pPager){ if( NEVER(pPager->errCode) ) return pPager->errCode; /* This function should not be called if the pager is not in at least - ** PAGER_RESERVED state. And indeed SQLite never does this. But it is - ** nice to have this defensive test here anyway. + ** PAGER_RESERVED state. **FIXME**: Make it so that this test always + ** fails - make it so that we never reach this point if we do not hold + ** all necessary locks. */ if( NEVER(pPager->statedbModified==0 && pPager->exclusiveMode && pPager->journalMode==PAGER_JOURNALMODE_PERSIST ){ - assert( pPager->journalOff==JOURNAL_HDR_SZ(pPager) ); + assert( pPager->journalOff==JOURNAL_HDR_SZ(pPager) || !pPager->journalOff ); return SQLITE_OK; } @@ -4787,7 +5332,7 @@ int sqlite3PagerCommitPhaseTwo(Pager *pPager){ ** (i.e. either SQLITE_IOERR or SQLITE_CORRUPT). ** ** * If the pager is in PAGER_RESERVED state, then attempt (1). Whether -** or not (1) is succussful, also attempt (2). If successful, return +** or not (1) is successful, also attempt (2). If successful, return ** SQLITE_OK. Otherwise, enter the error state and return the first ** error code encountered. ** @@ -4810,7 +5355,14 @@ int sqlite3PagerCommitPhaseTwo(Pager *pPager){ int sqlite3PagerRollback(Pager *pPager){ int rc = SQLITE_OK; /* Return code */ PAGERTRACE(("ROLLBACK %d\n", PAGERID(pPager))); - if( !pPager->dbModified || !isOpen(pPager->jfd) ){ + if( pagerUseWal(pPager) ){ + int rc2; + + rc = sqlite3PagerSavepoint(pPager, SAVEPOINT_ROLLBACK, -1); + rc2 = pager_end_transaction(pPager, pPager->setMaster); + if( rc==SQLITE_OK ) rc = rc2; + rc = pager_error(pPager, rc); + }else if( !pPager->dbModified || !isOpen(pPager->jfd) ){ rc = pager_end_transaction(pPager, pPager->setMaster); }else if( pPager->errCode && pPager->errCode!=SQLITE_FULL ){ if( pPager->state>=PAGER_EXCLUSIVE ){ @@ -4857,6 +5409,16 @@ int sqlite3PagerRefcount(Pager *pPager){ return sqlite3PcacheRefCount(pPager->pPCache); } +/* +** Return the approximate number of bytes of memory currently +** used by the pager and its associated cache. +*/ +int sqlite3PagerMemUsed(Pager *pPager){ + int perPageSize = pPager->pageSize + pPager->nExtra + 20; + return perPageSize*sqlite3PcachePagecount(pPager->pPCache) + + sqlite3MallocSize(pPager); +} + /* ** Return the number of references to the specified page. */ @@ -4909,11 +5471,10 @@ int sqlite3PagerOpenSavepoint(Pager *pPager, int nSavepoint){ if( nSavepoint>nCurrent && pPager->useJournal ){ int ii; /* Iterator variable */ PagerSavepoint *aNew; /* New Pager.aSavepoint array */ + int nPage; /* Size of database file */ - /* Either there is no active journal or the sub-journal is open or - ** the journal is always stored in memory */ - assert( pPager->nSavepoint==0 || isOpen(pPager->sjfd) || - pPager->journalMode==PAGER_JOURNALMODE_MEMORY ); + rc = sqlite3PagerPagecount(pPager, &nPage); + if( rc ) return rc; /* Grow the Pager.aSavepoint array using realloc(). Return SQLITE_NOMEM ** if the allocation fails. Otherwise, zero the new portion in case a @@ -4927,26 +5488,26 @@ int sqlite3PagerOpenSavepoint(Pager *pPager, int nSavepoint){ } memset(&aNew[nCurrent], 0, (nSavepoint-nCurrent) * sizeof(PagerSavepoint)); pPager->aSavepoint = aNew; - pPager->nSavepoint = nSavepoint; /* Populate the PagerSavepoint structures just allocated. 
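At the SQL level, the machinery in sqlite3PagerOpenSavepoint() and pagerPlaybackSavepoint() is driven by SAVEPOINT, ROLLBACK TO, and RELEASE statements. An illustrative application-level example, not part of the patch, with error handling reduced to a single check:

    #include <stdio.h>
    #include "sqlite3.h"

    int main(void){
      sqlite3 *db;
      char *zErr = 0;
      if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
      sqlite3_exec(db,
        "CREATE TABLE t(x);"
        "BEGIN;"
        "INSERT INTO t VALUES(1);"
        "SAVEPOINT sp1;"            /* opens a pager savepoint */
        "INSERT INTO t VALUES(2);"
        "ROLLBACK TO sp1;"          /* undoes only the second insert */
        "RELEASE sp1;"
        "COMMIT;",
        0, 0, &zErr);
      if( zErr ){ fprintf(stderr, "%s\n", zErr); sqlite3_free(zErr); }
      sqlite3_close(db);
      return 0;
    }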
*/ for(ii=nCurrent; iidbSizeValid ); - aNew[ii].nOrig = pPager->dbSize; - if( isOpen(pPager->jfd) && ALWAYS(pPager->journalOff>0) ){ + aNew[ii].nOrig = nPage; + if( isOpen(pPager->jfd) && pPager->journalOff>0 ){ aNew[ii].iOffset = pPager->journalOff; }else{ aNew[ii].iOffset = JOURNAL_HDR_SZ(pPager); } aNew[ii].iSubRec = pPager->nSubRec; - aNew[ii].pInSavepoint = sqlite3BitvecCreate(pPager->dbSize); + aNew[ii].pInSavepoint = sqlite3BitvecCreate(nPage); if( !aNew[ii].pInSavepoint ){ return SQLITE_NOMEM; } + if( pagerUseWal(pPager) ){ + sqlite3WalSavepoint(pPager->pWal, aNew[ii].aWalData); + } + pPager->nSavepoint = ii+1; } - - /* Open the sub-journal, if it is not already opened. */ - rc = openSubJournal(pPager); + assert( pPager->nSavepoint==nSavepoint ); assertTruncateConstraint(pPager); } @@ -5020,7 +5581,7 @@ int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint){ ** not yet been opened. In this case there have been no changes to ** the database file, so the playback operation can be skipped. */ - else if( isOpen(pPager->jfd) ){ + else if( pagerUseWal(pPager) || isOpen(pPager->jfd) ){ PagerSavepoint *pSavepoint = (nNew==0)?0:&pPager->aSavepoint[nNew-1]; rc = pagerPlaybackSavepoint(pPager, pSavepoint); assert(rc!=SQLITE_DONE); @@ -5072,7 +5633,7 @@ int sqlite3PagerNosync(Pager *pPager){ /* ** Set or retrieve the codec for this pager */ -static void sqlite3PagerSetCodec( +void sqlite3PagerSetCodec( Pager *pPager, void *(*xCodec)(void*,void*,Pgno,int), void (*xCodecSizeChng)(void*,int,int), @@ -5086,7 +5647,7 @@ static void sqlite3PagerSetCodec( pPager->pCodec = pCodec; pagerReportSize(pPager); } -static void *sqlite3PagerGetCodec(Pager *pPager){ +void *sqlite3PagerGetCodec(Pager *pPager){ return pPager->pCodec; } #endif @@ -5289,48 +5850,130 @@ int sqlite3PagerLockingMode(Pager *pPager, int eMode){ } /* -** Get/set the journal-mode for this pager. Parameter eMode must be one of: +** Set the journal-mode for this pager. Parameter eMode must be one of: ** -** PAGER_JOURNALMODE_QUERY ** PAGER_JOURNALMODE_DELETE ** PAGER_JOURNALMODE_TRUNCATE ** PAGER_JOURNALMODE_PERSIST ** PAGER_JOURNALMODE_OFF ** PAGER_JOURNALMODE_MEMORY +** PAGER_JOURNALMODE_WAL ** -** If the parameter is not _QUERY, then the journal_mode is set to the -** value specified if the change is allowed. The change is disallowed -** for the following reasons: +** The journalmode is set to the value specified if the change is allowed. +** The change may be disallowed for the following reasons: ** ** * An in-memory database can only have its journal_mode set to _OFF ** or _MEMORY. ** -** * The journal mode may not be changed while a transaction is active. +** * Temporary databases cannot have _WAL journalmode. ** ** The returned indicate the current (possibly updated) journal-mode. 
*/ -int sqlite3PagerJournalMode(Pager *pPager, int eMode){ - assert( eMode==PAGER_JOURNALMODE_QUERY - || eMode==PAGER_JOURNALMODE_DELETE +int sqlite3PagerSetJournalMode(Pager *pPager, int eMode){ + u8 eOld = pPager->journalMode; /* Prior journalmode */ + + /* The eMode parameter is always valid */ + assert( eMode==PAGER_JOURNALMODE_DELETE || eMode==PAGER_JOURNALMODE_TRUNCATE || eMode==PAGER_JOURNALMODE_PERSIST || eMode==PAGER_JOURNALMODE_OFF + || eMode==PAGER_JOURNALMODE_WAL || eMode==PAGER_JOURNALMODE_MEMORY ); - assert( PAGER_JOURNALMODE_QUERY<0 ); - if( eMode>=0 - && (!MEMDB || eMode==PAGER_JOURNALMODE_MEMORY - || eMode==PAGER_JOURNALMODE_OFF) - && !pPager->dbModified - && (!isOpen(pPager->jfd) || 0==pPager->journalOff) - ){ - if( isOpen(pPager->jfd) ){ + + /* This routine is only called from the OP_JournalMode opcode, and + ** the logic there will never allow a temporary file to be changed + ** to WAL mode. + */ + assert( pPager->tempFile==0 || eMode!=PAGER_JOURNALMODE_WAL ); + + /* Do allow the journalmode of an in-memory database to be set to + ** anything other than MEMORY or OFF + */ + if( MEMDB ){ + assert( eOld==PAGER_JOURNALMODE_MEMORY || eOld==PAGER_JOURNALMODE_OFF ); + if( eMode!=PAGER_JOURNALMODE_MEMORY && eMode!=PAGER_JOURNALMODE_OFF ){ + eMode = eOld; + } + } + + if( eMode!=eOld ){ + /* When changing between rollback modes, close the journal file prior + ** to the change. But when changing from a rollback mode to WAL, keep + ** the journal open since there is a rollback-style transaction in play + ** used to convert the version numbers in the btree header. + */ + if( isOpen(pPager->jfd) && eMode!=PAGER_JOURNALMODE_WAL ){ sqlite3OsClose(pPager->jfd); } + + /* Change the journal mode. */ pPager->journalMode = (u8)eMode; + + /* When transistioning from TRUNCATE or PERSIST to any other journal + ** mode except WAL (and we are not in locking_mode=EXCLUSIVE) then + ** delete the journal file. + */ + assert( (PAGER_JOURNALMODE_TRUNCATE & 5)==1 ); + assert( (PAGER_JOURNALMODE_PERSIST & 5)==1 ); + assert( (PAGER_JOURNALMODE_DELETE & 5)==0 ); + assert( (PAGER_JOURNALMODE_MEMORY & 5)==4 ); + assert( (PAGER_JOURNALMODE_OFF & 5)==0 ); + assert( (PAGER_JOURNALMODE_WAL & 5)==5 ); + + assert( isOpen(pPager->fd) || pPager->exclusiveMode ); + if( !pPager->exclusiveMode && (eOld & 5)==1 && (eMode & 1)==0 ){ + + /* In this case we would like to delete the journal file. If it is + ** not possible, then that is not a problem. Deleting the journal file + ** here is an optimization only. + ** + ** Before deleting the journal file, obtain a RESERVED lock on the + ** database file. This ensures that the journal file is not deleted + ** while it is in use by some other client. + */ + int rc = SQLITE_OK; + int state = pPager->state; + if( statestate==PAGER_SHARED ){ + assert( rc==SQLITE_OK ); + rc = sqlite3OsLock(pPager->fd, RESERVED_LOCK); + } + if( rc==SQLITE_OK ){ + sqlite3OsDelete(pPager->pVfs, pPager->zJournal, 0); + } + if( rc==SQLITE_OK && state==PAGER_SHARED ){ + sqlite3OsUnlock(pPager->fd, SHARED_LOCK); + }else if( state==PAGER_UNLOCK ){ + pager_unlock(pPager); + } + assert( state==pPager->state ); + } } + + /* Return the new journal mode */ return (int)pPager->journalMode; } +/* +** Return the current journal mode. +*/ +int sqlite3PagerGetJournalMode(Pager *pPager){ + return (int)pPager->journalMode; +} + +/* +** Return TRUE if the pager is in a state where it is OK to change the +** journalmode. Journalmode changes can only happen when the database +** is unmodified. 
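From application code the new setter is reached through PRAGMA journal_mode. A short illustrative helper (the function name is invented, not part of the patch) that asks for WAL and confirms the mode actually in effect; as noted above, in-memory and temporary databases will report a different mode:

    #include <string.h>
    #include "sqlite3.h"

    /* Ask for WAL and confirm it took effect.  Returns SQLITE_OK on success. */
    static int setWalMode(sqlite3 *db){
      sqlite3_stmt *pStmt = 0;
      int rc = sqlite3_prepare_v2(db, "PRAGMA journal_mode=WAL;", -1, &pStmt, 0);
      if( rc==SQLITE_OK ){
        rc = SQLITE_ERROR;
        if( sqlite3_step(pStmt)==SQLITE_ROW ){
          const char *zMode = (const char*)sqlite3_column_text(pStmt, 0);
          if( zMode && strcmp(zMode, "wal")==0 ) rc = SQLITE_OK;
        }
      }
      sqlite3_finalize(pStmt);
      return rc;
    }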
+*/ +int sqlite3PagerOkToChangeJournalMode(Pager *pPager){ + if( pPager->dbModified ) return 0; + if( NEVER(isOpen(pPager->jfd) && pPager->journalOff>0) ) return 0; + return 1; +} + /* ** Get/set the size-limit used for persistent journal files. ** @@ -5354,4 +5997,144 @@ sqlite3_backup **sqlite3PagerBackupPtr(Pager *pPager){ return &pPager->pBackup; } +#ifndef SQLITE_OMIT_WAL +/* +** This function is called when the user invokes "PRAGMA checkpoint". +*/ +int sqlite3PagerCheckpoint(Pager *pPager){ + int rc = SQLITE_OK; + if( pPager->pWal ){ + u8 *zBuf = (u8 *)pPager->pTmpSpace; + rc = sqlite3WalCheckpoint(pPager->pWal, + (pPager->noSync ? 0 : pPager->sync_flags), + pPager->pageSize, zBuf + ); + } + return rc; +} + +int sqlite3PagerWalCallback(Pager *pPager){ + return sqlite3WalCallback(pPager->pWal); +} + +/* +** Return true if the underlying VFS for the given pager supports the +** primitives necessary for write-ahead logging. +*/ +int sqlite3PagerWalSupported(Pager *pPager){ + const sqlite3_io_methods *pMethods = pPager->fd->pMethods; + return pMethods->iVersion>=2 && pMethods->xShmMap!=0; +} + +/* +** The caller must be holding a SHARED lock on the database file to call +** this function. +** +** If the pager passed as the first argument is open on a real database +** file (not a temp file or an in-memory database), and the WAL file +** is not already open, make an attempt to open it now. If successful, +** return SQLITE_OK. If an error occurs or the VFS used by the pager does +** not support the xShmXXX() methods, return an error code. *pisOpen is +** not modified in either case. +** +** If the pager is open on a temp-file (or in-memory database), or if +** the WAL file is already open, set *pisOpen to 1 and return SQLITE_OK +** without doing anything. +*/ +int sqlite3PagerOpenWal( + Pager *pPager, /* Pager object */ + int *pisOpen /* OUT: Set to true if call is a no-op */ +){ + int rc = SQLITE_OK; /* Return code */ + + assert( pPager->state>=PAGER_SHARED ); + assert( (pisOpen==0 && !pPager->tempFile && !pPager->pWal) || *pisOpen==0 ); + + if( !pPager->tempFile && !pPager->pWal ){ + if( !sqlite3PagerWalSupported(pPager) ) return SQLITE_CANTOPEN; + + /* Open the connection to the log file. If this operation fails, + ** (e.g. due to malloc() failure), unlock the database file and + ** return an error code. + */ + rc = sqlite3WalOpen(pPager->pVfs, pPager->fd, pPager->zWal, &pPager->pWal); + if( rc==SQLITE_OK ){ + pPager->journalMode = PAGER_JOURNALMODE_WAL; + } + }else{ + *pisOpen = 1; + } + + return rc; +} + +/* +** This function is called to close the connection to the log file prior +** to switching from WAL to rollback mode. +** +** Before closing the log file, this function attempts to take an +** EXCLUSIVE lock on the database file. If this cannot be obtained, an +** error (SQLITE_BUSY) is returned and the log connection is not closed. +** If successful, the EXCLUSIVE lock is not released before returning. +*/ +int sqlite3PagerCloseWal(Pager *pPager){ + int rc = SQLITE_OK; + + assert( pPager->journalMode==PAGER_JOURNALMODE_WAL ); + + /* If the log file is not already open, but does exist in the file-system, + ** it may need to be checkpointed before the connection can switch to + ** rollback mode. Open it now so this can happen. 
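The checkpoint entry point is also exposed to applications. As an illustration only (the helper name is invented, not part of the patch): the C-level call checkpoints one named database, or every attached database when zDb is NULL, while the PRAGMA added in this patch does the same from SQL.

    #include "sqlite3.h"

    static int checkpointMain(sqlite3 *db){
      /* Checkpoint just the "main" database ... */
      int rc = sqlite3_wal_checkpoint(db, "main");
      /* ... or, from SQL, checkpoint every attached database:
      ** rc = sqlite3_exec(db, "PRAGMA wal_checkpoint;", 0, 0, 0); */
      return rc;
    }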
+ */ + if( !pPager->pWal ){ + int logexists = 0; + rc = sqlite3OsLock(pPager->fd, SQLITE_LOCK_SHARED); + if( rc==SQLITE_OK ){ + rc = sqlite3OsAccess( + pPager->pVfs, pPager->zWal, SQLITE_ACCESS_EXISTS, &logexists + ); + } + if( rc==SQLITE_OK && logexists ){ + rc = sqlite3WalOpen(pPager->pVfs, pPager->fd, + pPager->zWal, &pPager->pWal); + } + } + + /* Checkpoint and close the log. Because an EXCLUSIVE lock is held on + ** the database file, the log and log-summary files will be deleted. + */ + if( rc==SQLITE_OK && pPager->pWal ){ + rc = sqlite3OsLock(pPager->fd, SQLITE_LOCK_EXCLUSIVE); + if( rc==SQLITE_OK ){ + rc = sqlite3WalClose(pPager->pWal, + (pPager->noSync ? 0 : pPager->sync_flags), + pPager->pageSize, (u8*)pPager->pTmpSpace + ); + pPager->pWal = 0; + }else{ + /* If we cannot get an EXCLUSIVE lock, downgrade the PENDING lock + ** that we did get back to SHARED. */ + sqlite3OsUnlock(pPager->fd, SQLITE_LOCK_SHARED); + } + } + return rc; +} + +#ifdef SQLITE_HAS_CODEC +/* +** This function is called by the wal module when writing page content +** into the log file. +** +** This function returns a pointer to a buffer containing the encrypted +** page content. If a malloc fails, this function may return NULL. +*/ +void *sqlite3PagerCodec(PgHdr *pPg){ + void *aData = 0; + CODEC2(pPg->pPager, pPg->pData, pPg->pgno, 6, return 0, aData); + return aData; +} +#endif /* SQLITE_HAS_CODEC */ + +#endif /* !SQLITE_OMIT_WAL */ + #endif /* SQLITE_OMIT_DISKIO */ diff --git a/src/pager.h b/src/pager.h index 0fe1917..6036827 100644 --- a/src/pager.h +++ b/src/pager.h @@ -68,14 +68,15 @@ typedef struct PgHdr DbPage; #define PAGER_LOCKINGMODE_EXCLUSIVE 1 /* -** Valid values for the second argument to sqlite3PagerJournalMode(). +** Numeric constants that encode the journalmode. */ -#define PAGER_JOURNALMODE_QUERY -1 +#define PAGER_JOURNALMODE_QUERY (-1) /* Query the value of journalmode */ #define PAGER_JOURNALMODE_DELETE 0 /* Commit by deleting journal file */ #define PAGER_JOURNALMODE_PERSIST 1 /* Commit by zeroing journal header */ #define PAGER_JOURNALMODE_OFF 2 /* Journal omitted. */ #define PAGER_JOURNALMODE_TRUNCATE 3 /* Commit by truncating journal */ #define PAGER_JOURNALMODE_MEMORY 4 /* In-memory journal file */ +#define PAGER_JOURNALMODE_WAL 5 /* Use write-ahead logging */ /* ** The remainder of this file contains the declarations of the functions @@ -103,7 +104,9 @@ int sqlite3PagerMaxPageCount(Pager*, int); void sqlite3PagerSetCachesize(Pager*, int); void sqlite3PagerSetSafetyLevel(Pager*,int,int); int sqlite3PagerLockingMode(Pager *, int); -int sqlite3PagerJournalMode(Pager *, int); +int sqlite3PagerSetJournalMode(Pager *, int); +int sqlite3PagerGetJournalMode(Pager*); +int sqlite3PagerOkToChangeJournalMode(Pager*); i64 sqlite3PagerJournalSizeLimit(Pager *, i64); sqlite3_backup **sqlite3PagerBackupPtr(Pager*); @@ -133,9 +136,16 @@ int sqlite3PagerOpenSavepoint(Pager *pPager, int n); int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint); int sqlite3PagerSharedLock(Pager *pPager); +int sqlite3PagerCheckpoint(Pager *pPager); +int sqlite3PagerWalSupported(Pager *pPager); +int sqlite3PagerWalCallback(Pager *pPager); +int sqlite3PagerOpenWal(Pager *pPager, int *pisOpen); +int sqlite3PagerCloseWal(Pager *pPager); + /* Functions used to query pager state and configuration. 
*/ u8 sqlite3PagerIsreadonly(Pager*); int sqlite3PagerRefcount(Pager*); +int sqlite3PagerMemUsed(Pager*); const char *sqlite3PagerFilename(Pager*); const sqlite3_vfs *sqlite3PagerVfs(Pager*); sqlite3_file *sqlite3PagerFile(Pager*); @@ -147,6 +157,10 @@ int sqlite3PagerIsMemdb(Pager*); /* Functions used to truncate the database file. */ void sqlite3PagerTruncateImage(Pager*,Pgno); +#if defined(SQLITE_HAS_CODEC) && !defined(SQLITE_OMIT_WAL) +void *sqlite3PagerCodec(DbPage *); +#endif + /* Functions to support testing and debugging. */ #if !defined(NDEBUG) || defined(SQLITE_TEST) Pgno sqlite3PagerPagenumber(DbPage*); diff --git a/src/parse.y b/src/parse.y index 98714ee..c32f278 100644 --- a/src/parse.y +++ b/src/parse.y @@ -848,23 +848,27 @@ likeop(A) ::= LIKE_KW(X). {A.eOperator = X; A.not = 0;} likeop(A) ::= NOT LIKE_KW(X). {A.eOperator = X; A.not = 1;} likeop(A) ::= MATCH(X). {A.eOperator = X; A.not = 0;} likeop(A) ::= NOT MATCH(X). {A.eOperator = X; A.not = 1;} -%type escape {ExprSpan} -%destructor escape {sqlite3ExprDelete(pParse->db, $$.pExpr);} -escape(X) ::= ESCAPE expr(A). [ESCAPE] {X = A;} -escape(X) ::= . [ESCAPE] {memset(&X,0,sizeof(X));} -expr(A) ::= expr(X) likeop(OP) expr(Y) escape(E). [LIKE_KW] { +expr(A) ::= expr(X) likeop(OP) expr(Y). [LIKE_KW] { ExprList *pList; pList = sqlite3ExprListAppend(pParse,0, Y.pExpr); pList = sqlite3ExprListAppend(pParse,pList, X.pExpr); - if( E.pExpr ){ - pList = sqlite3ExprListAppend(pParse,pList, E.pExpr); - } A.pExpr = sqlite3ExprFunction(pParse, pList, &OP.eOperator); if( OP.not ) A.pExpr = sqlite3PExpr(pParse, TK_NOT, A.pExpr, 0, 0); A.zStart = X.zStart; A.zEnd = Y.zEnd; if( A.pExpr ) A.pExpr->flags |= EP_InfixFunc; } +expr(A) ::= expr(X) likeop(OP) expr(Y) ESCAPE expr(E). [LIKE_KW] { + ExprList *pList; + pList = sqlite3ExprListAppend(pParse,0, Y.pExpr); + pList = sqlite3ExprListAppend(pParse,pList, X.pExpr); + pList = sqlite3ExprListAppend(pParse,pList, E.pExpr); + A.pExpr = sqlite3ExprFunction(pParse, pList, &OP.eOperator); + if( OP.not ) A.pExpr = sqlite3PExpr(pParse, TK_NOT, A.pExpr, 0, 0); + A.zStart = X.zStart; + A.zEnd = E.zEnd; + if( A.pExpr ) A.pExpr->flags |= EP_InfixFunc; +} %include { /* Construct an expression node for a unary postfix operator @@ -959,14 +963,27 @@ expr(A) ::= expr(W) between_op(N) expr(X) AND expr(Y). [BETWEEN] { in_op(A) ::= IN. {A = 0;} in_op(A) ::= NOT IN. {A = 1;} expr(A) ::= expr(X) in_op(N) LP exprlist(Y) RP(E). [IN] { - A.pExpr = sqlite3PExpr(pParse, TK_IN, X.pExpr, 0, 0); - if( A.pExpr ){ - A.pExpr->x.pList = Y; - sqlite3ExprSetHeight(pParse, A.pExpr); + if( Y==0 ){ + /* Expressions of the form + ** + ** expr1 IN () + ** expr1 NOT IN () + ** + ** simplify to constants 0 (false) and 1 (true), respectively, + ** regardless of the value of expr1. 
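The empty-list behaviour described above can be observed directly from the C API. An illustration only, not part of the patch; with this change SQLite accepts an empty right-hand side for IN and NOT IN, and the expected output is 0 for IN () and 1 for NOT IN ():

    #include <stdio.h>
    #include "sqlite3.h"

    int main(void){
      sqlite3 *db;
      sqlite3_stmt *pStmt = 0;
      if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
      if( sqlite3_prepare_v2(db, "SELECT 5 IN (), 5 NOT IN ();",
                             -1, &pStmt, 0)==SQLITE_OK
       && sqlite3_step(pStmt)==SQLITE_ROW
      ){
        /* Expected: "5 IN () -> 0, 5 NOT IN () -> 1" */
        printf("5 IN () -> %d, 5 NOT IN () -> %d\n",
               sqlite3_column_int(pStmt, 0), sqlite3_column_int(pStmt, 1));
      }
      sqlite3_finalize(pStmt);
      sqlite3_close(db);
      return 0;
    }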
+ */ + A.pExpr = sqlite3PExpr(pParse, TK_INTEGER, 0, 0, &sqlite3IntTokens[N]); + sqlite3ExprDelete(pParse->db, X.pExpr); }else{ - sqlite3ExprListDelete(pParse->db, Y); + A.pExpr = sqlite3PExpr(pParse, TK_IN, X.pExpr, 0, 0); + if( A.pExpr ){ + A.pExpr->x.pList = Y; + sqlite3ExprSetHeight(pParse, A.pExpr); + }else{ + sqlite3ExprListDelete(pParse->db, Y); + } + if( N ) A.pExpr = sqlite3PExpr(pParse, TK_NOT, A.pExpr, 0, 0); } - if( N ) A.pExpr = sqlite3PExpr(pParse, TK_NOT, A.pExpr, 0, 0); A.zStart = X.zStart; A.zEnd = &E.z[E.n]; } diff --git a/src/pcache.c b/src/pcache.c index 41536e8..23ea0a7 100644 --- a/src/pcache.c +++ b/src/pcache.c @@ -260,15 +260,17 @@ int sqlite3PcacheFetch( if( pPage ){ if( !pPage->pData ){ - memset(pPage, 0, sizeof(PgHdr) + pCache->szExtra); - pPage->pExtra = (void*)&pPage[1]; - pPage->pData = (void *)&((char *)pPage)[sizeof(PgHdr) + pCache->szExtra]; + memset(pPage, 0, sizeof(PgHdr)); + pPage->pData = (void *)&pPage[1]; + pPage->pExtra = (void*)&((char *)pPage->pData)[pCache->szPage]; + memset(pPage->pExtra, 0, pCache->szExtra); pPage->pCache = pCache; pPage->pgno = pgno; } assert( pPage->pCache==pCache ); assert( pPage->pgno==pgno ); - assert( pPage->pExtra==(void *)&pPage[1] ); + assert( pPage->pData==(void *)&pPage[1] ); + assert( pPage->pExtra==(void *)&((char *)&pPage[1])[pCache->szPage] ); if( 0==pPage->nRef ){ pCache->nRef++; @@ -407,7 +409,12 @@ void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){ PgHdr *pNext; for(p=pCache->pDirty; p; p=pNext){ pNext = p->pDirtyNext; - if( p->pgno>pgno ){ + /* This routine never gets call with a positive pgno except right + ** after sqlite3PcacheCleanAll(). So if there are dirty pages, + ** it must be that pgno==0. + */ + assert( p->pgno>0 ); + if( ALWAYS(p->pgno>pgno) ){ assert( p->flags&PGHDR_DIRTY ); sqlite3PcacheMakeClean(p); } diff --git a/src/pcache1.c b/src/pcache1.c index a3bd0fc..9f2b299 100644 --- a/src/pcache1.c +++ b/src/pcache1.c @@ -173,6 +173,7 @@ static void *pcache1Alloc(int nByte){ int sz = sqlite3MallocSize(p); sqlite3StatusAdd(SQLITE_STATUS_PAGECACHE_OVERFLOW, sz); } + sqlite3MemdebugSetType(p, MEMTYPE_PCACHE); } return p; } @@ -190,7 +191,10 @@ static void pcache1Free(void *p){ pSlot->pNext = pcache1.pFree; pcache1.pFree = pSlot; }else{ - int iSize = sqlite3MallocSize(p); + int iSize; + assert( sqlite3MemdebugHasType(p, MEMTYPE_PCACHE) ); + sqlite3MemdebugSetType(p, MEMTYPE_HEAP); + iSize = sqlite3MallocSize(p); sqlite3StatusAdd(SQLITE_STATUS_PAGECACHE_OVERFLOW, -iSize); sqlite3_free(p); } @@ -712,7 +716,7 @@ static void pcache1Destroy(sqlite3_pcache *p){ ** already provided an alternative. */ void sqlite3PCacheSetDefault(void){ - static sqlite3_pcache_methods defaultMethods = { + static const sqlite3_pcache_methods defaultMethods = { 0, /* pArg */ pcache1Init, /* xInit */ pcache1Shutdown, /* xShutdown */ diff --git a/src/pragma.c b/src/pragma.c index bd96e47..2428965 100644 --- a/src/pragma.c +++ b/src/pragma.c @@ -173,6 +173,9 @@ static int flagPragma(Parse *pParse, const char *zLeft, const char *zRight){ { "legacy_file_format", SQLITE_LegacyFileFmt }, { "fullfsync", SQLITE_FullFSync }, { "reverse_unordered_selects", SQLITE_ReverseOrder }, +#ifndef SQLITE_OMIT_AUTOMATIC_INDEX + { "automatic_index", SQLITE_AutoIndex }, +#endif #ifdef SQLITE_DEBUG { "sql_trace", SQLITE_SqlTrace }, { "vdbe_listing", SQLITE_VdbeListing }, @@ -254,6 +257,31 @@ static const char *actionName(u8 action){ } #endif + +/* +** Parameter eMode must be one of the PAGER_JOURNALMODE_XXX constants +** defined in pager.h. 
This function returns the associated lowercase +** journal-mode name. +*/ +const char *sqlite3JournalModename(int eMode){ + static char * const azModeName[] = { + "delete", "persist", "off", "truncate", "memory" +#ifndef SQLITE_OMIT_WAL + , "wal" +#endif + }; + assert( PAGER_JOURNALMODE_DELETE==0 ); + assert( PAGER_JOURNALMODE_PERSIST==1 ); + assert( PAGER_JOURNALMODE_OFF==2 ); + assert( PAGER_JOURNALMODE_TRUNCATE==3 ); + assert( PAGER_JOURNALMODE_MEMORY==4 ); + assert( PAGER_JOURNALMODE_WAL==5 ); + assert( eMode>=0 && eMode<=ArraySize(azModeName) ); + + if( eMode==ArraySize(azModeName) ) return 0; + return azModeName[eMode]; +} + /* ** Process a pragma statement. ** @@ -326,11 +354,11 @@ void sqlite3Pragma( ** page cache size value and the persistent page cache size value ** stored in the database file. ** - ** The default cache size is stored in meta-value 2 of page 1 of the - ** database file. The cache size is actually the absolute value of - ** this memory location. The sign of meta-value 2 determines the - ** synchronous setting. A negative value means synchronous is off - ** and a positive value means synchronous is on. + ** Older versions of SQLite would set the default cache size to a + ** negative number to indicate synchronous=OFF. These days, synchronous + ** is always on by default regardless of the sign of the default cache + ** size. But continue to take the absolute value of the default cache + ** size of historical compatibility. */ if( sqlite3StrICmp(zLeft,"default_cache_size")==0 ){ static const VdbeOpList getCacheSize[] = { @@ -359,10 +387,6 @@ void sqlite3Pragma( if( size<0 ) size = -size; sqlite3BeginWriteOperation(pParse, 0, iDb); sqlite3VdbeAddOp2(v, OP_Integer, size, 1); - sqlite3VdbeAddOp3(v, OP_ReadCookie, iDb, 2, BTREE_DEFAULT_CACHE_SIZE); - addr = sqlite3VdbeAddOp2(v, OP_IfPos, 2, 0); - sqlite3VdbeAddOp2(v, OP_Integer, -size, 1); - sqlite3VdbeJumpHere(v, addr); sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_DEFAULT_CACHE_SIZE, 1); pDb->pSchema->cache_size = size; sqlite3BtreeSetCacheSize(pDb->pBt, pDb->pSchema->cache_size); @@ -507,62 +531,49 @@ void sqlite3Pragma( /* ** PRAGMA [database.]journal_mode - ** PRAGMA [database.]journal_mode = (delete|persist|off|truncate|memory) + ** PRAGMA [database.]journal_mode = + ** (delete|persist|off|truncate|memory|wal|off) */ if( sqlite3StrICmp(zLeft,"journal_mode")==0 ){ - int eMode; - static char * const azModeName[] = { - "delete", "persist", "off", "truncate", "memory" - }; + int eMode; /* One of the PAGER_JOURNALMODE_XXX symbols */ + int ii; /* Loop counter */ - if( zRight==0 ){ - eMode = PAGER_JOURNALMODE_QUERY; - }else{ - int n = sqlite3Strlen30(zRight); - eMode = sizeof(azModeName)/sizeof(azModeName[0]) - 1; - while( eMode>=0 && sqlite3StrNICmp(zRight, azModeName[eMode], n)!=0 ){ - eMode--; - } + /* Force the schema to be loaded on all databases. This cases all + ** database files to be opened and the journal_modes set. */ + if( sqlite3ReadSchema(pParse) ){ + goto pragma_out; } - if( pId2->n==0 && eMode==PAGER_JOURNALMODE_QUERY ){ - /* Simple "PRAGMA journal_mode;" statement. This is a query for - ** the current default journal mode (which may be different to - ** the journal-mode of the main database). - */ - eMode = db->dfltJournalMode; - }else{ - Pager *pPager; - if( pId2->n==0 ){ - /* This indicates that no database name was specified as part - ** of the PRAGMA command. In this case the journal-mode must be - ** set on all attached databases, as well as the main db file. 
- ** - ** Also, the sqlite3.dfltJournalMode variable is set so that - ** any subsequently attached databases also use the specified - ** journal mode. - */ - int ii; - assert(pDb==&db->aDb[0]); - for(ii=1; iinDb; ii++){ - if( db->aDb[ii].pBt ){ - pPager = sqlite3BtreePager(db->aDb[ii].pBt); - sqlite3PagerJournalMode(pPager, eMode); - } - } - db->dfltJournalMode = (u8)eMode; - } - pPager = sqlite3BtreePager(pDb->pBt); - eMode = sqlite3PagerJournalMode(pPager, eMode); - } - assert( eMode==PAGER_JOURNALMODE_DELETE - || eMode==PAGER_JOURNALMODE_TRUNCATE - || eMode==PAGER_JOURNALMODE_PERSIST - || eMode==PAGER_JOURNALMODE_OFF - || eMode==PAGER_JOURNALMODE_MEMORY ); + sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "journal_mode", SQLITE_STATIC); - sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, - azModeName[eMode], P4_STATIC); + + if( zRight==0 ){ + /* If there is no "=MODE" part of the pragma, do a query for the + ** current mode */ + eMode = PAGER_JOURNALMODE_QUERY; + }else{ + const char *zMode; + int n = sqlite3Strlen30(zRight); + for(eMode=0; (zMode = sqlite3JournalModename(eMode))!=0; eMode++){ + if( sqlite3StrNICmp(zRight, zMode, n)==0 ) break; + } + if( !zMode ){ + /* If the "=MODE" part does not match any known journal mode, + ** then do a query */ + eMode = PAGER_JOURNALMODE_QUERY; + } + } + if( eMode==PAGER_JOURNALMODE_QUERY && pId2->n==0 ){ + /* Convert "PRAGMA journal_mode" into "PRAGMA main.journal_mode" */ + iDb = 0; + pId2->n = 1; + } + for(ii=db->nDb-1; ii>=0; ii--){ + if( db->aDb[ii].pBt && (ii==iDb || pId2->n==0) ){ + sqlite3VdbeUsesBtree(v, ii); + sqlite3VdbeAddOp3(v, OP_JournalMode, ii, 1, eMode); + } + } sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); }else @@ -1380,6 +1391,36 @@ void sqlite3Pragma( }else #endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */ +#ifndef SQLITE_OMIT_WAL + /* + ** PRAGMA [database.]wal_checkpoint + ** + ** Checkpoint the database. + */ + if( sqlite3StrICmp(zLeft, "wal_checkpoint")==0 ){ + if( sqlite3ReadSchema(pParse) ) goto pragma_out; + sqlite3VdbeAddOp3(v, OP_Checkpoint, pId2->z?iDb:SQLITE_MAX_ATTACHED, 0, 0); + }else + + /* + ** PRAGMA wal_autocheckpoint + ** PRAGMA wal_autocheckpoint = N + ** + ** Configure a database connection to automatically checkpoint a database + ** after accumulating N frames in the log. Or query for the current value + ** of N. + */ + if( sqlite3StrICmp(zLeft, "wal_autocheckpoint")==0 ){ + if( zRight ){ + int nAuto = atoi(zRight); + sqlite3_wal_autocheckpoint(db, nAuto); + } + returnSingleInt(pParse, "wal_autocheckpoint", + db->xWalCallback==sqlite3WalDefaultHook ? + SQLITE_PTR_TO_INT(db->pWalArg) : 0); + }else +#endif + #if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) /* ** Report the current state of file logs for all databases diff --git a/src/prepare.c b/src/prepare.c index f1b1e00..e510001 100644 --- a/src/prepare.c +++ b/src/prepare.c @@ -73,15 +73,18 @@ int sqlite3InitCallback(void *pInit, int argc, char **argv, char **NotUsed){ ** or executed. All the parser does is build the internal data ** structures that describe the table, index, or view. 
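The wal_autocheckpoint pragma sits on top of the public sqlite3_wal_hook() and sqlite3_wal_autocheckpoint() interfaces. A hedged sketch of both, not part of the patch; registering a custom hook replaces the built-in auto-checkpoint hook and vice versa, and the built-in hook's default threshold is 1000 frames.

    #include "sqlite3.h"

    /* Called by SQLite after each commit in WAL mode, with the number of
    ** frames currently in the write-ahead log. */
    static int walHook(void *pArg, sqlite3 *db, const char *zDb, int nFrame){
      (void)pArg;
      if( nFrame>=1000 ){
        /* Best effort: the return code is deliberately ignored here. */
        sqlite3_wal_checkpoint(db, zDb);
      }
      return SQLITE_OK;
    }

    static void configureWal(sqlite3 *db){
      /* Either install a custom hook ... */
      sqlite3_wal_hook(db, walHook, 0);
      /* ... or rely on the built-in one, which the new pragma controls:
      **   sqlite3_wal_autocheckpoint(db, 1000);        (C API)
      **   "PRAGMA wal_autocheckpoint=1000;"            (SQL)   */
    }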
*/ - char *zErr; int rc; + sqlite3_stmt *pStmt; + TESTONLY(int rcp); /* Return code from sqlite3_prepare() */ + assert( db->init.busy ); db->init.iDb = iDb; db->init.newTnum = atoi(argv[1]); db->init.orphanTrigger = 0; - rc = sqlite3_exec(db, argv[2], 0, 0, &zErr); + TESTONLY(rcp = ) sqlite3_prepare(db, argv[2], -1, &pStmt, 0); + rc = db->errCode; + assert( (rc&0xFF)==(rcp&0xFF) ); db->init.iDb = 0; - assert( rc!=SQLITE_OK || zErr==0 ); if( SQLITE_OK!=rc ){ if( db->init.orphanTrigger ){ assert( iDb==1 ); @@ -89,12 +92,12 @@ int sqlite3InitCallback(void *pInit, int argc, char **argv, char **NotUsed){ pData->rc = rc; if( rc==SQLITE_NOMEM ){ db->mallocFailed = 1; - }else if( rc!=SQLITE_INTERRUPT && rc!=SQLITE_LOCKED ){ - corruptSchema(pData, argv[0], zErr); + }else if( rc!=SQLITE_INTERRUPT && (rc&0xFF)!=SQLITE_LOCKED ){ + corruptSchema(pData, argv[0], sqlite3_errmsg(db)); } } - sqlite3DbFree(db, zErr); } + sqlite3_finalize(pStmt); }else if( argv[0]==0 ){ corruptSchema(pData, 0, 0); }else{ @@ -579,6 +582,7 @@ static int sqlite3Prepare( sqlite3VtabUnlockList(db); pParse->db = db; + pParse->nQueryLoop = (double)1; if( nBytes>=0 && (nBytes==0 || zSql[nBytes-1]!=0) ){ char *zSqlCopy; int mxLen = db->aLimit[SQLITE_LIMIT_SQL_LENGTH]; @@ -600,6 +604,7 @@ static int sqlite3Prepare( }else{ sqlite3RunParser(pParse, zSql, &zErrMsg); } + assert( 1==(int)pParse->nQueryLoop ); if( db->mallocFailed ){ pParse->rc = SQLITE_NOMEM; diff --git a/src/resolve.c b/src/resolve.c index 3a44aef..74d6aae 100644 --- a/src/resolve.c +++ b/src/resolve.c @@ -357,6 +357,7 @@ static int lookupName( }else{ sqlite3ErrorMsg(pParse, "%s: %s", zErr, zCol); } + pParse->checkSchema = 1; pTopNC->nErr++; } @@ -403,7 +404,7 @@ lookupname_end: /* ** Allocate and return a pointer to an expression to load the column iCol -** from datasource iSrc datasource in SrcList pSrc. +** from datasource iSrc in SrcList pSrc. */ Expr *sqlite3CreateColumnExpr(sqlite3 *db, SrcList *pSrc, int iSrc, int iCol){ Expr *p = sqlite3ExprAlloc(db, TK_COLUMN, 0, 0); @@ -415,6 +416,8 @@ Expr *sqlite3CreateColumnExpr(sqlite3 *db, SrcList *pSrc, int iSrc, int iCol){ p->iColumn = -1; }else{ p->iColumn = (ynVar)iCol; + testcase( iCol==BMS ); + testcase( iCol==BMS-1 ); pItem->colUsed |= ((Bitmask)1)<<(iCol>=BMS ? BMS-1 : iCol); } ExprSetProperty(p, EP_Resolved); diff --git a/src/select.c b/src/select.c index be4b95c..b03e506 100644 --- a/src/select.c +++ b/src/select.c @@ -2526,8 +2526,8 @@ static void substSelect( ** (14) The subquery does not use OFFSET ** ** (15) The outer query is not part of a compound select or the -** subquery does not have both an ORDER BY and a LIMIT clause. -** (See ticket #2339) +** subquery does not have a LIMIT clause. +** (See ticket #2339 and ticket [02a8e81d44]). ** ** (16) The outer query is not an aggregate or the subquery does ** not contain ORDER BY. (Ticket #2942) This used to not matter @@ -2610,7 +2610,7 @@ static int flattenSubquery( ** and (14). 
*/ if( pSub->pLimit && p->pLimit ) return 0; /* Restriction (13) */ if( pSub->pOffset ) return 0; /* Restriction (14) */ - if( p->pRightmost && pSub->pLimit && pSub->pOrderBy ){ + if( p->pRightmost && pSub->pLimit ){ return 0; /* Restriction (15) */ } if( pSubSrc->nSrc==0 ) return 0; /* Restriction (7) */ @@ -3020,6 +3020,7 @@ int sqlite3IndexedByLookup(Parse *pParse, struct SrcList_item *pFrom){ ); if( !pIdx ){ sqlite3ErrorMsg(pParse, "no such index: %s", zIndex, 0); + pParse->checkSchema = 1; return SQLITE_ERROR; } pFrom->pIndex = pIdx; @@ -3498,6 +3499,18 @@ static void updateAccumulator(Parse *pParse, AggInfo *pAggInfo){ sqlite3ExprCacheClear(pParse); } } + + /* Before populating the accumulator registers, clear the column cache. + ** Otherwise, if any of the required column values are already present + ** in registers, sqlite3ExprCode() may use OP_SCopy to copy the value + ** to pC->iMem. But by the time the value is used, the original register + ** may have been used, invalidating the underlying buffer holding the + ** text or blob value. See ticket [883034dcb5]. + ** + ** Another solution would be to change the OP_SCopy used to copy cached + ** values to an OP_Copy. + */ + sqlite3ExprCacheClear(pParse); for(i=0, pC=pAggInfo->aCol; inAccumulator; i++, pC++){ sqlite3ExprCode(pParse, pC->pExpr, pC->iMem); } @@ -3706,6 +3719,18 @@ int sqlite3Select( isDistinct = 0; } + /* If there is both a GROUP BY and an ORDER BY clause and they are + ** identical, then disable the ORDER BY clause since the GROUP BY + ** will cause elements to come out in the correct order. This is + ** an optimization - the correct answer should result regardless. + ** Use the SQLITE_GroupByOrder flag with SQLITE_TESTCTRL_OPTIMIZER + ** to disable this optimization for testing purposes. + */ + if( sqlite3ExprListCompare(p->pGroupBy, pOrderBy)==0 + && (db->flags & SQLITE_GroupByOrder)==0 ){ + pOrderBy = 0; + } + /* If there is an ORDER BY clause, then this sorting ** index might end up being unused if the data can be ** extracted in pre-sorted order. If that is the case, then the diff --git a/src/shell.c b/src/shell.c index a1c0b5e..23099b9 100644 --- a/src/shell.c +++ b/src/shell.c @@ -2578,7 +2578,6 @@ int main(int argc, char **argv){ */ if( zFirstCmd[0]=='.' ){ rc = do_meta_command(zFirstCmd, &data); - return rc; }else{ open_db(&data); rc = shell_exec(data.db, zFirstCmd, shell_callback, &data, &zErrMsg); @@ -2625,9 +2624,10 @@ int main(int argc, char **argv){ } } set_table_name(&data, 0); - if( db ){ - if( sqlite3_close(db)!=SQLITE_OK ){ - fprintf(stderr,"Error: cannot close database \"%s\"\n", sqlite3_errmsg(db)); + if( data.db ){ + if( sqlite3_close(data.db)!=SQLITE_OK ){ + fprintf(stderr,"Error: cannot close database \"%s\"\n", + sqlite3_errmsg(db)); rc++; } } diff --git a/src/sqlite.h.in b/src/sqlite.h.in index f831496..a8c6be2 100644 --- a/src/sqlite.h.in +++ b/src/sqlite.h.in @@ -141,7 +141,6 @@ const char *sqlite3_libversion(void); const char *sqlite3_sourceid(void); int sqlite3_libversion_number(void); -#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS /* ** CAPI3REF: Run-Time Library Compilation Options Diagnostics ** @@ -164,9 +163,10 @@ int sqlite3_libversion_number(void); ** See also: SQL functions [sqlite_compileoption_used()] and ** [sqlite_compileoption_get()] and the [compile_options pragma]. 
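The compile-option diagnostics referred to above can be exercised from application code. As an illustration only, not part of the patch; the names returned by sqlite3_compileoption_get() omit the SQLITE_ prefix, while sqlite3_compileoption_used() accepts names with or without it.

    #include <stdio.h>
    #include "sqlite3.h"

    int main(void){
      int i;
      const char *zOpt;
      for(i=0; (zOpt = sqlite3_compileoption_get(i))!=0; i++){
        printf("SQLITE_%s\n", zOpt);   /* returned names omit the prefix */
      }
      printf("THREADSAFE used: %d\n",
             sqlite3_compileoption_used("SQLITE_THREADSAFE"));
      return 0;
    }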
*/ +#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS int sqlite3_compileoption_used(const char *zOptName); const char *sqlite3_compileoption_get(int N); -#endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */ +#endif /* ** CAPI3REF: Test To See If The Library Is Threadsafe @@ -388,7 +388,7 @@ int sqlite3_exec( #define SQLITE_NOTFOUND 12 /* NOT USED. Table or record not found */ #define SQLITE_FULL 13 /* Insertion failed because database is full */ #define SQLITE_CANTOPEN 14 /* Unable to open the database file */ -#define SQLITE_PROTOCOL 15 /* NOT USED. Database lock protocol error */ +#define SQLITE_PROTOCOL 15 /* Database lock protocol error */ #define SQLITE_EMPTY 16 /* Database is empty */ #define SQLITE_SCHEMA 17 /* The database schema changed */ #define SQLITE_TOOBIG 18 /* String or BLOB exceeds size limit */ @@ -444,7 +444,12 @@ int sqlite3_exec( #define SQLITE_IOERR_LOCK (SQLITE_IOERR | (15<<8)) #define SQLITE_IOERR_CLOSE (SQLITE_IOERR | (16<<8)) #define SQLITE_IOERR_DIR_CLOSE (SQLITE_IOERR | (17<<8)) -#define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8) ) +#define SQLITE_IOERR_SHMOPEN (SQLITE_IOERR | (18<<8)) +#define SQLITE_IOERR_SHMSIZE (SQLITE_IOERR | (19<<8)) +#define SQLITE_IOERR_SHMLOCK (SQLITE_IOERR | (20<<8)) +#define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8)) +#define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8)) +#define SQLITE_CANTOPEN_NOTEMPDIR (SQLITE_CANTOPEN | (1<<8)) /* ** CAPI3REF: Flags For File Open Operations @@ -471,11 +476,12 @@ int sqlite3_exec( #define SQLITE_OPEN_FULLMUTEX 0x00010000 /* Ok for sqlite3_open_v2() */ #define SQLITE_OPEN_SHAREDCACHE 0x00020000 /* Ok for sqlite3_open_v2() */ #define SQLITE_OPEN_PRIVATECACHE 0x00040000 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_WAL 0x00080000 /* VFS only */ /* ** CAPI3REF: Device Characteristics ** -** The xDeviceCapabilities method of the [sqlite3_io_methods] +** The xDeviceCharacteristics method of the [sqlite3_io_methods] ** object returns an integer which is a vector of the these ** bit values expressing I/O characteristics of the mass storage ** device that holds the file that the [sqlite3_io_methods] @@ -492,17 +498,18 @@ int sqlite3_exec( ** information is written to disk in the same order as calls ** to xWrite(). 
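The new extended codes follow the existing convention that the primary result code occupies the low byte. A small illustrative helper, not part of the patch, that enables extended codes for a connection and reduces a code back to its primary value; this is the same (rc&0xFF) reduction used by the schema-loading change elsewhere in this patch.

    #include <stdio.h>
    #include "sqlite3.h"

    static void reportError(sqlite3 *db, int rc){
      int primary = rc & 0xFF;    /* e.g. SQLITE_BUSY_RECOVERY -> SQLITE_BUSY */
      printf("error %d (primary %d): %s\n", rc, primary, sqlite3_errmsg(db));
    }

    static void enableExtendedCodes(sqlite3 *db){
      /* After this call, step/exec may return extended values such as
      ** SQLITE_BUSY_RECOVERY instead of plain SQLITE_BUSY. */
      sqlite3_extended_result_codes(db, 1);
    }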
*/ -#define SQLITE_IOCAP_ATOMIC 0x00000001 -#define SQLITE_IOCAP_ATOMIC512 0x00000002 -#define SQLITE_IOCAP_ATOMIC1K 0x00000004 -#define SQLITE_IOCAP_ATOMIC2K 0x00000008 -#define SQLITE_IOCAP_ATOMIC4K 0x00000010 -#define SQLITE_IOCAP_ATOMIC8K 0x00000020 -#define SQLITE_IOCAP_ATOMIC16K 0x00000040 -#define SQLITE_IOCAP_ATOMIC32K 0x00000080 -#define SQLITE_IOCAP_ATOMIC64K 0x00000100 -#define SQLITE_IOCAP_SAFE_APPEND 0x00000200 -#define SQLITE_IOCAP_SEQUENTIAL 0x00000400 +#define SQLITE_IOCAP_ATOMIC 0x00000001 +#define SQLITE_IOCAP_ATOMIC512 0x00000002 +#define SQLITE_IOCAP_ATOMIC1K 0x00000004 +#define SQLITE_IOCAP_ATOMIC2K 0x00000008 +#define SQLITE_IOCAP_ATOMIC4K 0x00000010 +#define SQLITE_IOCAP_ATOMIC8K 0x00000020 +#define SQLITE_IOCAP_ATOMIC16K 0x00000040 +#define SQLITE_IOCAP_ATOMIC32K 0x00000080 +#define SQLITE_IOCAP_ATOMIC64K 0x00000100 +#define SQLITE_IOCAP_SAFE_APPEND 0x00000200 +#define SQLITE_IOCAP_SEQUENTIAL 0x00000400 +#define SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN 0x00000800 /* ** CAPI3REF: File Locking Levels @@ -653,6 +660,12 @@ struct sqlite3_io_methods { int (*xFileControl)(sqlite3_file*, int op, void *pArg); int (*xSectorSize)(sqlite3_file*); int (*xDeviceCharacteristics)(sqlite3_file*); + /* Methods above are valid for version 1 */ + int (*xShmMap)(sqlite3_file*, int iPg, int pgsz, int, void volatile**); + int (*xShmLock)(sqlite3_file*, int offset, int n, int flags); + void (*xShmBarrier)(sqlite3_file*); + int (*xShmUnmap)(sqlite3_file*, int deleteFlag); + /* Methods above are valid for version 2 */ /* Additional methods may be added in future releases */ }; @@ -670,11 +683,19 @@ struct sqlite3_io_methods { ** into an integer that the pArg argument points to. This capability ** is used during testing and only needs to be supported when SQLITE_TEST ** is defined. +** +** The [SQLITE_FCNTL_SIZE_HINT] opcode is used by SQLite to give the VFS +** layer a hint of how large the database file will grow to be during the +** current transaction. This hint is not guaranteed to be accurate but it +** is often close. The underlying VFS might choose to preallocate database +** file space based on this hint in order to help writes to the database +** file run faster. */ #define SQLITE_FCNTL_LOCKSTATE 1 #define SQLITE_GET_LOCKPROXYFILE 2 #define SQLITE_SET_LOCKPROXYFILE 3 #define SQLITE_LAST_ERRNO 4 +#define SQLITE_FCNTL_SIZE_HINT 5 /* ** CAPI3REF: Mutex Handle @@ -806,20 +827,27 @@ typedef struct sqlite3_mutex sqlite3_mutex; ** handled as a fatal error by SQLite, vfs implementations should endeavor ** to prevent this by setting mxPathname to a sufficiently large value. ** -** The xRandomness(), xSleep(), and xCurrentTime() interfaces -** are not strictly a part of the filesystem, but they are +** The xRandomness(), xSleep(), xCurrentTime(), and xCurrentTimeInt64() +** interfaces are not strictly a part of the filesystem, but they are ** included in the VFS structure for completeness. ** The xRandomness() function attempts to return nBytes bytes ** of good-quality randomness into zOut. The return value is ** the actual number of bytes of randomness obtained. ** The xSleep() method causes the calling thread to sleep for at ** least the number of microseconds given. The xCurrentTime() -** method returns a Julian Day Number for the current date and time. -** +** method returns a Julian Day Number for the current date and time as +** a floating point value. 
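A VFS that wants to act on SQLITE_FCNTL_SIZE_HINT receives it through xFileControl(). The sketch below is an assumption-laden illustration, not part of the patch: the DemoFile structure and the use of posix_fallocate() are invented for the example; the documented contract is only that pArg points to an sqlite3_int64 holding the expected final size of the database file.

    #include <fcntl.h>
    #include "sqlite3.h"

    typedef struct DemoFile DemoFile;
    struct DemoFile {
      sqlite3_file base;   /* Base class, must be first */
      int fd;              /* Underlying file descriptor */
    };

    static int demoFileControl(sqlite3_file *pFile, int op, void *pArg){
      DemoFile *p = (DemoFile*)pFile;
      if( op==SQLITE_FCNTL_SIZE_HINT ){
        sqlite3_int64 szHint = *(sqlite3_int64*)pArg;
        /* Best-effort pre-allocation; failure is not treated as an error. */
        (void)posix_fallocate(p->fd, 0, (off_t)szHint);
        return SQLITE_OK;
      }
      return SQLITE_ERROR;   /* opcode not handled by this sketch */
    }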
+** The xCurrentTimeInt64() method returns, as an integer, the Julian +** Day Number multipled by 86400000 (the number of milliseconds in +** a 24-hour day). +** ^SQLite will use the xCurrentTimeInt64() method to get the current +** date and time if that method is available (if iVersion is 2 or +** greater and the function pointer is not NULL) and will fall back +** to xCurrentTime() if xCurrentTimeInt64() is unavailable. */ typedef struct sqlite3_vfs sqlite3_vfs; struct sqlite3_vfs { - int iVersion; /* Structure version number */ + int iVersion; /* Structure version number (currently 2) */ int szOsFile; /* Size of subclassed sqlite3_file */ int mxPathname; /* Maximum file pathname length */ sqlite3_vfs *pNext; /* Next registered VFS */ @@ -838,8 +866,16 @@ struct sqlite3_vfs { int (*xSleep)(sqlite3_vfs*, int microseconds); int (*xCurrentTime)(sqlite3_vfs*, double*); int (*xGetLastError)(sqlite3_vfs*, int, char *); - /* New fields may be appended in figure versions. The iVersion - ** value will increment whenever this happens. */ + /* + ** The methods above are in version 1 of the sqlite_vfs object + ** definition. Those that follow are added in version 2 or later + */ + int (*xCurrentTimeInt64)(sqlite3_vfs*, sqlite3_int64*); + /* + ** The methods above are in versions 1 and 2 of the sqlite_vfs object. + ** New fields may be appended in figure versions. The iVersion + ** value will increment whenever this happens. + */ }; /* @@ -851,13 +887,58 @@ struct sqlite3_vfs { ** With SQLITE_ACCESS_EXISTS, the xAccess method ** simply checks whether the file exists. ** With SQLITE_ACCESS_READWRITE, the xAccess method -** checks whether the file is both readable and writable. +** checks whether the named directory is both readable and writable +** (in other words, if files can be added, removed, and renamed within +** the directory). +** The SQLITE_ACCESS_READWRITE constant is currently used only by the +** [temp_store_directory pragma], though this could change in a future +** release of SQLite. ** With SQLITE_ACCESS_READ, the xAccess method -** checks whether the file is readable. +** checks whether the file is readable. The SQLITE_ACCESS_READ constant is +** currently unused, though it might be used in a future release of +** SQLite. */ #define SQLITE_ACCESS_EXISTS 0 -#define SQLITE_ACCESS_READWRITE 1 -#define SQLITE_ACCESS_READ 2 +#define SQLITE_ACCESS_READWRITE 1 /* Used by PRAGMA temp_store_directory */ +#define SQLITE_ACCESS_READ 2 /* Unused */ + +/* +** CAPI3REF: Flags for the xShmLock VFS method +** +** These integer constants define the various locking operations +** allowed by the xShmLock method of [sqlite3_io_methods]. The +** following are the only legal combinations of flags to the +** xShmLock method: +** +**
    +**
+** <li> SQLITE_SHM_LOCK | SQLITE_SHM_SHARED
+** <li> SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE
+** <li> SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED
+** <li> SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE
+** +** When unlocking, the same SHARED or EXCLUSIVE flag must be supplied as +** was given no the corresponding lock. +** +** The xShmLock method can transition between unlocked and SHARED or +** between unlocked and EXCLUSIVE. It cannot transition between SHARED +** and EXCLUSIVE. +*/ +#define SQLITE_SHM_UNLOCK 1 +#define SQLITE_SHM_LOCK 2 +#define SQLITE_SHM_SHARED 4 +#define SQLITE_SHM_EXCLUSIVE 8 + +/* +** CAPI3REF: Maximum xShmLock index +** +** The xShmLock method on [sqlite3_io_methods] may use values +** between 0 and this upper bound as its "offset" argument. +** The SQLite core will never attempt to acquire or release a +** lock outside of this range +*/ +#define SQLITE_SHM_NLOCK 8 + /* ** CAPI3REF: Initialize The SQLite Library @@ -968,11 +1049,10 @@ int sqlite3_os_end(void); ** ^If the option is unknown or SQLite is unable to set the option ** then this routine returns a non-zero [error code]. */ -SQLITE_EXPERIMENTAL int sqlite3_config(int, ...); +int sqlite3_config(int, ...); /* ** CAPI3REF: Configure database connections -** EXPERIMENTAL ** ** The sqlite3_db_config() interface is used to make configuration ** changes to a [database connection]. The interface is similar to @@ -992,11 +1072,10 @@ SQLITE_EXPERIMENTAL int sqlite3_config(int, ...); ** ^Calls to sqlite3_db_config() return SQLITE_OK if and only if ** the call is considered successful. */ -SQLITE_EXPERIMENTAL int sqlite3_db_config(sqlite3*, int op, ...); +int sqlite3_db_config(sqlite3*, int op, ...); /* ** CAPI3REF: Memory Allocation Routines -** EXPERIMENTAL ** ** An instance of this object defines the interface between SQLite ** and low-level memory allocation routines. @@ -1078,7 +1157,6 @@ struct sqlite3_mem_methods { /* ** CAPI3REF: Configuration Options -** EXPERIMENTAL ** ** These constants are the available integer configuration options that ** can be passed as the first argument to the [sqlite3_config()] interface. @@ -1264,6 +1342,24 @@ struct sqlite3_mem_methods { ** [sqlite3_pcache_methods] object. SQLite copies of the current ** page cache implementation into that object.)^ ** +**
SQLITE_CONFIG_LOG
+**
^The SQLITE_CONFIG_LOG option takes two arguments: a pointer to a +** function with a call signature of void(*)(void*,int,const char*), +** and a pointer to void. ^If the function pointer is not NULL, it is +** invoked by [sqlite3_log()] to process each logging event. ^If the +** function pointer is NULL, the [sqlite3_log()] interface becomes a no-op. +** ^The void pointer that is the second argument to SQLITE_CONFIG_LOG is +** passed through as the first parameter to the application-defined logger +** function whenever that function is invoked. ^The second parameter to +** the logger function is a copy of the first parameter to the corresponding +** [sqlite3_log()] call and is intended to be a [result code] or an +** [extended result code]. ^The third parameter passed to the logger is +** log message after formatting via [sqlite3_snprintf()]. +** The SQLite logging interface is not reentrant; the logger function +** supplied by the application must not invoke any SQLite interface. +** In a multi-threaded application, the application-defined logger +** function must be threadsafe.
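As an illustration only (not part of the patch), registering a logger through this option from application code; sqlite3_config() may only be called while the library is uninitialized, so it is done before any other SQLite call.

    #include <stdio.h>
    #include "sqlite3.h"

    static void logCallback(void *pArg, int iErrCode, const char *zMsg){
      (void)pArg;
      fprintf(stderr, "sqlite(%d): %s\n", iErrCode, zMsg);
    }

    int main(void){
      sqlite3 *db;
      sqlite3_config(SQLITE_CONFIG_LOG, logCallback, (void*)0);
      sqlite3_initialize();
      sqlite3_open(":memory:", &db);
      sqlite3_log(SQLITE_ERROR, "logging is %s", "active");  /* test entry */
      sqlite3_close(db);
      return 0;
    }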
+** ** */ #define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ @@ -1284,8 +1380,7 @@ struct sqlite3_mem_methods { #define SQLITE_CONFIG_LOG 16 /* xFunc, void* */ /* -** CAPI3REF: Configuration Options -** EXPERIMENTAL +** CAPI3REF: Database Connection Configuration Options ** ** These constants are the available integer configuration options that ** can be passed as the second argument to the [sqlite3_db_config()] interface. @@ -2061,7 +2156,6 @@ int sqlite3_set_authorizer( /* ** CAPI3REF: Tracing And Profiling Functions -** EXPERIMENTAL ** ** These routines register callback functions that can be used for ** tracing and profiling the execution of SQL statements. @@ -2079,7 +2173,7 @@ int sqlite3_set_authorizer( ** the original statement text and an estimate of wall-clock time ** of how long that statement took to run. */ -SQLITE_EXPERIMENTAL void *sqlite3_trace(sqlite3*, void(*xTrace)(void*,const char*), void*); +void *sqlite3_trace(sqlite3*, void(*xTrace)(void*,const char*), void*); SQLITE_EXPERIMENTAL void *sqlite3_profile(sqlite3*, void(*xProfile)(void*,const char*,sqlite3_uint64), void*); @@ -2872,6 +2966,14 @@ const void *sqlite3_column_decltype16(sqlite3_stmt*,int); ** be the case that the same database connection is being used by two or ** more threads at the same moment in time. ** +** For all versions of SQLite up to and including 3.6.23.1, it was required +** after sqlite3_step() returned anything other than [SQLITE_ROW] that +** [sqlite3_reset()] be called before any subsequent invocation of +** sqlite3_step(). Failure to invoke [sqlite3_reset()] in this way would +** result in an [SQLITE_MISUSE] return from sqlite3_step(). But after +** version 3.6.23.1, sqlite3_step() began calling [sqlite3_reset()] +** automatically in this circumstance rather than returning [SQLITE_MISUSE]. +** ** Goofy Interface Alert: In the legacy interface, the sqlite3_step() ** API always returns a generic error code, [SQLITE_ERROR], following any ** error other than [SQLITE_BUSY] and [SQLITE_MISUSE]. You must call @@ -3684,7 +3786,7 @@ int sqlite3_collation_needed16( void(*)(void*,sqlite3*,int eTextRep,const void*) ); -#if SQLITE_HAS_CODEC +#ifdef SQLITE_HAS_CODEC /* ** Specify the key for an encrypted database. This routine should be ** called right after sqlite3_open(). @@ -3867,8 +3969,6 @@ sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt); ** an error or constraint causes an implicit rollback to occur. ** ^The rollback callback is not invoked if a transaction is ** automatically rolled back because the database connection is closed. -** ^The rollback callback is not invoked if a transaction is -** rolled back because a commit callback returned non-zero. ** ** See also the [sqlite3_update_hook()] interface. */ @@ -4154,8 +4254,6 @@ int sqlite3_auto_extension(void (*xEntryPoint)(void)); void sqlite3_reset_auto_extension(void); /* -****** EXPERIMENTAL - subject to change without notice ************** -** ** The interface to the virtual-table mechanism is currently considered ** to be experimental. The interface might change in incompatible ways. ** If this is a problem for you, do not use the interface at this time. @@ -4175,7 +4273,6 @@ typedef struct sqlite3_module sqlite3_module; /* ** CAPI3REF: Virtual Table Object ** KEYWORDS: sqlite3_module {virtual table module} -** EXPERIMENTAL ** ** This structure, sometimes called a a "virtual table module", ** defines the implementation of a [virtual tables]. 
@@ -4222,7 +4319,6 @@ struct sqlite3_module { /* ** CAPI3REF: Virtual Table Indexing Information ** KEYWORDS: sqlite3_index_info -** EXPERIMENTAL ** ** The sqlite3_index_info structure and its substructures is used to ** pass information into and receive the reply from the [xBestIndex] @@ -4304,7 +4400,6 @@ struct sqlite3_index_info { /* ** CAPI3REF: Register A Virtual Table Implementation -** EXPERIMENTAL ** ** ^These routines are used to register a new [virtual table module] name. ** ^Module names must be registered before @@ -4326,13 +4421,13 @@ struct sqlite3_index_info { ** interface is equivalent to sqlite3_create_module_v2() with a NULL ** destructor. */ -SQLITE_EXPERIMENTAL int sqlite3_create_module( +int sqlite3_create_module( sqlite3 *db, /* SQLite connection to register module with */ const char *zName, /* Name of the module */ const sqlite3_module *p, /* Methods for the module */ void *pClientData /* Client data for xCreate/xConnect */ ); -SQLITE_EXPERIMENTAL int sqlite3_create_module_v2( +int sqlite3_create_module_v2( sqlite3 *db, /* SQLite connection to register module with */ const char *zName, /* Name of the module */ const sqlite3_module *p, /* Methods for the module */ @@ -4343,7 +4438,6 @@ SQLITE_EXPERIMENTAL int sqlite3_create_module_v2( /* ** CAPI3REF: Virtual Table Instance Object ** KEYWORDS: sqlite3_vtab -** EXPERIMENTAL ** ** Every [virtual table module] implementation uses a subclass ** of this object to describe a particular instance @@ -4369,7 +4463,6 @@ struct sqlite3_vtab { /* ** CAPI3REF: Virtual Table Cursor Object ** KEYWORDS: sqlite3_vtab_cursor {virtual table cursor} -** EXPERIMENTAL ** ** Every [virtual table module] implementation uses a subclass of the ** following structure to describe cursors that point into the @@ -4391,18 +4484,16 @@ struct sqlite3_vtab_cursor { /* ** CAPI3REF: Declare The Schema Of A Virtual Table -** EXPERIMENTAL ** ** ^The [xCreate] and [xConnect] methods of a ** [virtual table module] call this interface ** to declare the format (the names and datatypes of the columns) of ** the virtual tables they implement. */ -SQLITE_EXPERIMENTAL int sqlite3_declare_vtab(sqlite3*, const char *zSQL); +int sqlite3_declare_vtab(sqlite3*, const char *zSQL); /* ** CAPI3REF: Overload A Function For A Virtual Table -** EXPERIMENTAL ** ** ^(Virtual tables can provide alternative implementations of functions ** using the [xFindFunction] method of the [virtual table module]. @@ -4417,7 +4508,7 @@ SQLITE_EXPERIMENTAL int sqlite3_declare_vtab(sqlite3*, const char *zSQL); ** purpose is to be a placeholder function that can be overloaded ** by a [virtual table]. */ -SQLITE_EXPERIMENTAL int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg); +int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg); /* ** The interface to the virtual-table mechanism defined above (back up @@ -4427,8 +4518,6 @@ SQLITE_EXPERIMENTAL int sqlite3_overload_function(sqlite3*, const char *zFuncNam ** ** When the virtual-table mechanism stabilizes, we will declare the ** interface fixed, support it indefinitely, and remove this comment. -** -****** EXPERIMENTAL - subject to change without notice ************** */ /* @@ -4771,7 +4860,6 @@ void sqlite3_mutex_leave(sqlite3_mutex*); /* ** CAPI3REF: Mutex Methods Object -** EXPERIMENTAL ** ** An instance of this structure defines the low-level routines ** used to allocate and use mutexes. 
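[Editor's note: an illustrative use of the dynamic mutex interfaces declared above; this sketch is not part of the patch, and the guarded counter is hypothetical.]

    #include "sqlite3.h"

    static int gCounter = 0;

    /* Serialize access to gCounter with a dynamically allocated mutex.
    ** In builds compiled with SQLITE_THREADSAFE=0, sqlite3_mutex_alloc()
    ** returns NULL and the enter/leave calls are documented no-ops. */
    void bumpCounter(sqlite3_mutex *pMutex){
      sqlite3_mutex_enter(pMutex);
      gCounter++;
      sqlite3_mutex_leave(pMutex);
    }

    sqlite3_mutex *counterMutexAlloc(void){
      return sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
    }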
@@ -4984,11 +5072,11 @@ int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_RESERVE 14 #define SQLITE_TESTCTRL_OPTIMIZATIONS 15 #define SQLITE_TESTCTRL_ISKEYWORD 16 -#define SQLITE_TESTCTRL_LAST 16 +#define SQLITE_TESTCTRL_PGHDRSZ 17 +#define SQLITE_TESTCTRL_LAST 17 /* ** CAPI3REF: SQLite Runtime Status -** EXPERIMENTAL ** ** ^This interface is used to retrieve runtime status information ** about the preformance of SQLite, and optionally to reset various @@ -5016,12 +5104,11 @@ int sqlite3_test_control(int op, ...); ** ** See also: [sqlite3_db_status()] */ -SQLITE_EXPERIMENTAL int sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag); +int sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag); /* ** CAPI3REF: Status Parameters -** EXPERIMENTAL ** ** These integer constants designate various run-time status parameters ** that can be returned by [sqlite3_status()]. @@ -5108,14 +5195,15 @@ SQLITE_EXPERIMENTAL int sqlite3_status(int op, int *pCurrent, int *pHighwater, i /* ** CAPI3REF: Database Connection Status -** EXPERIMENTAL ** ** ^This interface is used to retrieve runtime status information ** about a single [database connection]. ^The first argument is the ** database connection object to be interrogated. ^The second argument -** is the parameter to interrogate. ^Currently, the only allowed value -** for the second parameter is [SQLITE_DBSTATUS_LOOKASIDE_USED]. -** Additional options will likely appear in future releases of SQLite. +** is an integer constant, taken from the set of +** [SQLITE_DBSTATUS_LOOKASIDE_USED | SQLITE_DBSTATUS_*] macros, that +** determiness the parameter to interrogate. The set of +** [SQLITE_DBSTATUS_LOOKASIDE_USED | SQLITE_DBSTATUS_*] macros is likely +** to grow in future releases of SQLite. ** ** ^The current value of the requested parameter is written into *pCur ** and the highest instantaneous value is written into *pHiwtr. ^If @@ -5124,11 +5212,10 @@ SQLITE_EXPERIMENTAL int sqlite3_status(int op, int *pCurrent, int *pHighwater, i ** ** See also: [sqlite3_status()] and [sqlite3_stmt_status()]. */ -SQLITE_EXPERIMENTAL int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg); +int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg); /* ** CAPI3REF: Status Parameters for database connections -** EXPERIMENTAL ** ** These constants are the available integer "verbs" that can be passed as ** the second argument to the [sqlite3_db_status()] interface. @@ -5143,14 +5230,21 @@ SQLITE_EXPERIMENTAL int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiw ** ^(
SQLITE_DBSTATUS_LOOKASIDE_USED
**   This parameter returns the number of lookaside memory slots currently
**   checked out.)^
+**
+** SQLITE_DBSTATUS_CACHE_USED
+**   ^This parameter returns the approximate number of bytes of heap
+**   memory used by all pager caches associated with the database connection.
+**   ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_USED is always 0.
+**
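[Editor's note: a minimal sketch of querying the new SQLITE_DBSTATUS_CACHE_USED parameter through sqlite3_db_status(); illustrative only, not part of this patch.]

    #include <stdio.h>
    #include "sqlite3.h"

    /* Print the approximate heap memory used by all pager caches belonging
    ** to the (already open) connection "db".  The highwater value is
    ** documented to always be zero for this parameter. */
    void printCacheUsed(sqlite3 *db){
      int nCur = 0, nHiwtr = 0;
      if( sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_USED, &nCur, &nHiwtr, 0)==SQLITE_OK ){
        printf("pager cache heap usage: %d bytes\n", nCur);
      }
    }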
** */ #define SQLITE_DBSTATUS_LOOKASIDE_USED 0 +#define SQLITE_DBSTATUS_CACHE_USED 1 +#define SQLITE_DBSTATUS_MAX 1 /* Largest defined DBSTATUS */ /* ** CAPI3REF: Prepared Statement Status -** EXPERIMENTAL ** ** ^(Each prepared statement maintains various ** [SQLITE_STMTSTATUS_SORT | counters] that measure the number @@ -5172,11 +5266,10 @@ SQLITE_EXPERIMENTAL int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiw ** ** See also: [sqlite3_status()] and [sqlite3_db_status()]. */ -SQLITE_EXPERIMENTAL int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg); +int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg); /* ** CAPI3REF: Status Parameters for prepared statements -** EXPERIMENTAL ** ** These preprocessor macros define integer codes that name counter ** values associated with the [sqlite3_stmt_status()] interface. @@ -5194,14 +5287,21 @@ SQLITE_EXPERIMENTAL int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg); ** A non-zero value in this counter may indicate an opportunity to ** improvement performance through careful use of indices. ** +**
SQLITE_STMTSTATUS_AUTOINDEX
+**   ^This is the number of rows inserted into transient indices that
+**   were created automatically in order to help joins run faster.
+**   A non-zero value in this counter may indicate an opportunity to
+**   improve performance by adding permanent indices that do not
+**   need to be reinitialized each time the statement is run.
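[Editor's note: an illustrative check of the new SQLITE_STMTSTATUS_AUTOINDEX counter; not part of this patch.]

    #include "sqlite3.h"

    /* After a statement has been run, a non-zero AUTOINDEX count means the
    ** query planner built a transient automatic index, suggesting that a
    ** permanent index might speed the query up.  Passing 0 as the final
    ** argument leaves the counter unreset. */
    int usedAutomaticIndex(sqlite3_stmt *pStmt){
      return sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_AUTOINDEX, 0)>0;
    }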
+** ** */ #define SQLITE_STMTSTATUS_FULLSCAN_STEP 1 #define SQLITE_STMTSTATUS_SORT 2 +#define SQLITE_STMTSTATUS_AUTOINDEX 3 /* ** CAPI3REF: Custom Page Cache Object -** EXPERIMENTAL ** ** The sqlite3_pcache type is opaque. It is implemented by ** the pluggable module. The SQLite core has no knowledge of @@ -5216,7 +5316,6 @@ typedef struct sqlite3_pcache sqlite3_pcache; /* ** CAPI3REF: Application Defined Page Cache. ** KEYWORDS: {page cache} -** EXPERIMENTAL ** ** ^(The [sqlite3_config]([SQLITE_CONFIG_PCACHE], ...) interface can ** register an alternative page cache implementation by passing in an @@ -5358,7 +5457,6 @@ struct sqlite3_pcache_methods { /* ** CAPI3REF: Online Backup Object -** EXPERIMENTAL ** ** The sqlite3_backup object records state information about an ongoing ** online backup operation. ^The sqlite3_backup object is created by @@ -5371,7 +5469,6 @@ typedef struct sqlite3_backup sqlite3_backup; /* ** CAPI3REF: Online Backup API. -** EXPERIMENTAL ** ** The backup API copies the content of one database into another. ** It is useful either for creating backups of databases or @@ -5440,10 +5537,14 @@ typedef struct sqlite3_backup sqlite3_backup; ** [SQLITE_NOMEM], [SQLITE_BUSY], [SQLITE_LOCKED], or an ** [SQLITE_IOERR_ACCESS | SQLITE_IOERR_XXX] extended error code. ** -** ^The sqlite3_backup_step() might return [SQLITE_READONLY] if the destination -** database was opened read-only or if -** the destination is an in-memory database with a different page size -** from the source database. +** ^(The sqlite3_backup_step() might return [SQLITE_READONLY] if +**
+**
+**   1. the destination database was opened read-only, or
+**   2. the destination database is using write-ahead-log journaling
+**      and the destination and source page sizes differ, or
+**   3. The destination database is an in-memory database and the
+**      destination and source page sizes differ.
)^ ** ** ^If sqlite3_backup_step() cannot obtain a required file-system lock, then ** the [sqlite3_busy_handler | busy-handler function] @@ -5559,7 +5660,6 @@ int sqlite3_backup_pagecount(sqlite3_backup *p); /* ** CAPI3REF: Unlock Notification -** EXPERIMENTAL ** ** ^When running in shared-cache mode, a database operation may fail with ** an [SQLITE_LOCKED] error if the required locks on the shared-cache or @@ -5681,7 +5781,6 @@ int sqlite3_unlock_notify( /* ** CAPI3REF: String Comparison -** EXPERIMENTAL ** ** ^The [sqlite3_strnicmp()] API allows applications and extensions to ** compare the contents of two buffers containing UTF-8 strings in a @@ -5692,12 +5791,11 @@ int sqlite3_strnicmp(const char *, const char *, int); /* ** CAPI3REF: Error Logging Interface -** EXPERIMENTAL ** ** ^The [sqlite3_log()] interface writes a message into the error log ** established by the [SQLITE_CONFIG_LOG] option to [sqlite3_config()]. ** ^If logging is enabled, the zFormat string and subsequent arguments are -** passed through to [sqlite3_vmprintf()] to generate the final output string. +** used with [sqlite3_snprintf()] to generate the final output string. ** ** The sqlite3_log() interface is intended for use by extensions such as ** virtual tables, collating functions, and SQL functions. While there is @@ -5714,6 +5812,89 @@ int sqlite3_strnicmp(const char *, const char *, int); */ void sqlite3_log(int iErrCode, const char *zFormat, ...); +/* +** CAPI3REF: Write-Ahead Log Commit Hook +** +** ^The [sqlite3_wal_hook()] function is used to register a callback that +** will be invoked each time a database connection commits data to a +** [write-ahead log] (i.e. whenever a transaction is committed in +** [journal_mode | journal_mode=WAL mode]). +** +** ^The callback is invoked by SQLite after the commit has taken place and +** the associated write-lock on the database released, so the implementation +** may read, write or [checkpoint] the database as required. +** +** ^The first parameter passed to the callback function when it is invoked +** is a copy of the third parameter passed to sqlite3_wal_hook() when +** registering the callback. ^The second is a copy of the database handle. +** ^The third parameter is the name of the database that was written to - +** either "main" or the name of an [ATTACH]-ed database. ^The fourth parameter +** is the number of pages currently in the write-ahead log file, +** including those that were just committed. +** +** The callback function should normally return [SQLITE_OK]. ^If an error +** code is returned, that error will propagate back up through the +** SQLite code base to cause the statement that provoked the callback +** to report an error, though the commit will have still occurred. If the +** callback returns [SQLITE_ROW] or [SQLITE_DONE], or if it returns a value +** that does not correspond to any valid SQLite error code, the results +** are undefined. +** +** A single database handle may have at most a single write-ahead log callback +** registered at one time. ^Calling [sqlite3_wal_hook()] replaces any +** previously registered write-ahead log callback. ^Note that the +** [sqlite3_wal_autocheckpoint()] interface and the +** [wal_autocheckpoint pragma] both invoke [sqlite3_wal_hook()] and will +** those overwrite any prior [sqlite3_wal_hook()] settings. 
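[Editor's note: the patch hunk continues below with the declaration of sqlite3_wal_hook(). The following sketch is not part of the patch; it illustrates the registration pattern just described, and the 5000-frame threshold and function names are hypothetical.]

    #include "sqlite3.h"

    /* Checkpoint the write-ahead log once it holds at least 5000 frames,
    ** roughly what sqlite3_wal_autocheckpoint() arranges automatically. */
    static int xWalSizeHook(void *pArg, sqlite3 *db, const char *zDb, int nFrame){
      (void)pArg;
      if( nFrame>=5000 ){
        sqlite3_wal_checkpoint(db, zDb);
      }
      return SQLITE_OK;
    }

    void installWalHook(sqlite3 *db){
      sqlite3_wal_hook(db, xWalSizeHook, 0);
    }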
+*/ +void *sqlite3_wal_hook( + sqlite3*, + int(*)(void *,sqlite3*,const char*,int), + void* +); + +/* +** CAPI3REF: Configure an auto-checkpoint +** +** ^The [sqlite3_wal_autocheckpoint(D,N)] is a wrapper around +** [sqlite3_wal_hook()] that causes any database on [database connection] D +** to automatically [checkpoint] +** after committing a transaction if there are N or +** more frames in the [write-ahead log] file. ^Passing zero or +** a negative value as the nFrame parameter disables automatic +** checkpoints entirely. +** +** ^The callback registered by this function replaces any existing callback +** registered using [sqlite3_wal_hook()]. ^Likewise, registering a callback +** using [sqlite3_wal_hook()] disables the automatic checkpoint mechanism +** configured by this function. +** +** ^The [wal_autocheckpoint pragma] can be used to invoke this interface +** from SQL. +** +** ^Every new [database connection] defaults to having the auto-checkpoint +** enabled with a threshold of 1000 pages. The use of this interface +** is only necessary if the default setting is found to be suboptimal +** for a particular application. +*/ +int sqlite3_wal_autocheckpoint(sqlite3 *db, int N); + +/* +** CAPI3REF: Checkpoint a database +** +** ^The [sqlite3_wal_checkpoint(D,X)] interface causes database named X +** on [database connection] D to be [checkpointed]. ^If X is NULL or an +** empty string, then a checkpoint is run on all databases of +** connection D. ^If the database connection D is not in +** [WAL | write-ahead log mode] then this interface is a harmless no-op. +** +** ^The [wal_checkpoint pragma] can be used to invoke this interface +** from SQL. ^The [sqlite3_wal_autocheckpoint()] interface and the +** [wal_autocheckpoint pragma] can be used to cause this interface to be +** run whenever the WAL reaches a certain size threshold. +*/ +int sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb); + /* ** Undo the hack that converts floating point types to integer for ** builds on processors without floating point support. diff --git a/src/sqliteInt.h b/src/sqliteInt.h index 5720b9c..15fedf5 100644 --- a/src/sqliteInt.h +++ b/src/sqliteInt.h @@ -92,7 +92,7 @@ ** The correct "ANSI" way to do this is to use the intptr_t type. ** Unfortunately, that typedef is not available on all compilers, or ** if it is available, it requires an #include of specific headers -** that very from one machine to the next. +** that vary from one machine to the next. ** ** Ticket #3860: The llvm-gcc-4.2 compiler from Apple chokes on ** the ((void*)&((char*)0)[X]) construct. But MSVC chokes on ((void*)(X)). @@ -272,6 +272,13 @@ # define NEVER(X) (X) #endif +/* +** Return true (non-zero) if the input is a integer that is too large +** to fit in 32-bits. This macro is used inside of various testcase() +** macros to verify that we have tested SQLite for large-file support. +*/ +#define IS_BIG_INT(X) (((X)&~(i64)0xffffffff)!=0) + /* ** The macro unlikely() is a hint that surrounds a boolean ** expression that is usually false. 
Macro likely() surrounds @@ -301,6 +308,7 @@ */ #ifdef SQLITE_OMIT_FLOATING_POINT # define double sqlite_int64 +# define float sqlite_int64 # define LONGDOUBLE_TYPE sqlite_int64 # ifndef SQLITE_BIG_DBL # define SQLITE_BIG_DBL (((sqlite3_int64)1)<<50) @@ -789,7 +797,6 @@ struct sqlite3 { u8 temp_store; /* 1: file 2: memory 0: default */ u8 mallocFailed; /* True if we have seen a malloc failure */ u8 dfltLockMode; /* Default locking-mode for attached dbs */ - u8 dfltJournalMode; /* Default journal mode for attached dbs */ signed char nextAutovac; /* Autovac setting after VACUUM if >=0 */ u8 suppressErr; /* Do not issue error messages if true */ int nextPagesize; /* Pagesize after VACUUM if >0 */ @@ -822,6 +829,10 @@ struct sqlite3 { void (*xRollbackCallback)(void*); /* Invoked at every commit. */ void *pUpdateArg; void (*xUpdateCallback)(void*,int, const char*,const char*,sqlite_int64); +#ifndef SQLITE_OMIT_WAL + int (*xWalCallback)(void *, sqlite3 *, const char *, int); + void *pWalArg; +#endif void(*xCollNeeded)(void*,sqlite3*,int eTextRep,const char*); void(*xCollNeeded16)(void*,sqlite3*,int eTextRep,const void*); void *pCollNeededArg; @@ -911,6 +922,8 @@ struct sqlite3 { #define SQLITE_ReverseOrder 0x01000000 /* Reverse unordered SELECTs */ #define SQLITE_RecTriggers 0x02000000 /* Enable recursive triggers */ #define SQLITE_ForeignKeys 0x04000000 /* Enforce foreign key constraints */ +#define SQLITE_AutoIndex 0x08000000 /* Enable automatic indexes */ +#define SQLITE_PreferBuiltin 0x10000000 /* Preference to built-in funcs */ /* ** Bits of the sqlite3.flags field that are used by the @@ -922,7 +935,8 @@ struct sqlite3 { #define SQLITE_IndexSort 0x04 /* Disable indexes for sorting */ #define SQLITE_IndexSearch 0x08 /* Disable indexes for searching */ #define SQLITE_IndexCover 0x10 /* Disable index covering table */ -#define SQLITE_OptMask 0x1f /* Mask of all disablable opts */ +#define SQLITE_GroupByOrder 0x20 /* Disable GROUPBY cover of ORDERBY */ +#define SQLITE_OptMask 0xff /* Mask of all disablable opts */ /* ** Possible values for the sqlite.magic field. @@ -1773,6 +1787,9 @@ typedef u64 Bitmask; ** and the next table on the list. The parser builds the list this way. ** But sqlite3SrcListShiftJoinType() later shifts the jointypes so that each ** jointype expresses the join between the table and the previous table. +** +** In the colUsed field, the high-order bit (bit 63) is set if the table +** contains more than 63 columns and the 64-th or later column is used. 
*/ struct SrcList { i16 nSrc; /* Number of tables or subqueries in the FROM clause */ @@ -1884,7 +1901,7 @@ struct WhereLevel { #define WHERE_ORDERBY_MAX 0x0002 /* ORDER BY processing for max() func */ #define WHERE_ONEPASS_DESIRED 0x0004 /* Want to do one-pass UPDATE/DELETE */ #define WHERE_DUPLICATES_OK 0x0008 /* Ok to return a row more than once */ -#define WHERE_OMIT_OPEN 0x0010 /* Table cursor are already open */ +#define WHERE_OMIT_OPEN 0x0010 /* Table cursors are already open */ #define WHERE_OMIT_CLOSE 0x0020 /* Omit close of table & index cursors */ #define WHERE_FORCE_TABLE 0x0040 /* Do not use an index-only search */ #define WHERE_ONETABLE_ONLY 0x0080 /* Only code the 1st table in pTabList */ @@ -1907,6 +1924,7 @@ struct WhereInfo { int iBreak; /* Jump here to break out of the loop */ int nLevel; /* Number of nested loop */ struct WhereClause *pWC; /* Decomposition of the WHERE clause */ + double savedNQueryLoop; /* pParse->nQueryLoop outside the WHERE loop */ WhereLevel a[1]; /* Information about each nest loop in WHERE */ }; @@ -2148,6 +2166,7 @@ struct Parse { u8 eTriggerOp; /* TK_UPDATE, TK_INSERT or TK_DELETE */ u8 eOrconf; /* Default ON CONFLICT policy for trigger steps */ u8 disableTriggers; /* True to disable triggers */ + double nQueryLoop; /* Estimated number of iterations of a query */ /* Above is constant between recursions. Below is reset before and after ** each recursion */ @@ -2523,7 +2542,8 @@ const sqlite3_mem_methods *sqlite3MemGetMemsys5(void); #ifndef SQLITE_MUTEX_OMIT - sqlite3_mutex_methods *sqlite3DefaultMutex(void); + sqlite3_mutex_methods const *sqlite3DefaultMutex(void); + sqlite3_mutex_methods const *sqlite3NoopMutex(void); sqlite3_mutex *sqlite3MutexAlloc(int); int sqlite3MutexInit(void); int sqlite3MutexEnd(void); @@ -2655,6 +2675,7 @@ void sqlite3Update(Parse*, SrcList*, ExprList*, Expr*, int); WhereInfo *sqlite3WhereBegin(Parse*, SrcList*, Expr*, ExprList**, u16); void sqlite3WhereEnd(WhereInfo*); int sqlite3ExprCodeGetColumn(Parse*, Table*, int, int, int); +void sqlite3ExprCodeGetColumnOfTable(Vdbe*, Table*, int, int, int); void sqlite3ExprCodeMove(Parse*, int, int, int); void sqlite3ExprCodeCopy(Parse*, int, int, int); void sqlite3ExprCacheStore(Parse*, int, int, int); @@ -2681,6 +2702,7 @@ void sqlite3Vacuum(Parse*); int sqlite3RunVacuum(char**, sqlite3*); char *sqlite3NameFromToken(sqlite3*, Token*); int sqlite3ExprCompare(Expr*, Expr*); +int sqlite3ExprListCompare(ExprList*, ExprList*); void sqlite3ExprAnalyzeAggregates(NameContext*, Expr*); void sqlite3ExprAnalyzeAggList(NameContext*,ExprList*); Vdbe *sqlite3GetVdbe(Parse*); @@ -2868,13 +2890,16 @@ void sqlite3ValueApplyAffinity(sqlite3_value *, u8, u8); extern const unsigned char sqlite3OpcodeProperty[]; extern const unsigned char sqlite3UpperToLower[]; extern const unsigned char sqlite3CtypeMap[]; +extern const Token sqlite3IntTokens[]; extern SQLITE_WSD struct Sqlite3Config sqlite3Config; extern SQLITE_WSD FuncDefHash sqlite3GlobalFunctions; +#ifndef SQLITE_OMIT_WSD extern int sqlite3PendingByte; #endif +#endif void sqlite3RootPageMoved(Db*, int, int); void sqlite3Reindex(Parse*, Token*, Token*); -void sqlite3AlterFunctions(sqlite3*); +void sqlite3AlterFunctions(void); void sqlite3AlterRenameTable(Parse*, SrcList*, Token*); int sqlite3GetToken(const unsigned char *, int *); void sqlite3NestedParse(Parse*, const char*, ...); @@ -2983,6 +3008,9 @@ void sqlite3ExprListCheckLength(Parse*, ExprList*, const char*); CollSeq *sqlite3BinaryCompareCollSeq(Parse *, Expr *, Expr *); int 
sqlite3TempInMemory(const sqlite3*); VTable *sqlite3GetVTable(sqlite3*, Table*); +const char *sqlite3JournalModename(int); +int sqlite3Checkpoint(sqlite3*, int); +int sqlite3WalDefaultHook(void*,sqlite3*,const char*,int); /* Declarations for functions in fkey.c. All of these are replaced by ** no-op macros if OMIT_FOREIGN_KEY is defined. In this case no foreign @@ -3089,4 +3117,43 @@ SQLITE_EXTERN void (*sqlite3IoTrace)(const char*,...); # define sqlite3VdbeIOTraceSql(X) #endif +/* +** These routines are available for the mem2.c debugging memory allocator +** only. They are used to verify that different "types" of memory +** allocations are properly tracked by the system. +** +** sqlite3MemdebugSetType() sets the "type" of an allocation to one of +** the MEMTYPE_* macros defined below. The type must be a bitmask with +** a single bit set. +** +** sqlite3MemdebugHasType() returns true if any of the bits in its second +** argument match the type set by the previous sqlite3MemdebugSetType(). +** sqlite3MemdebugHasType() is intended for use inside assert() statements. +** For example: +** +** assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) ); +** +** Perhaps the most important point is the difference between MEMTYPE_HEAP +** and MEMTYPE_DB. If an allocation is MEMTYPE_DB, that means it might have +** been allocated by lookaside, except the allocation was too large or +** lookaside was already full. It is important to verify that allocations +** that might have been satisfied by lookaside are not passed back to +** non-lookaside free() routines. Asserts such as the example above are +** placed on the non-lookaside free() routines to verify this constraint. +** +** All of this is no-op for a production build. It only comes into +** play when the SQLITE_MEMDEBUG compile-time option is used. +*/ +#ifdef SQLITE_MEMDEBUG + void sqlite3MemdebugSetType(void*,u8); + int sqlite3MemdebugHasType(void*,u8); +#else +# define sqlite3MemdebugSetType(X,Y) /* no-op */ +# define sqlite3MemdebugHasType(X,Y) 1 #endif +#define MEMTYPE_HEAP 0x01 /* General heap allocations */ +#define MEMTYPE_DB 0x02 /* Associated with a database connection */ +#define MEMTYPE_SCRATCH 0x04 /* Scratch allocations */ +#define MEMTYPE_PCACHE 0x08 /* Page cache allocations */ + +#endif /* _SQLITEINT_H_ */ diff --git a/src/sqliteLimit.h b/src/sqliteLimit.h index faf80d4..1dc5d67 100644 --- a/src/sqliteLimit.h +++ b/src/sqliteLimit.h @@ -108,6 +108,14 @@ # define SQLITE_DEFAULT_TEMP_CACHE_SIZE 500 #endif +/* +** The default number of frames to accumulate in the log file before +** checkpointing the database in WAL mode. +*/ +#ifndef SQLITE_DEFAULT_WAL_AUTOCHECKPOINT +# define SQLITE_DEFAULT_WAL_AUTOCHECKPOINT 1000 +#endif + /* ** The maximum number of attached databases. This must be between 0 ** and 30. The upper bound on 30 is because a 32-bit integer bitmap diff --git a/src/status.c b/src/status.c index f4c77a9..f310f36 100644 --- a/src/status.c +++ b/src/status.c @@ -112,6 +112,26 @@ int sqlite3_db_status( } break; } + + /* + ** Return an approximation for the amount of memory currently used + ** by all pagers associated with the given database connection. The + ** highwater mark is meaningless and is returned as zero. 
+ */ + case SQLITE_DBSTATUS_CACHE_USED: { + int totalUsed = 0; + int i; + for(i=0; inDb; i++){ + Btree *pBt = db->aDb[i].pBt; + if( pBt ){ + Pager *pPager = sqlite3BtreePager(pBt); + totalUsed += sqlite3PagerMemUsed(pPager); + } + } + *pCurrent = totalUsed; + *pHighwater = 0; + break; + } default: { return SQLITE_ERROR; } diff --git a/src/tclsqlite.c b/src/tclsqlite.c index 7048e4e..a2d352c 100644 --- a/src/tclsqlite.c +++ b/src/tclsqlite.c @@ -123,6 +123,7 @@ struct SqliteDb { SqlFunc *pFunc; /* List of SQL functions */ Tcl_Obj *pUpdateHook; /* Update hook script (if any) */ Tcl_Obj *pRollbackHook; /* Rollback hook script (if any) */ + Tcl_Obj *pWalHook; /* WAL hook script (if any) */ Tcl_Obj *pUnlockNotify; /* Unlock notify script (if any) */ SqlCollate *pCollate; /* List of SQL collation functions */ int rc; /* Return code of most recent sqlite3_exec() */ @@ -132,7 +133,7 @@ struct SqliteDb { int maxStmt; /* The next maximum number of stmtList */ int nStmt; /* Number of statements in stmtList */ IncrblobChannel *pIncrblob;/* Linked list of open incrblob channels */ - int nStep, nSort; /* Statistics for most recent operation */ + int nStep, nSort, nIndex; /* Statistics for most recent operation */ int nTransaction; /* Number of nested [transaction] methods */ }; @@ -485,6 +486,9 @@ static void DbDeleteCmd(void *db){ if( pDb->pRollbackHook ){ Tcl_DecrRefCount(pDb->pRollbackHook); } + if( pDb->pWalHook ){ + Tcl_DecrRefCount(pDb->pWalHook); + } if( pDb->pCollateNeeded ){ Tcl_DecrRefCount(pDb->pCollateNeeded); } @@ -589,6 +593,35 @@ static void DbRollbackHandler(void *clientData){ } } +/* +** This procedure handles wal_hook callbacks. +*/ +static int DbWalHandler( + void *clientData, + sqlite3 *db, + const char *zDb, + int nEntry +){ + int ret = SQLITE_OK; + Tcl_Obj *p; + SqliteDb *pDb = (SqliteDb*)clientData; + Tcl_Interp *interp = pDb->interp; + assert(pDb->pWalHook); + + p = Tcl_DuplicateObj(pDb->pWalHook); + Tcl_IncrRefCount(p); + Tcl_ListObjAppendElement(interp, p, Tcl_NewStringObj(zDb, -1)); + Tcl_ListObjAppendElement(interp, p, Tcl_NewIntObj(nEntry)); + if( TCL_OK!=Tcl_EvalObjEx(interp, p, 0) + || TCL_OK!=Tcl_GetIntFromObj(interp, Tcl_GetObjResult(interp), &ret) + ){ + Tcl_BackgroundError(interp); + } + Tcl_DecrRefCount(p); + + return ret; +} + #if defined(SQLITE_TEST) && defined(SQLITE_ENABLE_UNLOCK_NOTIFY) static void setTestUnlockNotifyVars(Tcl_Interp *interp, int iArg, int nArg){ char zBuf[64]; @@ -1351,6 +1384,7 @@ static int dbEvalStep(DbEvalContext *p){ pDb->nStep = sqlite3_stmt_status(pStmt,SQLITE_STMTSTATUS_FULLSCAN_STEP,1); pDb->nSort = sqlite3_stmt_status(pStmt,SQLITE_STMTSTATUS_SORT,1); + pDb->nIndex = sqlite3_stmt_status(pStmt,SQLITE_STMTSTATUS_AUTOINDEX,1); dbReleaseColumnNames(p); p->pPreStmt = 0; @@ -1544,7 +1578,7 @@ static int DbObjCmd(void *cd, Tcl_Interp *interp, int objc,Tcl_Obj *const*objv){ "restore", "rollback_hook", "status", "timeout", "total_changes", "trace", "transaction", "unlock_notify", "update_hook", - "version", 0 + "version", "wal_hook", 0 }; enum DB_enum { DB_AUTHORIZER, DB_BACKUP, DB_BUSY, @@ -1558,7 +1592,7 @@ static int DbObjCmd(void *cd, Tcl_Interp *interp, int objc,Tcl_Obj *const*objv){ DB_RESTORE, DB_ROLLBACK_HOOK, DB_STATUS, DB_TIMEOUT, DB_TOTAL_CHANGES, DB_TRACE, DB_TRANSACTION, DB_UNLOCK_NOTIFY, DB_UPDATE_HOOK, - DB_VERSION, + DB_VERSION, DB_WAL_HOOK }; /* don't leave trailing commas on DB_enum, it confuses the AIX xlc compiler */ @@ -2528,7 +2562,7 @@ static int DbObjCmd(void *cd, Tcl_Interp *interp, int objc,Tcl_Obj *const*objv){ } /* - 
** $db status (step|sort) + ** $db status (step|sort|autoindex) ** ** Display SQLITE_STMTSTATUS_FULLSCAN_STEP or ** SQLITE_STMTSTATUS_SORT for the most recent eval. @@ -2545,8 +2579,11 @@ static int DbObjCmd(void *cd, Tcl_Interp *interp, int objc,Tcl_Obj *const*objv){ v = pDb->nStep; }else if( strcmp(zOp, "sort")==0 ){ v = pDb->nSort; + }else if( strcmp(zOp, "autoindex")==0 ){ + v = pDb->nIndex; }else{ - Tcl_AppendResult(interp, "bad argument: should be step or sort", + Tcl_AppendResult(interp, + "bad argument: should be autoindex, step, or sort", (char*)0); return TCL_ERROR; } @@ -2726,9 +2763,11 @@ static int DbObjCmd(void *cd, Tcl_Interp *interp, int objc,Tcl_Obj *const*objv){ } /* + ** $db wal_hook ?script? ** $db update_hook ?script? ** $db rollback_hook ?script? */ + case DB_WAL_HOOK: case DB_UPDATE_HOOK: case DB_ROLLBACK_HOOK: { @@ -2738,6 +2777,8 @@ static int DbObjCmd(void *cd, Tcl_Interp *interp, int objc,Tcl_Obj *const*objv){ Tcl_Obj **ppHook; if( choice==DB_UPDATE_HOOK ){ ppHook = &pDb->pUpdateHook; + }else if( choice==DB_WAL_HOOK ){ + ppHook = &pDb->pWalHook; }else{ ppHook = &pDb->pRollbackHook; } @@ -2763,6 +2804,7 @@ static int DbObjCmd(void *cd, Tcl_Interp *interp, int objc,Tcl_Obj *const*objv){ sqlite3_update_hook(pDb->db, (pDb->pUpdateHook?DbUpdateHandler:0), pDb); sqlite3_rollback_hook(pDb->db,(pDb->pRollbackHook?DbRollbackHandler:0),pDb); + sqlite3_wal_hook(pDb->db,(pDb->pWalHook?DbWalHandler:0),pDb); break; } @@ -2855,8 +2897,7 @@ static int DbMain(void *cd, Tcl_Interp *interp, int objc,Tcl_Obj *const*objv){ if( strcmp(zArg,"-key")==0 ){ pKey = Tcl_GetByteArrayFromObj(objv[i+1], &nKey); }else if( strcmp(zArg, "-vfs")==0 ){ - i++; - zVfs = Tcl_GetString(objv[i]); + zVfs = Tcl_GetString(objv[i+1]); }else if( strcmp(zArg, "-readonly")==0 ){ int b; if( Tcl_GetBooleanFromObj(interp, objv[i+1], &b) ) return TCL_ERROR; @@ -3447,22 +3488,55 @@ static char zMainloop[] = "}\n" ; #endif +#if TCLSH==2 +static char zMainloop[] = +#include "spaceanal_tcl.h" +; +#endif -#define TCLSH_MAIN main /* Needed to fake out mktclapp */ -int TCLSH_MAIN(int argc, char **argv){ - Tcl_Interp *interp; - - /* Call sqlite3_shutdown() once before doing anything else. This is to - ** test that sqlite3_shutdown() can be safely called by a process before - ** sqlite3_initialize() is. */ - sqlite3_shutdown(); +#ifdef SQLITE_TEST +static void init_all(Tcl_Interp *); +static int init_all_cmd( + ClientData cd, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ - Tcl_FindExecutable(argv[0]); - interp = Tcl_CreateInterp(); + Tcl_Interp *slave; + if( objc!=2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "SLAVE"); + return TCL_ERROR; + } + + slave = Tcl_GetSlave(interp, Tcl_GetString(objv[1])); + if( !slave ){ + return TCL_ERROR; + } + + init_all(slave); + return TCL_OK; +} +#endif + +/* +** Configure the interpreter passed as the first argument to have access +** to the commands and linked variables that make up: +** +** * the [sqlite3] extension itself, +** +** * If SQLITE_TCLMD5 or SQLITE_TEST is defined, the Md5 commands, and +** +** * If SQLITE_TEST is set, the various test interfaces used by the Tcl +** test suite. 
+*/ +static void init_all(Tcl_Interp *interp){ Sqlite3_Init(interp); + #if defined(SQLITE_TEST) || defined(SQLITE_TCLMD5) Md5_Init(interp); #endif + #ifdef SQLITE_TEST { extern int Sqliteconfig_Init(Tcl_Interp*); @@ -3477,6 +3551,7 @@ int TCLSH_MAIN(int argc, char **argv){ extern int Sqlitetest9_Init(Tcl_Interp*); extern int Sqlitetestasync_Init(Tcl_Interp*); extern int Sqlitetest_autoext_Init(Tcl_Interp*); + extern int Sqlitetest_demovfs_Init(Tcl_Interp *); extern int Sqlitetest_func_Init(Tcl_Interp*); extern int Sqlitetest_hexio_Init(Tcl_Interp*); extern int Sqlitetest_init_Init(Tcl_Interp*); @@ -3490,6 +3565,8 @@ int TCLSH_MAIN(int argc, char **argv){ extern int SqlitetestOsinst_Init(Tcl_Interp*); extern int Sqlitetestbackup_Init(Tcl_Interp*); extern int Sqlitetestintarray_Init(Tcl_Interp*); + extern int Sqlitetestvfs_Init(Tcl_Interp *); + extern int SqlitetestStat_Init(Tcl_Interp*); Sqliteconfig_Init(interp); Sqlitetest1_Init(interp); @@ -3503,6 +3580,7 @@ int TCLSH_MAIN(int argc, char **argv){ Sqlitetest9_Init(interp); Sqlitetestasync_Init(interp); Sqlitetest_autoext_Init(interp); + Sqlitetest_demovfs_Init(interp); Sqlitetest_func_Init(interp); Sqlitetest_hexio_Init(interp); Sqlitetest_init_Init(interp); @@ -3515,12 +3593,34 @@ int TCLSH_MAIN(int argc, char **argv){ SqlitetestOsinst_Init(interp); Sqlitetestbackup_Init(interp); Sqlitetestintarray_Init(interp); + Sqlitetestvfs_Init(interp); + SqlitetestStat_Init(interp); + + Tcl_CreateObjCommand(interp,"load_testfixture_extensions",init_all_cmd,0,0); #ifdef SQLITE_SSE Sqlitetestsse_Init(interp); #endif } #endif +} + +#define TCLSH_MAIN main /* Needed to fake out mktclapp */ +int TCLSH_MAIN(int argc, char **argv){ + Tcl_Interp *interp; + + /* Call sqlite3_shutdown() once before doing anything else. This is to + ** test that sqlite3_shutdown() can be safely called by a process before + ** sqlite3_initialize() is. */ + sqlite3_shutdown(); + +#if TCLSH==2 + sqlite3_config(SQLITE_CONFIG_SINGLETHREAD); +#endif + Tcl_FindExecutable(argv[0]); + + interp = Tcl_CreateInterp(); + init_all(interp); if( argc>=2 ){ int i; char zArgc[32]; @@ -3532,14 +3632,14 @@ int TCLSH_MAIN(int argc, char **argv){ Tcl_SetVar(interp, "argv", argv[i], TCL_GLOBAL_ONLY | TCL_LIST_ELEMENT | TCL_APPEND_VALUE); } - if( Tcl_EvalFile(interp, argv[1])!=TCL_OK ){ + if( TCLSH==1 && Tcl_EvalFile(interp, argv[1])!=TCL_OK ){ const char *zInfo = Tcl_GetVar(interp, "errorInfo", TCL_GLOBAL_ONLY); if( zInfo==0 ) zInfo = Tcl_GetStringResult(interp); fprintf(stderr,"%s: %s\n", *argv, zInfo); return 1; } } - if( argc<=1 ){ + if( TCLSH==2 || argc<=1 ){ Tcl_GlobalEval(interp, zMainloop); } return 0; diff --git a/src/test1.c b/src/test1.c index 7c5d497..33ac7a1 100644 --- a/src/test1.c +++ b/src/test1.c @@ -1901,6 +1901,13 @@ static int sqlite_abort( int argc, /* Number of arguments */ char **argv /* Text of each argument */ ){ +#if defined(_MSC_VER) + /* We do this, otherwise the test will halt with a popup message + * that we have to click away before the test will continue. 
+ */ + _set_abort_behavior( 0, _CALL_REPORTFAULT ); +#endif + exit(255); assert( interp==0 ); /* This will always fail */ return TCL_OK; } @@ -2025,6 +2032,7 @@ static int test_stmt_status( } aOp[] = { { "SQLITE_STMTSTATUS_FULLSCAN_STEP", SQLITE_STMTSTATUS_FULLSCAN_STEP }, { "SQLITE_STMTSTATUS_SORT", SQLITE_STMTSTATUS_SORT }, + { "SQLITE_STMTSTATUS_AUTOINDEX", SQLITE_STMTSTATUS_AUTOINDEX }, }; if( objc!=4 ){ Tcl_WrongNumArgs(interp, 1, objv, "STMT PARAMETER RESETFLAG"); @@ -2625,7 +2633,7 @@ bad_args: } /* -** Usage: test_errstr +** Usage: sqlite3_test_errstr ** ** Test that the english language string equivalents for sqlite error codes ** are sane. The parameter is an integer representing an sqlite error code. @@ -3899,7 +3907,6 @@ static int test_global_recover( int objc, Tcl_Obj *CONST objv[] ){ -#ifndef SQLITE_OMIT_GLOBALRECOVER #ifndef SQLITE_OMIT_DEPRECATED int rc; if( objc!=1 ){ @@ -3908,7 +3915,6 @@ static int test_global_recover( } rc = sqlite3_global_recover(); Tcl_SetResult(interp, (char *)t1ErrorName(rc), TCL_STATIC); -#endif #endif return TCL_OK; } @@ -4608,7 +4614,7 @@ static int file_control_lasterrno_test( } /* -** tclcmd: file_control_lockproxy_test DB +** tclcmd: file_control_lockproxy_test DB PWD ** ** This TCL command runs the sqlite3_file_control interface and ** verifies correct operation of the SQLITE_GET_LOCKPROXYFILE and @@ -4621,15 +4627,18 @@ static int file_control_lockproxy_test( Tcl_Obj *CONST objv[] /* Command arguments */ ){ sqlite3 *db; + const char *zPwd; + int nPwd; - if( objc!=2 ){ + if( objc!=3 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", - Tcl_GetStringFromObj(objv[0], 0), " DB", 0); + Tcl_GetStringFromObj(objv[0], 0), " DB PWD", 0); return TCL_ERROR; } if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ){ return TCL_ERROR; } + zPwd = Tcl_GetStringFromObj(objv[2], &nPwd); #if !defined(SQLITE_ENABLE_LOCKING_STYLE) # if defined(__APPLE__) @@ -4640,9 +4649,15 @@ static int file_control_lockproxy_test( #endif #if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) { - char *proxyPath = "test.proxy"; char *testPath; int rc; + char proxyPath[400]; + + if( sizeof(proxyPath)flags&SQLITE_OPEN_MAIN_DB) ){ iSkip = 512; @@ -520,8 +520,30 @@ static int cfDeviceCharacteristics(sqlite3_file *pFile){ return g.iDeviceCharacteristics; } +/* +** Pass-throughs for WAL support. 
+*/ +static int cfShmLock(sqlite3_file *pFile, int ofst, int n, int flags){ + return sqlite3OsShmLock(((CrashFile*)pFile)->pRealFile, ofst, n, flags); +} +static void cfShmBarrier(sqlite3_file *pFile){ + sqlite3OsShmBarrier(((CrashFile*)pFile)->pRealFile); +} +static int cfShmUnmap(sqlite3_file *pFile, int delFlag){ + return sqlite3OsShmUnmap(((CrashFile*)pFile)->pRealFile, delFlag); +} +static int cfShmMap( + sqlite3_file *pFile, /* Handle open on database file */ + int iRegion, /* Region to retrieve */ + int sz, /* Size of regions */ + int w, /* True to extend file if necessary */ + void volatile **pp /* OUT: Mapped memory */ +){ + return sqlite3OsShmMap(((CrashFile*)pFile)->pRealFile, iRegion, sz, w, pp); +} + static const sqlite3_io_methods CrashFileVtab = { - 1, /* iVersion */ + 2, /* iVersion */ cfClose, /* xClose */ cfRead, /* xRead */ cfWrite, /* xWrite */ @@ -533,7 +555,11 @@ static const sqlite3_io_methods CrashFileVtab = { cfCheckReservedLock, /* xCheckReservedLock */ cfFileControl, /* xFileControl */ cfSectorSize, /* xSectorSize */ - cfDeviceCharacteristics /* xDeviceCharacteristics */ + cfDeviceCharacteristics, /* xDeviceCharacteristics */ + cfShmMap, /* xShmMap */ + cfShmLock, /* xShmLock */ + cfShmBarrier, /* xShmBarrier */ + cfShmUnmap /* xShmUnmap */ }; /* @@ -764,7 +790,7 @@ static int crashEnableCmd( ){ int isEnable; static sqlite3_vfs crashVfs = { - 1, /* iVersion */ + 2, /* iVersion */ 0, /* szOsFile */ 0, /* mxPathname */ 0, /* pNext */ @@ -781,7 +807,9 @@ static int crashEnableCmd( cfDlClose, /* xDlClose */ cfRandomness, /* xRandomness */ cfSleep, /* xSleep */ - cfCurrentTime /* xCurrentTime */ + cfCurrentTime, /* xCurrentTime */ + 0, /* xGetlastError */ + 0, /* xCurrentTimeInt64 */ }; if( objc!=2 ){ diff --git a/src/test_async.c b/src/test_async.c index c0c0cd1..c760eea 100644 --- a/src/test_async.c +++ b/src/test_async.c @@ -84,6 +84,7 @@ static Tcl_ThreadCreateType tclWriterThread(ClientData pIsStarted){ *((int *)pIsStarted) = 1; sqlite3async_run(); Tcl_MutexUnlock(&testasync_g_writerMutex); + Tcl_ExitThread(0); TCL_THREAD_CREATE_RETURN; } @@ -228,7 +229,7 @@ static int testAsyncControl( ** of this module. 
*/ int Sqlitetestasync_Init(Tcl_Interp *interp){ -#if SQLITE_ENABLE_ASYNCIO +#ifdef SQLITE_ENABLE_ASYNCIO Tcl_CreateObjCommand(interp,"sqlite3async_start",testAsyncStart,0,0); Tcl_CreateObjCommand(interp,"sqlite3async_wait",testAsyncWait,0,0); diff --git a/src/test_config.c b/src/test_config.c index 7350392..7ada13f 100644 --- a/src/test_config.c +++ b/src/test_config.c @@ -127,6 +127,12 @@ static void set_options(Tcl_Interp *interp){ Tcl_SetVar2(interp, "sqlite_options", "autoinc", "1", TCL_GLOBAL_ONLY); #endif +#ifdef SQLITE_OMIT_AUTOMATIC_INDEX + Tcl_SetVar2(interp, "sqlite_options", "autoindex", "0", TCL_GLOBAL_ONLY); +#else + Tcl_SetVar2(interp, "sqlite_options", "autoindex", "1", TCL_GLOBAL_ONLY); +#endif + #ifdef SQLITE_OMIT_AUTOVACUUM Tcl_SetVar2(interp, "sqlite_options", "autovacuum", "0", TCL_GLOBAL_ONLY); #else @@ -279,12 +285,6 @@ static void set_options(Tcl_Interp *interp){ Tcl_SetVar2(interp, "sqlite_options", "gettable", "1", TCL_GLOBAL_ONLY); #endif -#ifdef SQLITE_OMIT_GLOBALRECOVER - Tcl_SetVar2(interp, "sqlite_options", "globalrecover", "0", TCL_GLOBAL_ONLY); -#else - Tcl_SetVar2(interp, "sqlite_options", "globalrecover", "1", TCL_GLOBAL_ONLY); -#endif - #ifdef SQLITE_ENABLE_ICU Tcl_SetVar2(interp, "sqlite_options", "icu", "1", TCL_GLOBAL_ONLY); #else @@ -463,7 +463,7 @@ Tcl_SetVar2(interp, "sqlite_options", "long_double", Tcl_SetVar2(interp, "sqlite_options", "trigger", "1", TCL_GLOBAL_ONLY); #endif -#ifdef SQLITE_OMIT_TRUCATE_OPTIMIZATION +#ifdef SQLITE_OMIT_TRUNCATE_OPTIMIZATION Tcl_SetVar2(interp, "sqlite_options", "truncate_opt", "0", TCL_GLOBAL_ONLY); #else Tcl_SetVar2(interp, "sqlite_options", "truncate_opt", "1", TCL_GLOBAL_ONLY); @@ -493,6 +493,12 @@ Tcl_SetVar2(interp, "sqlite_options", "long_double", Tcl_SetVar2(interp, "sqlite_options", "vtab", "1", TCL_GLOBAL_ONLY); #endif +#ifdef SQLITE_OMIT_WAL + Tcl_SetVar2(interp, "sqlite_options", "wal", "0", TCL_GLOBAL_ONLY); +#else + Tcl_SetVar2(interp, "sqlite_options", "wal", "1", TCL_GLOBAL_ONLY); +#endif + #ifdef SQLITE_OMIT_WSD Tcl_SetVar2(interp, "sqlite_options", "wsd", "0", TCL_GLOBAL_ONLY); #else diff --git a/src/test_demovfs.c b/src/test_demovfs.c new file mode 100644 index 0000000..7cdae80 --- /dev/null +++ b/src/test_demovfs.c @@ -0,0 +1,671 @@ +/* +** 2010 April 7 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** An example of a simple VFS implementation that omits complex features +** often not required or not possible on embedded platforms. Also includes +** code to buffer writes to the journal file, which can be a significant +** performance improvement on some embedded platforms. +** +*/ + +/* +** OVERVIEW +** +** The code in this file implements a minimal SQLite VFS that can be +** used on Linux and other posix-like operating systems. The following +** system calls are used: +** +** File-system: access(), unlink(), getcwd() +** File IO: open(), read(), write(), fsync(), close(), fstat() +** Other: sleep(), usleep(), time() +** +** The following VFS features are omitted: +** +** 1. File locking. The user must ensure that there is at most one +** connection to each database when using this VFS. 
Multiple +** connections to a single shared-cache count as a single connection +** for the purposes of the previous statement. +** +** 2. The loading of dynamic extensions (shared libraries). +** +** 3. Temporary files. The user must configure SQLite to use in-memory +** temp files when using this VFS. The easiest way to do this is to +** compile with: +** +** -DSQLITE_TEMP_STORE=3 +** +** 4. File truncation. As of version 3.6.24, SQLite may run without +** a working xTruncate() call, providing the user does not configure +** SQLite to use "journal_mode=truncate", or use both +** "journal_mode=persist" and ATTACHed databases. +** +** It is assumed that the system uses UNIX-like path-names. Specifically, +** that '/' characters are used to separate path components and that +** a path-name is a relative path unless it begins with a '/'. And that +** no UTF-8 encoded paths are greater than 512 bytes in length. +** +** JOURNAL WRITE-BUFFERING +** +** To commit a transaction to the database, SQLite first writes rollback +** information into the journal file. This usually consists of 4 steps: +** +** 1. The rollback information is sequentially written into the journal +** file, starting at the start of the file. +** 2. The journal file is synced to disk. +** 3. A modification is made to the first few bytes of the journal file. +** 4. The journal file is synced to disk again. +** +** Most of the data is written in step 1 using a series of calls to the +** VFS xWrite() method. The buffers passed to the xWrite() calls are of +** various sizes. For example, as of version 3.6.24, when committing a +** transaction that modifies 3 pages of a database file that uses 4096 +** byte pages residing on a media with 512 byte sectors, SQLite makes +** eleven calls to the xWrite() method to create the rollback journal, +** as follows: +** +** Write offset | Bytes written +** ---------------------------- +** 0 512 +** 512 4 +** 516 4096 +** 4612 4 +** 4616 4 +** 4620 4096 +** 8716 4 +** 8720 4 +** 8724 4096 +** 12820 4 +** ++++++++++++SYNC+++++++++++ +** 0 12 +** ++++++++++++SYNC+++++++++++ +** +** On many operating systems, this is an efficient way to write to a file. +** However, on some embedded systems that do not cache writes in OS +** buffers it is much more efficient to write data in blocks that are +** an integer multiple of the sector-size in size and aligned at the +** start of a sector. +** +** To work around this, the code in this file allocates a fixed size +** buffer of SQLITE_DEMOVFS_BUFFERSZ using sqlite3_malloc() whenever a +** journal file is opened. It uses the buffer to coalesce sequential +** writes into aligned SQLITE_DEMOVFS_BUFFERSZ blocks. When SQLite +** invokes the xSync() method to sync the contents of the file to disk, +** all accumulated data is written out, even if it does not constitute +** a complete block. This means the actual IO to create the rollback +** journal for the example transaction above is this: +** +** Write offset | Bytes written +** ---------------------------- +** 0 8192 +** 8192 4632 +** ++++++++++++SYNC+++++++++++ +** 0 12 +** ++++++++++++SYNC+++++++++++ +** +** Much more efficient if the underlying OS is not caching write +** operations. +*/ + +#if !defined(SQLITE_TEST) || defined(SQLITE_OS_UNIX) + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +/* +** Size of the write buffer used by journal files in bytes. 
+*/ +#ifndef SQLITE_DEMOVFS_BUFFERSZ +# define SQLITE_DEMOVFS_BUFFERSZ 8192 +#endif + +/* +** When using this VFS, the sqlite3_file* handles that SQLite uses are +** actually pointers to instances of type DemoFile. +*/ +typedef struct DemoFile DemoFile; +struct DemoFile { + sqlite3_file base; /* Base class. Must be first. */ + int fd; /* File descriptor */ + + char *aBuffer; /* Pointer to malloc'd buffer */ + int nBuffer; /* Valid bytes of data in zBuffer */ + sqlite3_int64 iBufferOfst; /* Offset in file of zBuffer[0] */ +}; + +/* +** Write directly to the file passed as the first argument. Even if the +** file has a write-buffer (DemoFile.aBuffer), ignore it. +*/ +static int demoDirectWrite( + DemoFile *p, /* File handle */ + const void *zBuf, /* Buffer containing data to write */ + int iAmt, /* Size of data to write in bytes */ + sqlite_int64 iOfst /* File offset to write to */ +){ + off_t ofst; /* Return value from lseek() */ + size_t nWrite; /* Return value from write() */ + + ofst = lseek(p->fd, iOfst, SEEK_SET); + if( ofst!=iOfst ){ + return SQLITE_IOERR_WRITE; + } + + nWrite = write(p->fd, zBuf, iAmt); + if( nWrite!=iAmt ){ + return SQLITE_IOERR_WRITE; + } + + return SQLITE_OK; +} + +/* +** Flush the contents of the DemoFile.aBuffer buffer to disk. This is a +** no-op if this particular file does not have a buffer (i.e. it is not +** a journal file) or if the buffer is currently empty. +*/ +static int demoFlushBuffer(DemoFile *p){ + int rc = SQLITE_OK; + if( p->nBuffer ){ + rc = demoDirectWrite(p, p->aBuffer, p->nBuffer, p->iBufferOfst); + p->nBuffer = 0; + } + return rc; +} + +/* +** Close a file. +*/ +static int demoClose(sqlite3_file *pFile){ + int rc; + DemoFile *p = (DemoFile*)pFile; + rc = demoFlushBuffer(p); + sqlite3_free(p->aBuffer); + close(p->fd); + return rc; +} + +/* +** Read data from a file. +*/ +static int demoRead( + sqlite3_file *pFile, + void *zBuf, + int iAmt, + sqlite_int64 iOfst +){ + DemoFile *p = (DemoFile*)pFile; + off_t ofst; /* Return value from lseek() */ + int nRead; /* Return value from read() */ + int rc; /* Return code from demoFlushBuffer() */ + + /* Flush any data in the write buffer to disk in case this operation + ** is trying to read data the file-region currently cached in the buffer. + ** It would be possible to detect this case and possibly save an + ** unnecessary write here, but in practice SQLite will rarely read from + ** a journal file when there is data cached in the write-buffer. + */ + rc = demoFlushBuffer(p); + if( rc!=SQLITE_OK ){ + return rc; + } + + ofst = lseek(p->fd, iOfst, SEEK_SET); + if( ofst!=iOfst ){ + return SQLITE_IOERR_READ; + } + nRead = read(p->fd, zBuf, iAmt); + + if( nRead==iAmt ){ + return SQLITE_OK; + }else if( nRead>=0 ){ + return SQLITE_IOERR_SHORT_READ; + } + + return SQLITE_IOERR_READ; +} + +/* +** Write data to a crash-file. +*/ +static int demoWrite( + sqlite3_file *pFile, + const void *zBuf, + int iAmt, + sqlite_int64 iOfst +){ + DemoFile *p = (DemoFile*)pFile; + + if( p->aBuffer ){ + char *z = (char *)zBuf; /* Pointer to remaining data to write */ + int n = iAmt; /* Number of bytes at z */ + sqlite3_int64 i = iOfst; /* File offset to write to */ + + while( n>0 ){ + int nCopy; /* Number of bytes to copy into buffer */ + + /* If the buffer is full, or if this data is not being written directly + ** following the data already buffered, flush the buffer. Flushing + ** the buffer is a no-op if it is empty. 
+ */ + if( p->nBuffer==SQLITE_DEMOVFS_BUFFERSZ || p->iBufferOfst+p->nBuffer!=i ){ + int rc = demoFlushBuffer(p); + if( rc!=SQLITE_OK ){ + return rc; + } + } + assert( p->nBuffer==0 || p->iBufferOfst+p->nBuffer==i ); + p->iBufferOfst = i - p->nBuffer; + + /* Copy as much data as possible into the buffer. */ + nCopy = SQLITE_DEMOVFS_BUFFERSZ - p->nBuffer; + if( nCopy>n ){ + nCopy = n; + } + memcpy(&p->aBuffer[p->nBuffer], z, nCopy); + p->nBuffer += nCopy; + + n -= nCopy; + i += nCopy; + z += nCopy; + } + }else{ + return demoDirectWrite(p, zBuf, iAmt, iOfst); + } + + return SQLITE_OK; +} + +/* +** Truncate a file. This is a no-op for this VFS (see header comments at +** the top of the file). +*/ +static int demoTruncate(sqlite3_file *pFile, sqlite_int64 size){ +#if 0 + if( ftruncate(((DemoFile *)pFile)->fd, size) ) return SQLITE_IOERR_TRUNCATE; +#endif + return SQLITE_OK; +} + +/* +** Sync the contents of the file to the persistent media. +*/ +static int demoSync(sqlite3_file *pFile, int flags){ + DemoFile *p = (DemoFile*)pFile; + int rc; + + rc = demoFlushBuffer(p); + if( rc!=SQLITE_OK ){ + return rc; + } + + rc = fsync(p->fd); + return (rc==0 ? SQLITE_OK : SQLITE_IOERR_FSYNC); +} + +/* +** Write the size of the file in bytes to *pSize. +*/ +static int demoFileSize(sqlite3_file *pFile, sqlite_int64 *pSize){ + DemoFile *p = (DemoFile*)pFile; + int rc; /* Return code from fstat() call */ + struct stat sStat; /* Output of fstat() call */ + + /* Flush the contents of the buffer to disk. As with the flush in the + ** demoRead() method, it would be possible to avoid this and save a write + ** here and there. But in practice this comes up so infrequently it is + ** not worth the trouble. + */ + rc = demoFlushBuffer(p); + if( rc!=SQLITE_OK ){ + return rc; + } + + rc = fstat(p->fd, &sStat); + if( rc!=0 ) return SQLITE_IOERR_FSTAT; + *pSize = sStat.st_size; + return SQLITE_OK; +} + +/* +** Locking functions. The xLock() and xUnlock() methods are both no-ops. +** The xCheckReservedLock() always indicates that no other process holds +** a reserved lock on the database file. This ensures that if a hot-journal +** file is found in the file-system it is rolled back. +*/ +static int demoLock(sqlite3_file *pFile, int eLock){ + return SQLITE_OK; +} +static int demoUnlock(sqlite3_file *pFile, int eLock){ + return SQLITE_OK; +} +static int demoCheckReservedLock(sqlite3_file *pFile, int *pResOut){ + *pResOut = 0; + return SQLITE_OK; +} + +/* +** No xFileControl() verbs are implemented by this VFS. +*/ +static int demoFileControl(sqlite3_file *pFile, int op, void *pArg){ + return SQLITE_OK; +} + +/* +** The xSectorSize() and xDeviceCharacteristics() methods. These two +** may return special values allowing SQLite to optimize file-system +** access to some extent. But it is also safe to simply return 0. +*/ +static int demoSectorSize(sqlite3_file *pFile){ + return 0; +} +static int demoDeviceCharacteristics(sqlite3_file *pFile){ + return 0; +} + +/* +** Open a file handle. 
+*/ +static int demoOpen( + sqlite3_vfs *pVfs, /* VFS */ + const char *zName, /* File to open, or 0 for a temp file */ + sqlite3_file *pFile, /* Pointer to DemoFile struct to populate */ + int flags, /* Input SQLITE_OPEN_XXX flags */ + int *pOutFlags /* Output SQLITE_OPEN_XXX flags (or NULL) */ +){ + static const sqlite3_io_methods demoio = { + 1, /* iVersion */ + demoClose, /* xClose */ + demoRead, /* xRead */ + demoWrite, /* xWrite */ + demoTruncate, /* xTruncate */ + demoSync, /* xSync */ + demoFileSize, /* xFileSize */ + demoLock, /* xLock */ + demoUnlock, /* xUnlock */ + demoCheckReservedLock, /* xCheckReservedLock */ + demoFileControl, /* xFileControl */ + demoSectorSize, /* xSectorSize */ + demoDeviceCharacteristics /* xDeviceCharacteristics */ + }; + + DemoFile *p = (DemoFile*)pFile; /* Populate this structure */ + int oflags = 0; /* flags to pass to open() call */ + char *aBuf = 0; + + if( zName==0 ){ + return SQLITE_IOERR; + } + + if( flags&SQLITE_OPEN_MAIN_JOURNAL ){ + aBuf = (char *)sqlite3_malloc(SQLITE_DEMOVFS_BUFFERSZ); + if( !aBuf ){ + return SQLITE_NOMEM; + } + } + + if( flags&SQLITE_OPEN_EXCLUSIVE ) oflags |= O_EXCL; + if( flags&SQLITE_OPEN_CREATE ) oflags |= O_CREAT; + if( flags&SQLITE_OPEN_READONLY ) oflags |= O_RDONLY; + if( flags&SQLITE_OPEN_READWRITE ) oflags |= O_RDWR; + + memset(p, 0, sizeof(DemoFile)); + p->fd = open(zName, oflags, 0600); + if( p->fd<0 ){ + sqlite3_free(aBuf); + return SQLITE_CANTOPEN; + } + p->aBuffer = aBuf; + + if( pOutFlags ){ + *pOutFlags = flags; + } + p->base.pMethods = &demoio; + return SQLITE_OK; +} + +/* +** Delete the file identified by argument zPath. If the dirSync parameter +** is non-zero, then ensure the file-system modification to delete the +** file has been synced to disk before returning. +*/ +static int demoDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){ + int rc; + rc = unlink(zPath); + if( rc==0 && dirSync ){ + int dfd; /* File descriptor open on directory */ + int i; /* Iterator variable */ + char zDir[pVfs->mxPathname+1];/* Name of directory containing file zPath */ + + /* Figure out the directory name from the path of the file deleted. */ + sqlite3_snprintf(pVfs->mxPathname, zDir, "%s", zPath); + zDir[pVfs->mxPathname] = '\0'; + for(i=strlen(zDir); i>1 && zDir[i]!='/'; i++); + zDir[i] = '\0'; + + /* Open a file-descriptor on the directory. Sync. Close. */ + dfd = open(zDir, O_RDONLY, 0); + if( dfd<0 ){ + rc = -1; + }else{ + rc = fsync(dfd); + close(dfd); + } + } + return (rc==0 ? SQLITE_OK : SQLITE_IOERR_DELETE); +} + +#ifndef F_OK +# define F_OK 0 +#endif +#ifndef R_OK +# define R_OK 4 +#endif +#ifndef W_OK +# define W_OK 2 +#endif + +/* +** Query the file-system to see if the named file exists, is readable or +** is both readable and writable. +*/ +static int demoAccess( + sqlite3_vfs *pVfs, + const char *zPath, + int flags, + int *pResOut +){ + int rc; /* access() return code */ + int eAccess = F_OK; /* Second argument to access() */ + + assert( flags==SQLITE_ACCESS_EXISTS /* access(zPath, F_OK) */ + || flags==SQLITE_ACCESS_READ /* access(zPath, R_OK) */ + || flags==SQLITE_ACCESS_READWRITE /* access(zPath, R_OK|W_OK) */ + ); + + if( flags==SQLITE_ACCESS_READWRITE ) eAccess = R_OK|W_OK; + if( flags==SQLITE_ACCESS_READ ) eAccess = R_OK; + + rc = access(zPath, eAccess); + *pResOut = (rc==0); + return SQLITE_OK; +} + +/* +** Argument zPath points to a nul-terminated string containing a file path. +** If zPath is an absolute path, then it is copied as is into the output +** buffer. 
Otherwise, if it is a relative path, then the equivalent full +** path is written to the output buffer. +** +** This function assumes that paths are UNIX style. Specifically, that: +** +** 1. Path components are separated by a '/'. and +** 2. Full paths begin with a '/' character. +*/ +static int demoFullPathname( + sqlite3_vfs *pVfs, /* VFS */ + const char *zPath, /* Input path (possibly a relative path) */ + int nPathOut, /* Size of output buffer in bytes */ + char *zPathOut /* Pointer to output buffer */ +){ + char zDir[pVfs->mxPathname+1]; + if( zPath[0]=='/' ){ + zDir[0] = '\0'; + }else{ + getcwd(zDir, sizeof(zDir)); + } + zDir[pVfs->mxPathname] = '\0'; + + sqlite3_snprintf(nPathOut, zPathOut, "%s/%s", zDir, zPath); + zPathOut[nPathOut-1] = '\0'; + + return SQLITE_OK; +} + +/* +** The following four VFS methods: +** +** xDlOpen +** xDlError +** xDlSym +** xDlClose +** +** are supposed to implement the functionality needed by SQLite to load +** extensions compiled as shared objects. This simple VFS does not support +** this functionality, so the following functions are no-ops. +*/ +static void *demoDlOpen(sqlite3_vfs *pVfs, const char *zPath){ + return 0; +} +static void demoDlError(sqlite3_vfs *pVfs, int nByte, char *zErrMsg){ + sqlite3_snprintf(nByte, zErrMsg, "Loadable extensions are not supported"); + zErrMsg[nByte-1] = '\0'; +} +static void (*demoDlSym(sqlite3_vfs *pVfs, void *pH, const char *z))(void){ + return 0; +} +static void demoDlClose(sqlite3_vfs *pVfs, void *pHandle){ + return; +} + +/* +** Parameter zByte points to a buffer nByte bytes in size. Populate this +** buffer with pseudo-random data. +*/ +static int demoRandomness(sqlite3_vfs *pVfs, int nByte, char *zByte){ + return SQLITE_OK; +} + +/* +** Sleep for at least nMicro microseconds. Return the (approximate) number +** of microseconds slept for. +*/ +static int demoSleep(sqlite3_vfs *pVfs, int nMicro){ + sleep(nMicro / 1000000); + usleep(nMicro % 1000000); + return nMicro; +} + +/* +** Set *pTime to the current UTC time expressed as a Julian day. Return +** SQLITE_OK if successful, or an error code otherwise. +** +** http://en.wikipedia.org/wiki/Julian_day +** +** This implementation is not very good. The current time is rounded to +** an integer number of seconds. Also, assuming time_t is a signed 32-bit +** value, it will stop working some time in the year 2038 AD (the so-called +** "year 2038" problem that afflicts systems that store time this way). +*/ +static int demoCurrentTime(sqlite3_vfs *pVfs, double *pTime){ + time_t t = time(0); + *pTime = t/86400.0 + 2440587.5; + return SQLITE_OK; +} + +/* +** This function returns a pointer to the VFS implemented in this file. 
+** To make the VFS available to SQLite: +** +** sqlite3_vfs_register(sqlite3_demovfs(), 0); +*/ +sqlite3_vfs *sqlite3_demovfs(void){ + static sqlite3_vfs demovfs = { + 1, /* iVersion */ + sizeof(DemoFile), /* szOsFile */ + 512, /* mxPathname */ + 0, /* pNext */ + "demo", /* zName */ + 0, /* pAppData */ + demoOpen, /* xOpen */ + demoDelete, /* xDelete */ + demoAccess, /* xAccess */ + demoFullPathname, /* xFullPathname */ + demoDlOpen, /* xDlOpen */ + demoDlError, /* xDlError */ + demoDlSym, /* xDlSym */ + demoDlClose, /* xDlClose */ + demoRandomness, /* xRandomness */ + demoSleep, /* xSleep */ + demoCurrentTime, /* xCurrentTime */ + }; + return &demovfs; +} + +#endif /* !defined(SQLITE_TEST) || defined(SQLITE_OS_UNIX) */ + + +#ifdef SQLITE_TEST + +#include + +#ifdef SQLITE_OS_UNIX +static int register_demovfs( + ClientData clientData, /* Pointer to sqlite3_enable_XXX function */ + Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ + int objc, /* Number of arguments */ + Tcl_Obj *CONST objv[] /* Command arguments */ +){ + sqlite3_vfs_register(sqlite3_demovfs(), 1); + return TCL_OK; +} +static int unregister_demovfs( + ClientData clientData, /* Pointer to sqlite3_enable_XXX function */ + Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ + int objc, /* Number of arguments */ + Tcl_Obj *CONST objv[] /* Command arguments */ +){ + sqlite3_vfs_unregister(sqlite3_demovfs()); + return TCL_OK; +} + +/* +** Register commands with the TCL interpreter. +*/ +int Sqlitetest_demovfs_Init(Tcl_Interp *interp){ + Tcl_CreateObjCommand(interp, "register_demovfs", register_demovfs, 0, 0); + Tcl_CreateObjCommand(interp, "unregister_demovfs", unregister_demovfs, 0, 0); + return TCL_OK; +} + +#else +int Sqlitetest_demovfs_Init(Tcl_Interp *interp){ return TCL_OK; } +#endif + +#endif /* SQLITE_TEST */ diff --git a/src/test_devsym.c b/src/test_devsym.c index 836a450..21f0f68 100644 --- a/src/test_devsym.c +++ b/src/test_devsym.c @@ -50,6 +50,10 @@ static int devsymCheckReservedLock(sqlite3_file*, int *); static int devsymFileControl(sqlite3_file*, int op, void *pArg); static int devsymSectorSize(sqlite3_file*); static int devsymDeviceCharacteristics(sqlite3_file*); +static int devsymShmLock(sqlite3_file*,int,int,int); +static int devsymShmMap(sqlite3_file*,int,int,int, void volatile **); +static void devsymShmBarrier(sqlite3_file*); +static int devsymShmUnmap(sqlite3_file*,int); /* ** Method declarations for devsym_vfs. 
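
Stepping back to the demo VFS defined in src/test_demovfs.c above: a minimal sketch of how an application might use it, assuming test_demovfs.c is compiled into the application. The database name, flags and helper name below are illustrative only and are not part of the patch.

/* Illustrative use of the demo VFS (sketch, not part of the patch). */
#include "sqlite3.h"

extern sqlite3_vfs *sqlite3_demovfs(void);      /* from test_demovfs.c above */

int open_with_demovfs(sqlite3 **pDb){
  int rc = sqlite3_vfs_register(sqlite3_demovfs(), 0);   /* 0: do not make it the default */
  if( rc==SQLITE_OK ){
    rc = sqlite3_open_v2("demo.db", pDb,
                         SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE, "demo");
  }
  return rc;
}

Because demoLock(), demoUnlock() and demoCheckReservedLock() are all no-ops, a setup like this presumably only makes sense while a single connection is using the database file at any one time.
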
@@ -69,7 +73,7 @@ static int devsymSleep(sqlite3_vfs*, int microseconds); static int devsymCurrentTime(sqlite3_vfs*, double*); static sqlite3_vfs devsym_vfs = { - 1, /* iVersion */ + 2, /* iVersion */ sizeof(devsym_file), /* szOsFile */ DEVSYM_MAX_PATHNAME, /* mxPathname */ 0, /* pNext */ @@ -92,11 +96,13 @@ static sqlite3_vfs devsym_vfs = { #endif /* SQLITE_OMIT_LOAD_EXTENSION */ devsymRandomness, /* xRandomness */ devsymSleep, /* xSleep */ - devsymCurrentTime /* xCurrentTime */ + devsymCurrentTime, /* xCurrentTime */ + 0, /* xGetLastError */ + 0 /* xCurrentTimeInt64 */ }; static sqlite3_io_methods devsym_io_methods = { - 1, /* iVersion */ + 2, /* iVersion */ devsymClose, /* xClose */ devsymRead, /* xRead */ devsymWrite, /* xWrite */ @@ -108,7 +114,11 @@ static sqlite3_io_methods devsym_io_methods = { devsymCheckReservedLock, /* xCheckReservedLock */ devsymFileControl, /* xFileControl */ devsymSectorSize, /* xSectorSize */ - devsymDeviceCharacteristics /* xDeviceCharacteristics */ + devsymDeviceCharacteristics, /* xDeviceCharacteristics */ + devsymShmMap, /* xShmMap */ + devsymShmLock, /* xShmLock */ + devsymShmBarrier, /* xShmBarrier */ + devsymShmUnmap /* xShmUnmap */ }; struct DevsymGlobal { @@ -222,6 +232,34 @@ static int devsymDeviceCharacteristics(sqlite3_file *pFile){ return g.iDeviceChar; } +/* +** Shared-memory methods are all pass-thrus. +*/ +static int devsymShmLock(sqlite3_file *pFile, int ofst, int n, int flags){ + devsym_file *p = (devsym_file *)pFile; + return sqlite3OsShmLock(p->pReal, ofst, n, flags); +} +static int devsymShmMap( + sqlite3_file *pFile, + int iRegion, + int szRegion, + int isWrite, + void volatile **pp +){ + devsym_file *p = (devsym_file *)pFile; + return sqlite3OsShmMap(p->pReal, iRegion, szRegion, isWrite, pp); +} +static void devsymShmBarrier(sqlite3_file *pFile){ + devsym_file *p = (devsym_file *)pFile; + sqlite3OsShmBarrier(p->pReal); +} +static int devsymShmUnmap(sqlite3_file *pFile, int delFlag){ + devsym_file *p = (devsym_file *)pFile; + return sqlite3OsShmUnmap(p->pReal, delFlag); +} + + + /* ** Open an devsym file handle. */ @@ -330,9 +368,10 @@ static int devsymSleep(sqlite3_vfs *pVfs, int nMicro){ ** Return the current time as a Julian Day number in *pTimeOut. */ static int devsymCurrentTime(sqlite3_vfs *pVfs, double *pTimeOut){ - return sqlite3OsCurrentTime(g.pVfs, pTimeOut); + return g.pVfs->xCurrentTime(g.pVfs, pTimeOut); } + /* ** This procedure registers the devsym vfs with SQLite. If the argument is ** true, the devsym vfs becomes the new default vfs. 
It is the only publicly @@ -346,9 +385,13 @@ void devsym_register(int iDeviceChar, int iSectorSize){ } if( iDeviceChar>=0 ){ g.iDeviceChar = iDeviceChar; + }else{ + g.iDeviceChar = 0; } if( iSectorSize>=0 ){ g.iSectorSize = iSectorSize; + }else{ + g.iSectorSize = 512; } } diff --git a/src/test_journal.c b/src/test_journal.c index f89f3a4..97e0e1b 100644 --- a/src/test_journal.c +++ b/src/test_journal.c @@ -161,9 +161,10 @@ static void jtDlClose(sqlite3_vfs*, void*); static int jtRandomness(sqlite3_vfs*, int nByte, char *zOut); static int jtSleep(sqlite3_vfs*, int microseconds); static int jtCurrentTime(sqlite3_vfs*, double*); +static int jtCurrentTimeInt64(sqlite3_vfs*, sqlite3_int64*); static sqlite3_vfs jt_vfs = { - 1, /* iVersion */ + 2, /* iVersion */ sizeof(jt_file), /* szOsFile */ JT_MAX_PATHNAME, /* mxPathname */ 0, /* pNext */ @@ -179,7 +180,9 @@ static sqlite3_vfs jt_vfs = { jtDlClose, /* xDlClose */ jtRandomness, /* xRandomness */ jtSleep, /* xSleep */ - jtCurrentTime /* xCurrentTime */ + jtCurrentTime, /* xCurrentTime */ + 0, /* xGetLastError */ + jtCurrentTimeInt64 /* xCurrentTimeInt64 */ }; static sqlite3_io_methods jt_io_methods = { @@ -801,7 +804,13 @@ static int jtSleep(sqlite3_vfs *pVfs, int nMicro){ ** Return the current time as a Julian Day number in *pTimeOut. */ static int jtCurrentTime(sqlite3_vfs *pVfs, double *pTimeOut){ - return sqlite3OsCurrentTime(g.pVfs, pTimeOut); + return g.pVfs->xCurrentTime(g.pVfs, pTimeOut); +} +/* +** Return the current time as a Julian Day number in *pTimeOut. +*/ +static int jtCurrentTimeInt64(sqlite3_vfs *pVfs, sqlite3_int64 *pTimeOut){ + return g.pVfs->xCurrentTimeInt64(g.pVfs, pTimeOut); } /************************************************************************** @@ -821,6 +830,11 @@ int jt_register(char *zWrap, int isDefault){ return SQLITE_ERROR; } jt_vfs.szOsFile = sizeof(jt_file) + g.pVfs->szOsFile; + if( g.pVfs->iVersion==1 ){ + jt_vfs.iVersion = 1; + }else if( g.pVfs->xCurrentTimeInt64==0 ){ + jt_vfs.xCurrentTimeInt64 = 0; + } sqlite3_vfs_register(&jt_vfs, isDefault); return SQLITE_OK; } diff --git a/src/test_malloc.c b/src/test_malloc.c index 5556cc1..1267f6e 100644 --- a/src/test_malloc.c +++ b/src/test_malloc.c @@ -1287,6 +1287,7 @@ static int test_db_status( int op; } aOp[] = { { "SQLITE_DBSTATUS_LOOKASIDE_USED", SQLITE_DBSTATUS_LOOKASIDE_USED }, + { "SQLITE_DBSTATUS_CACHE_USED", SQLITE_DBSTATUS_CACHE_USED }, }; Tcl_Obj *pResult; if( objc!=4 ){ @@ -1358,6 +1359,25 @@ static int test_install_memsys3( return TCL_OK; } +static int test_vfs_oom_test( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + extern int sqlite3_memdebug_vfs_oom_test; + if( objc>2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "?INTEGER?"); + return TCL_ERROR; + }else if( objc==2 ){ + int iNew; + if( Tcl_GetIntFromObj(interp, objv[1], &iNew) ) return TCL_ERROR; + sqlite3_memdebug_vfs_oom_test = iNew; + } + Tcl_SetObjResult(interp, Tcl_NewIntObj(sqlite3_memdebug_vfs_oom_test)); + return TCL_OK; +} + /* ** Register commands with the TCL interpreter. */ @@ -1395,6 +1415,7 @@ int Sqlitetest_malloc_Init(Tcl_Interp *interp){ { "sqlite3_dump_memsys3", test_dump_memsys3 ,3 }, { "sqlite3_dump_memsys5", test_dump_memsys3 ,5 }, { "sqlite3_install_memsys3", test_install_memsys3 ,0 }, + { "sqlite3_memdebug_vfs_oom_test", test_vfs_oom_test ,0 }, }; int i; for(i=0; i #include + /* -** Maximum pathname length supported by the inst backend. +** Maximum pathname length supported by the vfslog backend. 
*/ #define INST_MAX_PATHNAME 512 - -/* File methods */ -/* Vfs methods */ #define OS_ACCESS 1 #define OS_CHECKRESERVEDLOCK 2 #define OS_CLOSE 3 @@ -115,281 +98,401 @@ #define OS_TRUNCATE 18 #define OS_UNLOCK 19 #define OS_WRITE 20 +#define OS_SHMUNMAP 22 +#define OS_SHMMAP 23 +#define OS_SHMLOCK 25 +#define OS_SHMBARRIER 26 +#define OS_ANNOTATE 28 -#define OS_NUMEVENTS 21 +#define OS_NUMEVENTS 29 -#define BINARYLOG_STRING 30 -#define BINARYLOG_MARKER 31 +#define VFSLOG_BUFFERSIZE 8192 -#define BINARYLOG_PREPARE_V2 64 -#define BINARYLOG_STEP 65 -#define BINARYLOG_FINALIZE 66 +typedef struct VfslogVfs VfslogVfs; +typedef struct VfslogFile VfslogFile; -struct InstVfs { - sqlite3_vfs base; - sqlite3_vfs *pVfs; - - void *pClient; - void (*xDel)(void *); - void (*xCall)(void *, int, int, sqlite3_int64, int, const char *, int, int, sqlite3_int64); - - /* Counters */ - sqlite3_int64 aTime[OS_NUMEVENTS]; - int aCount[OS_NUMEVENTS]; - - int iNextFileId; +struct VfslogVfs { + sqlite3_vfs base; /* VFS methods */ + sqlite3_vfs *pVfs; /* Parent VFS */ + int iNextFileId; /* Next file id */ + sqlite3_file *pLog; /* Log file handle */ + sqlite3_int64 iOffset; /* Log file offset of start of write buffer */ + int nBuf; /* Number of valid bytes in aBuf[] */ + char aBuf[VFSLOG_BUFFERSIZE]; /* Write buffer */ }; -typedef struct InstVfs InstVfs; -#define REALVFS(p) (((InstVfs *)(p))->pVfs) - -typedef struct inst_file inst_file; -struct inst_file { - sqlite3_file base; - sqlite3_file *pReal; - InstVfs *pInstVfs; - const char *zName; - int iFileId; /* File id number */ - int flags; +struct VfslogFile { + sqlite3_file base; /* IO methods */ + sqlite3_file *pReal; /* Underlying file handle */ + sqlite3_vfs *pVfslog; /* Associated VsflogVfs object */ + int iFileId; /* File id number */ }; +#define REALVFS(p) (((VfslogVfs *)(p))->pVfs) + + + /* -** Method declarations for inst_file. +** Method declarations for vfslog_file. 
*/ -static int instClose(sqlite3_file*); -static int instRead(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); -static int instWrite(sqlite3_file*,const void*,int iAmt, sqlite3_int64 iOfst); -static int instTruncate(sqlite3_file*, sqlite3_int64 size); -static int instSync(sqlite3_file*, int flags); -static int instFileSize(sqlite3_file*, sqlite3_int64 *pSize); -static int instLock(sqlite3_file*, int); -static int instUnlock(sqlite3_file*, int); -static int instCheckReservedLock(sqlite3_file*, int *pResOut); -static int instFileControl(sqlite3_file*, int op, void *pArg); -static int instSectorSize(sqlite3_file*); -static int instDeviceCharacteristics(sqlite3_file*); +static int vfslogClose(sqlite3_file*); +static int vfslogRead(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); +static int vfslogWrite(sqlite3_file*,const void*,int iAmt, sqlite3_int64 iOfst); +static int vfslogTruncate(sqlite3_file*, sqlite3_int64 size); +static int vfslogSync(sqlite3_file*, int flags); +static int vfslogFileSize(sqlite3_file*, sqlite3_int64 *pSize); +static int vfslogLock(sqlite3_file*, int); +static int vfslogUnlock(sqlite3_file*, int); +static int vfslogCheckReservedLock(sqlite3_file*, int *pResOut); +static int vfslogFileControl(sqlite3_file*, int op, void *pArg); +static int vfslogSectorSize(sqlite3_file*); +static int vfslogDeviceCharacteristics(sqlite3_file*); + +static int vfslogShmLock(sqlite3_file *pFile, int ofst, int n, int flags); +static int vfslogShmMap(sqlite3_file *pFile,int,int,int,volatile void **); +static void vfslogShmBarrier(sqlite3_file*); +static int vfslogShmUnmap(sqlite3_file *pFile, int deleteFlag); /* -** Method declarations for inst_vfs. +** Method declarations for vfslog_vfs. */ -static int instOpen(sqlite3_vfs*, const char *, sqlite3_file*, int , int *); -static int instDelete(sqlite3_vfs*, const char *zName, int syncDir); -static int instAccess(sqlite3_vfs*, const char *zName, int flags, int *); -static int instFullPathname(sqlite3_vfs*, const char *zName, int, char *zOut); -static void *instDlOpen(sqlite3_vfs*, const char *zFilename); -static void instDlError(sqlite3_vfs*, int nByte, char *zErrMsg); -static void (*instDlSym(sqlite3_vfs *pVfs, void *p, const char*zSym))(void); -static void instDlClose(sqlite3_vfs*, void*); -static int instRandomness(sqlite3_vfs*, int nByte, char *zOut); -static int instSleep(sqlite3_vfs*, int microseconds); -static int instCurrentTime(sqlite3_vfs*, double*); +static int vfslogOpen(sqlite3_vfs*, const char *, sqlite3_file*, int , int *); +static int vfslogDelete(sqlite3_vfs*, const char *zName, int syncDir); +static int vfslogAccess(sqlite3_vfs*, const char *zName, int flags, int *); +static int vfslogFullPathname(sqlite3_vfs*, const char *zName, int, char *zOut); +static void *vfslogDlOpen(sqlite3_vfs*, const char *zFilename); +static void vfslogDlError(sqlite3_vfs*, int nByte, char *zErrMsg); +static void (*vfslogDlSym(sqlite3_vfs *pVfs, void *p, const char*zSym))(void); +static void vfslogDlClose(sqlite3_vfs*, void*); +static int vfslogRandomness(sqlite3_vfs*, int nByte, char *zOut); +static int vfslogSleep(sqlite3_vfs*, int microseconds); +static int vfslogCurrentTime(sqlite3_vfs*, double*); -static void binarylog_blob(sqlite3_vfs *, const char *, int, int); +static int vfslogGetLastError(sqlite3_vfs*, int, char *); +static int vfslogCurrentTimeInt64(sqlite3_vfs*, sqlite3_int64*); -static sqlite3_vfs inst_vfs = { - 1, /* iVersion */ - sizeof(inst_file), /* szOsFile */ - INST_MAX_PATHNAME, /* mxPathname */ - 0, /* pNext */ - 0, /* 
zName */ - 0, /* pAppData */ - instOpen, /* xOpen */ - instDelete, /* xDelete */ - instAccess, /* xAccess */ - instFullPathname, /* xFullPathname */ - instDlOpen, /* xDlOpen */ - instDlError, /* xDlError */ - instDlSym, /* xDlSym */ - instDlClose, /* xDlClose */ - instRandomness, /* xRandomness */ - instSleep, /* xSleep */ - instCurrentTime /* xCurrentTime */ +static sqlite3_vfs vfslog_vfs = { + 1, /* iVersion */ + sizeof(VfslogFile), /* szOsFile */ + INST_MAX_PATHNAME, /* mxPathname */ + 0, /* pNext */ + 0, /* zName */ + 0, /* pAppData */ + vfslogOpen, /* xOpen */ + vfslogDelete, /* xDelete */ + vfslogAccess, /* xAccess */ + vfslogFullPathname, /* xFullPathname */ + vfslogDlOpen, /* xDlOpen */ + vfslogDlError, /* xDlError */ + vfslogDlSym, /* xDlSym */ + vfslogDlClose, /* xDlClose */ + vfslogRandomness, /* xRandomness */ + vfslogSleep, /* xSleep */ + vfslogCurrentTime, /* xCurrentTime */ + vfslogGetLastError, /* xGetLastError */ + vfslogCurrentTimeInt64 /* xCurrentTime */ }; -static sqlite3_io_methods inst_io_methods = { - 1, /* iVersion */ - instClose, /* xClose */ - instRead, /* xRead */ - instWrite, /* xWrite */ - instTruncate, /* xTruncate */ - instSync, /* xSync */ - instFileSize, /* xFileSize */ - instLock, /* xLock */ - instUnlock, /* xUnlock */ - instCheckReservedLock, /* xCheckReservedLock */ - instFileControl, /* xFileControl */ - instSectorSize, /* xSectorSize */ - instDeviceCharacteristics /* xDeviceCharacteristics */ +static sqlite3_io_methods vfslog_io_methods = { + 2, /* iVersion */ + vfslogClose, /* xClose */ + vfslogRead, /* xRead */ + vfslogWrite, /* xWrite */ + vfslogTruncate, /* xTruncate */ + vfslogSync, /* xSync */ + vfslogFileSize, /* xFileSize */ + vfslogLock, /* xLock */ + vfslogUnlock, /* xUnlock */ + vfslogCheckReservedLock, /* xCheckReservedLock */ + vfslogFileControl, /* xFileControl */ + vfslogSectorSize, /* xSectorSize */ + vfslogDeviceCharacteristics, /* xDeviceCharacteristics */ + vfslogShmMap, /* xShmMap */ + vfslogShmLock, /* xShmLock */ + vfslogShmBarrier, /* xShmBarrier */ + vfslogShmUnmap /* xShmUnmap */ }; -/* -** hwtime.h contains inline assembler code for implementing -** high-performance timing routines. 
-*/ -#include "hwtime.h" - -#define OS_TIME_IO(eEvent, A, B, Call) { \ - inst_file *p = (inst_file *)pFile; \ - InstVfs *pInstVfs = p->pInstVfs; \ - int rc; \ - sqlite_uint64 t = sqlite3Hwtime(); \ - rc = Call; \ - t = sqlite3Hwtime() - t; \ - pInstVfs->aTime[eEvent] += t; \ - pInstVfs->aCount[eEvent] += 1; \ - if( pInstVfs->xCall ){ \ - pInstVfs->xCall( \ - pInstVfs->pClient,eEvent,p->iFileId,t,rc,p->zName,p->flags,A,B \ - ); \ - } \ - return rc; \ +#if defined(SQLITE_OS_UNIX) && !defined(NO_GETTOD) +#include +static sqlite3_uint64 vfslog_time(){ + struct timeval sTime; + gettimeofday(&sTime, 0); + return sTime.tv_usec + (sqlite3_uint64)sTime.tv_sec * 1000000; } +#elif defined(SQLITE_OS_WIN) +#include +#include +static sqlite3_uint64 vfslog_time(){ + FILETIME ft; + sqlite3_uint64 u64time = 0; + + GetSystemTimeAsFileTime(&ft); -#define OS_TIME_VFS(eEvent, Z, flags, A, B, Call) { \ - InstVfs *pInstVfs = (InstVfs *)pVfs; \ - int rc; \ - sqlite_uint64 t = sqlite3Hwtime(); \ - rc = Call; \ - t = sqlite3Hwtime() - t; \ - pInstVfs->aTime[eEvent] += t; \ - pInstVfs->aCount[eEvent] += 1; \ - if( pInstVfs->xCall ){ \ - pInstVfs->xCall(pInstVfs->pClient,eEvent,0, t, rc, Z, flags, A, B); \ - } \ - return rc; \ + u64time |= ft.dwHighDateTime; + u64time <<= 32; + u64time |= ft.dwLowDateTime; + + /* ft is 100-nanosecond intervals, we want microseconds */ + return u64time /(sqlite3_uint64)10; +} +#else +static sqlite3_uint64 vfslog_time(){ + return 0; +} +#endif + +static void vfslog_call(sqlite3_vfs *, int, int, int, int, int, int); +static void vfslog_string(sqlite3_vfs *, const char *); + +/* +** Close an vfslog-file. +*/ +static int vfslogClose(sqlite3_file *pFile){ + sqlite3_uint64 t; + int rc = SQLITE_OK; + VfslogFile *p = (VfslogFile *)pFile; + + t = vfslog_time(); + if( p->pReal->pMethods ){ + rc = p->pReal->pMethods->xClose(p->pReal); + } + t = vfslog_time() - t; + vfslog_call(p->pVfslog, OS_CLOSE, p->iFileId, t, rc, 0, 0); + return rc; } /* -** Close an inst-file. +** Read data from an vfslog-file. */ -static int instClose(sqlite3_file *pFile){ - OS_TIME_IO(OS_CLOSE, 0, 0, - (p->pReal->pMethods ? p->pReal->pMethods->xClose(p->pReal) : SQLITE_OK) - ); -} - -/* -** Read data from an inst-file. -*/ -static int instRead( +static int vfslogRead( sqlite3_file *pFile, void *zBuf, int iAmt, sqlite_int64 iOfst ){ - sqlite3_vfs *pVfs = (sqlite3_vfs *)(((inst_file *)pFile)->pInstVfs); - OS_TIME_IO(OS_READ, iAmt, (binarylog_blob(pVfs, zBuf, iAmt, 1), iOfst), - p->pReal->pMethods->xRead(p->pReal, zBuf, iAmt, iOfst) - ); + int rc; + sqlite3_uint64 t; + VfslogFile *p = (VfslogFile *)pFile; + t = vfslog_time(); + rc = p->pReal->pMethods->xRead(p->pReal, zBuf, iAmt, iOfst); + t = vfslog_time() - t; + vfslog_call(p->pVfslog, OS_READ, p->iFileId, t, rc, iAmt, (int)iOfst); + return rc; } /* -** Write data to an inst-file. +** Write data to an vfslog-file. */ -static int instWrite( +static int vfslogWrite( sqlite3_file *pFile, const void *z, int iAmt, sqlite_int64 iOfst ){ - sqlite3_vfs *pVfs = (sqlite3_vfs *)(((inst_file *)pFile)->pInstVfs); - binarylog_blob(pVfs, z, iAmt, 1); - OS_TIME_IO(OS_WRITE, iAmt, iOfst, - p->pReal->pMethods->xWrite(p->pReal, z, iAmt, iOfst) - ); + int rc; + sqlite3_uint64 t; + VfslogFile *p = (VfslogFile *)pFile; + t = vfslog_time(); + rc = p->pReal->pMethods->xWrite(p->pReal, z, iAmt, iOfst); + t = vfslog_time() - t; + vfslog_call(p->pVfslog, OS_WRITE, p->iFileId, t, rc, iAmt, (int)iOfst); + return rc; } /* -** Truncate an inst-file. +** Truncate an vfslog-file. 
*/ -static int instTruncate(sqlite3_file *pFile, sqlite_int64 size){ - OS_TIME_IO(OS_TRUNCATE, 0, (int)size, - p->pReal->pMethods->xTruncate(p->pReal, size) - ); +static int vfslogTruncate(sqlite3_file *pFile, sqlite_int64 size){ + int rc; + sqlite3_uint64 t; + VfslogFile *p = (VfslogFile *)pFile; + t = vfslog_time(); + rc = p->pReal->pMethods->xTruncate(p->pReal, size); + t = vfslog_time() - t; + vfslog_call(p->pVfslog, OS_TRUNCATE, p->iFileId, t, rc, 0, (int)size); + return rc; } /* -** Sync an inst-file. +** Sync an vfslog-file. */ -static int instSync(sqlite3_file *pFile, int flags){ - OS_TIME_IO(OS_SYNC, flags, 0, p->pReal->pMethods->xSync(p->pReal, flags)); +static int vfslogSync(sqlite3_file *pFile, int flags){ + int rc; + sqlite3_uint64 t; + VfslogFile *p = (VfslogFile *)pFile; + t = vfslog_time(); + rc = p->pReal->pMethods->xSync(p->pReal, flags); + t = vfslog_time() - t; + vfslog_call(p->pVfslog, OS_SYNC, p->iFileId, t, rc, flags, 0); + return rc; } /* -** Return the current file-size of an inst-file. +** Return the current file-size of an vfslog-file. */ -static int instFileSize(sqlite3_file *pFile, sqlite_int64 *pSize){ - OS_TIME_IO(OS_FILESIZE, (int)(*pSize), 0, - p->pReal->pMethods->xFileSize(p->pReal, pSize) - ); +static int vfslogFileSize(sqlite3_file *pFile, sqlite_int64 *pSize){ + int rc; + sqlite3_uint64 t; + VfslogFile *p = (VfslogFile *)pFile; + t = vfslog_time(); + rc = p->pReal->pMethods->xFileSize(p->pReal, pSize); + t = vfslog_time() - t; + vfslog_call(p->pVfslog, OS_FILESIZE, p->iFileId, t, rc, 0, (int)*pSize); + return rc; } /* -** Lock an inst-file. +** Lock an vfslog-file. */ -static int instLock(sqlite3_file *pFile, int eLock){ - OS_TIME_IO(OS_LOCK, eLock, 0, p->pReal->pMethods->xLock(p->pReal, eLock)); +static int vfslogLock(sqlite3_file *pFile, int eLock){ + int rc; + sqlite3_uint64 t; + VfslogFile *p = (VfslogFile *)pFile; + t = vfslog_time(); + rc = p->pReal->pMethods->xLock(p->pReal, eLock); + t = vfslog_time() - t; + vfslog_call(p->pVfslog, OS_LOCK, p->iFileId, t, rc, eLock, 0); + return rc; } /* -** Unlock an inst-file. +** Unlock an vfslog-file. */ -static int instUnlock(sqlite3_file *pFile, int eLock){ - OS_TIME_IO(OS_UNLOCK, eLock, 0, p->pReal->pMethods->xUnlock(p->pReal, eLock)); +static int vfslogUnlock(sqlite3_file *pFile, int eLock){ + int rc; + sqlite3_uint64 t; + VfslogFile *p = (VfslogFile *)pFile; + t = vfslog_time(); + rc = p->pReal->pMethods->xUnlock(p->pReal, eLock); + t = vfslog_time() - t; + vfslog_call(p->pVfslog, OS_UNLOCK, p->iFileId, t, rc, eLock, 0); + return rc; } /* -** Check if another file-handle holds a RESERVED lock on an inst-file. +** Check if another file-handle holds a RESERVED lock on an vfslog-file. */ -static int instCheckReservedLock(sqlite3_file *pFile, int *pResOut){ - OS_TIME_IO(OS_CHECKRESERVEDLOCK, 0, 0, - p->pReal->pMethods->xCheckReservedLock(p->pReal, pResOut) - ); +static int vfslogCheckReservedLock(sqlite3_file *pFile, int *pResOut){ + int rc; + sqlite3_uint64 t; + VfslogFile *p = (VfslogFile *)pFile; + t = vfslog_time(); + rc = p->pReal->pMethods->xCheckReservedLock(p->pReal, pResOut); + t = vfslog_time() - t; + vfslog_call(p->pVfslog, OS_CHECKRESERVEDLOCK, p->iFileId, t, rc, *pResOut, 0); + return rc; } /* -** File control method. For custom operations on an inst-file. +** File control method. For custom operations on an vfslog-file. 
*/ -static int instFileControl(sqlite3_file *pFile, int op, void *pArg){ - OS_TIME_IO(OS_FILECONTROL, 0, 0, p->pReal->pMethods->xFileControl(p->pReal, op, pArg)); +static int vfslogFileControl(sqlite3_file *pFile, int op, void *pArg){ + VfslogFile *p = (VfslogFile *)pFile; + return p->pReal->pMethods->xFileControl(p->pReal, op, pArg); } /* -** Return the sector-size in bytes for an inst-file. +** Return the sector-size in bytes for an vfslog-file. */ -static int instSectorSize(sqlite3_file *pFile){ - OS_TIME_IO(OS_SECTORSIZE, 0, 0, p->pReal->pMethods->xSectorSize(p->pReal)); +static int vfslogSectorSize(sqlite3_file *pFile){ + int rc; + sqlite3_uint64 t; + VfslogFile *p = (VfslogFile *)pFile; + t = vfslog_time(); + rc = p->pReal->pMethods->xSectorSize(p->pReal); + t = vfslog_time() - t; + vfslog_call(p->pVfslog, OS_SECTORSIZE, p->iFileId, t, rc, 0, 0); + return rc; } /* -** Return the device characteristic flags supported by an inst-file. +** Return the device characteristic flags supported by an vfslog-file. */ -static int instDeviceCharacteristics(sqlite3_file *pFile){ - OS_TIME_IO(OS_DEVCHAR, 0, 0, p->pReal->pMethods->xDeviceCharacteristics(p->pReal)); +static int vfslogDeviceCharacteristics(sqlite3_file *pFile){ + int rc; + sqlite3_uint64 t; + VfslogFile *p = (VfslogFile *)pFile; + t = vfslog_time(); + rc = p->pReal->pMethods->xDeviceCharacteristics(p->pReal); + t = vfslog_time() - t; + vfslog_call(p->pVfslog, OS_DEVCHAR, p->iFileId, t, rc, 0, 0); + return rc; } +static int vfslogShmLock(sqlite3_file *pFile, int ofst, int n, int flags){ + int rc; + sqlite3_uint64 t; + VfslogFile *p = (VfslogFile *)pFile; + t = vfslog_time(); + rc = p->pReal->pMethods->xShmLock(p->pReal, ofst, n, flags); + t = vfslog_time() - t; + vfslog_call(p->pVfslog, OS_SHMLOCK, p->iFileId, t, rc, 0, 0); + return rc; +} +static int vfslogShmMap( + sqlite3_file *pFile, + int iRegion, + int szRegion, + int isWrite, + volatile void **pp +){ + int rc; + sqlite3_uint64 t; + VfslogFile *p = (VfslogFile *)pFile; + t = vfslog_time(); + rc = p->pReal->pMethods->xShmMap(p->pReal, iRegion, szRegion, isWrite, pp); + t = vfslog_time() - t; + vfslog_call(p->pVfslog, OS_SHMMAP, p->iFileId, t, rc, 0, 0); + return rc; +} +static void vfslogShmBarrier(sqlite3_file *pFile){ + sqlite3_uint64 t; + VfslogFile *p = (VfslogFile *)pFile; + t = vfslog_time(); + p->pReal->pMethods->xShmBarrier(p->pReal); + t = vfslog_time() - t; + vfslog_call(p->pVfslog, OS_SHMBARRIER, p->iFileId, t, SQLITE_OK, 0, 0); +} +static int vfslogShmUnmap(sqlite3_file *pFile, int deleteFlag){ + int rc; + sqlite3_uint64 t; + VfslogFile *p = (VfslogFile *)pFile; + t = vfslog_time(); + rc = p->pReal->pMethods->xShmUnmap(p->pReal, deleteFlag); + t = vfslog_time() - t; + vfslog_call(p->pVfslog, OS_SHMUNMAP, p->iFileId, t, rc, 0, 0); + return rc; +} + + /* -** Open an inst file handle. +** Open an vfslog file handle. 
*/ -static int instOpen( +static int vfslogOpen( sqlite3_vfs *pVfs, const char *zName, sqlite3_file *pFile, int flags, int *pOutFlags ){ - inst_file *p = (inst_file *)pFile; - pFile->pMethods = &inst_io_methods; - p->pReal = (sqlite3_file *)&p[1]; - p->pInstVfs = (InstVfs *)pVfs; - p->zName = zName; - p->flags = flags; - p->iFileId = ++p->pInstVfs->iNextFileId; + int rc; + sqlite3_uint64 t; + VfslogFile *p = (VfslogFile *)pFile; + VfslogVfs *pLog = (VfslogVfs *)pVfs; - binarylog_blob(pVfs, zName, -1, 0); - OS_TIME_VFS(OS_OPEN, zName, flags, p->iFileId, 0, - REALVFS(pVfs)->xOpen(REALVFS(pVfs), zName, p->pReal, flags, pOutFlags) - ); + pFile->pMethods = &vfslog_io_methods; + p->pReal = (sqlite3_file *)&p[1]; + p->pVfslog = pVfs; + p->iFileId = ++pLog->iNextFileId; + + t = vfslog_time(); + rc = REALVFS(pVfs)->xOpen(REALVFS(pVfs), zName, p->pReal, flags, pOutFlags); + t = vfslog_time() - t; + + vfslog_call(pVfs, OS_OPEN, p->iFileId, t, rc, 0, 0); + vfslog_string(pVfs, zName); + return rc; } /* @@ -397,27 +500,35 @@ static int instOpen( ** ensure the file-system modifications are synced to disk before ** returning. */ -static int instDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){ - binarylog_blob(pVfs, zPath, -1, 0); - OS_TIME_VFS(OS_DELETE, zPath, 0, dirSync, 0, - REALVFS(pVfs)->xDelete(REALVFS(pVfs), zPath, dirSync) - ); +static int vfslogDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){ + int rc; + sqlite3_uint64 t; + t = vfslog_time(); + rc = REALVFS(pVfs)->xDelete(REALVFS(pVfs), zPath, dirSync); + t = vfslog_time() - t; + vfslog_call(pVfs, OS_DELETE, 0, t, rc, dirSync, 0); + vfslog_string(pVfs, zPath); + return rc; } /* ** Test for access permissions. Return true if the requested permission ** is available, or false otherwise. */ -static int instAccess( +static int vfslogAccess( sqlite3_vfs *pVfs, const char *zPath, int flags, int *pResOut ){ - binarylog_blob(pVfs, zPath, -1, 0); - OS_TIME_VFS(OS_ACCESS, zPath, 0, flags, *pResOut, - REALVFS(pVfs)->xAccess(REALVFS(pVfs), zPath, flags, pResOut) - ); + int rc; + sqlite3_uint64 t; + t = vfslog_time(); + rc = REALVFS(pVfs)->xAccess(REALVFS(pVfs), zPath, flags, pResOut); + t = vfslog_time() - t; + vfslog_call(pVfs, OS_ACCESS, 0, t, rc, flags, *pResOut); + vfslog_string(pVfs, zPath); + return rc; } /* @@ -425,21 +536,19 @@ static int instAccess( ** to the pathname in zPath. zOut is guaranteed to point to a buffer ** of at least (INST_MAX_PATHNAME+1) bytes. */ -static int instFullPathname( +static int vfslogFullPathname( sqlite3_vfs *pVfs, const char *zPath, int nOut, char *zOut ){ - OS_TIME_VFS( OS_FULLPATHNAME, zPath, 0, 0, 0, - REALVFS(pVfs)->xFullPathname(REALVFS(pVfs), zPath, nOut, zOut); - ); + return REALVFS(pVfs)->xFullPathname(REALVFS(pVfs), zPath, nOut, zOut); } /* ** Open the dynamic library located at zPath and return a handle. */ -static void *instDlOpen(sqlite3_vfs *pVfs, const char *zPath){ +static void *vfslogDlOpen(sqlite3_vfs *pVfs, const char *zPath){ return REALVFS(pVfs)->xDlOpen(REALVFS(pVfs), zPath); } @@ -448,21 +557,21 @@ static void *instDlOpen(sqlite3_vfs *pVfs, const char *zPath){ ** utf-8 string describing the most recent error encountered associated ** with dynamic libraries. */ -static void instDlError(sqlite3_vfs *pVfs, int nByte, char *zErrMsg){ +static void vfslogDlError(sqlite3_vfs *pVfs, int nByte, char *zErrMsg){ REALVFS(pVfs)->xDlError(REALVFS(pVfs), nByte, zErrMsg); } /* ** Return a pointer to the symbol zSymbol in the dynamic library pHandle. 
*/ -static void (*instDlSym(sqlite3_vfs *pVfs, void *p, const char *zSym))(void){ +static void (*vfslogDlSym(sqlite3_vfs *pVfs, void *p, const char *zSym))(void){ return REALVFS(pVfs)->xDlSym(REALVFS(pVfs), p, zSym); } /* ** Close the dynamic library handle pHandle. */ -static void instDlClose(sqlite3_vfs *pVfs, void *pHandle){ +static void vfslogDlClose(sqlite3_vfs *pVfs, void *pHandle){ REALVFS(pVfs)->xDlClose(REALVFS(pVfs), pHandle); } @@ -470,169 +579,33 @@ static void instDlClose(sqlite3_vfs *pVfs, void *pHandle){ ** Populate the buffer pointed to by zBufOut with nByte bytes of ** random data. */ -static int instRandomness(sqlite3_vfs *pVfs, int nByte, char *zBufOut){ - OS_TIME_VFS( OS_RANDOMNESS, 0, 0, nByte, 0, - REALVFS(pVfs)->xRandomness(REALVFS(pVfs), nByte, zBufOut); - ); +static int vfslogRandomness(sqlite3_vfs *pVfs, int nByte, char *zBufOut){ + return REALVFS(pVfs)->xRandomness(REALVFS(pVfs), nByte, zBufOut); } /* ** Sleep for nMicro microseconds. Return the number of microseconds ** actually slept. */ -static int instSleep(sqlite3_vfs *pVfs, int nMicro){ - OS_TIME_VFS( OS_SLEEP, 0, 0, nMicro, 0, - REALVFS(pVfs)->xSleep(REALVFS(pVfs), nMicro) - ); +static int vfslogSleep(sqlite3_vfs *pVfs, int nMicro){ + return REALVFS(pVfs)->xSleep(REALVFS(pVfs), nMicro); } /* ** Return the current time as a Julian Day number in *pTimeOut. */ -static int instCurrentTime(sqlite3_vfs *pVfs, double *pTimeOut){ - OS_TIME_VFS( OS_CURRENTTIME, 0, 0, 0, 0, - REALVFS(pVfs)->xCurrentTime(REALVFS(pVfs), pTimeOut) - ); +static int vfslogCurrentTime(sqlite3_vfs *pVfs, double *pTimeOut){ + return REALVFS(pVfs)->xCurrentTime(REALVFS(pVfs), pTimeOut); } -sqlite3_vfs *sqlite3_instvfs_create(const char *zName, const char *zParent){ - int nByte; - InstVfs *p; - sqlite3_vfs *pParent; - - pParent = sqlite3_vfs_find(zParent); - if( !pParent ){ - return 0; - } - - nByte = strlen(zName) + 1 + sizeof(InstVfs); - p = (InstVfs *)sqlite3_malloc(nByte); - if( p ){ - char *zCopy = (char *)&p[1]; - memset(p, 0, nByte); - memcpy(p, &inst_vfs, sizeof(sqlite3_vfs)); - p->pVfs = pParent; - memcpy(zCopy, zName, strlen(zName)); - p->base.zName = (const char *)zCopy; - p->base.szOsFile += pParent->szOsFile; - sqlite3_vfs_register((sqlite3_vfs *)p, 0); - } - - return (sqlite3_vfs *)p; +static int vfslogGetLastError(sqlite3_vfs *pVfs, int a, char *b){ + return REALVFS(pVfs)->xGetLastError(REALVFS(pVfs), a, b); +} +static int vfslogCurrentTimeInt64(sqlite3_vfs *pVfs, sqlite3_int64 *p){ + return REALVFS(pVfs)->xCurrentTimeInt64(REALVFS(pVfs), p); } -void sqlite3_instvfs_configure( - sqlite3_vfs *pVfs, - void (*xCall)( - void*, - int, /* File id */ - int, /* Event code */ - sqlite3_int64, - int, /* Return code */ - const char*, /* File name */ - int, - int, - sqlite3_int64 - ), - void *pClient, - void (*xDel)(void *) -){ - InstVfs *p = (InstVfs *)pVfs; - assert( pVfs->xOpen==instOpen ); - if( p->xDel ){ - p->xDel(p->pClient); - } - p->xCall = xCall; - p->xDel = xDel; - p->pClient = pClient; -} - -void sqlite3_instvfs_destroy(sqlite3_vfs *pVfs){ - if( pVfs ){ - sqlite3_vfs_unregister(pVfs); - sqlite3_instvfs_configure(pVfs, 0, 0, 0); - sqlite3_free(pVfs); - } -} - -void sqlite3_instvfs_reset(sqlite3_vfs *pVfs){ - InstVfs *p = (InstVfs *)pVfs; - assert( pVfs->xOpen==instOpen ); - memset(p->aTime, 0, sizeof(sqlite3_int64)*OS_NUMEVENTS); - memset(p->aCount, 0, sizeof(int)*OS_NUMEVENTS); -} - -const char *sqlite3_instvfs_name(int eEvent){ - const char *zEvent = 0; - - switch( eEvent ){ - case OS_CLOSE: zEvent = "xClose"; break; - case 
OS_READ: zEvent = "xRead"; break; - case OS_WRITE: zEvent = "xWrite"; break; - case OS_TRUNCATE: zEvent = "xTruncate"; break; - case OS_SYNC: zEvent = "xSync"; break; - case OS_FILESIZE: zEvent = "xFilesize"; break; - case OS_LOCK: zEvent = "xLock"; break; - case OS_UNLOCK: zEvent = "xUnlock"; break; - case OS_CHECKRESERVEDLOCK: zEvent = "xCheckReservedLock"; break; - case OS_FILECONTROL: zEvent = "xFileControl"; break; - case OS_SECTORSIZE: zEvent = "xSectorSize"; break; - case OS_DEVCHAR: zEvent = "xDeviceCharacteristics"; break; - case OS_OPEN: zEvent = "xOpen"; break; - case OS_DELETE: zEvent = "xDelete"; break; - case OS_ACCESS: zEvent = "xAccess"; break; - case OS_FULLPATHNAME: zEvent = "xFullPathname"; break; - case OS_RANDOMNESS: zEvent = "xRandomness"; break; - case OS_SLEEP: zEvent = "xSleep"; break; - case OS_CURRENTTIME: zEvent = "xCurrentTime"; break; - } - - return zEvent; -} - -void sqlite3_instvfs_get( - sqlite3_vfs *pVfs, - int eEvent, - const char **pzEvent, - sqlite3_int64 *pnClick, - int *pnCall -){ - InstVfs *p = (InstVfs *)pVfs; - assert( pVfs->xOpen==instOpen ); - if( eEvent<1 || eEvent>=OS_NUMEVENTS ){ - *pzEvent = 0; - *pnClick = 0; - *pnCall = 0; - return; - } - - *pzEvent = sqlite3_instvfs_name(eEvent); - *pnClick = p->aTime[eEvent]; - *pnCall = p->aCount[eEvent]; -} - -#define BINARYLOG_BUFFERSIZE 8192 - -struct InstVfsBinaryLog { - int nBuf; - char *zBuf; - sqlite3_int64 iOffset; - int log_data; - sqlite3_file *pOut; - char *zOut; /* Log file name */ -}; -typedef struct InstVfsBinaryLog InstVfsBinaryLog; - -static void put32bits(unsigned char *p, unsigned int v){ - p[0] = v>>24; - p[1] = v>>16; - p[2] = v>>8; - p[3] = v; -} - -static void binarylog_flush(InstVfsBinaryLog *pLog){ - sqlite3_file *pFile = pLog->pOut; - +static void vfslog_flush(VfslogVfs *p){ #ifdef SQLITE_TEST extern int sqlite3_io_error_pending; extern int sqlite3_io_error_persist; @@ -647,9 +620,11 @@ static void binarylog_flush(InstVfsBinaryLog *pLog){ sqlite3_diskfull_pending = 0; #endif - pFile->pMethods->xWrite(pFile, pLog->zBuf, pLog->nBuf, pLog->iOffset); - pLog->iOffset += pLog->nBuf; - pLog->nBuf = 0; + if( p->nBuf ){ + p->pLog->pMethods->xWrite(p->pLog, p->aBuf, p->nBuf, p->iOffset); + p->iOffset += p->nBuf; + p->nBuf = 0; + } #ifdef SQLITE_TEST sqlite3_io_error_pending = pending; @@ -658,409 +633,578 @@ static void binarylog_flush(InstVfsBinaryLog *pLog){ #endif } -static void binarylog_xcall( - void *p, +static void put32bits(unsigned char *p, unsigned int v){ + p[0] = v>>24; + p[1] = v>>16; + p[2] = v>>8; + p[3] = v; +} + +static void vfslog_call( + sqlite3_vfs *pVfs, int eEvent, - int iFileId, - sqlite3_int64 nClick, + int iFileid, + int nClick, int return_code, - const char *zName, - int flags, - int nByte, - sqlite3_int64 iOffset + int size, + int offset ){ - InstVfsBinaryLog *pLog = (InstVfsBinaryLog *)p; + VfslogVfs *p = (VfslogVfs *)pVfs; unsigned char *zRec; - if( (28+pLog->nBuf)>BINARYLOG_BUFFERSIZE ){ - binarylog_flush(pLog); + if( (24+p->nBuf)>sizeof(p->aBuf) ){ + vfslog_flush(p); } - zRec = (unsigned char *)&pLog->zBuf[pLog->nBuf]; + zRec = (unsigned char *)&p->aBuf[p->nBuf]; put32bits(&zRec[0], eEvent); - put32bits(&zRec[4], (int)iFileId); - put32bits(&zRec[8], (int)nClick); + put32bits(&zRec[4], iFileid); + put32bits(&zRec[8], nClick); put32bits(&zRec[12], return_code); - put32bits(&zRec[16], flags); - put32bits(&zRec[20], nByte); - put32bits(&zRec[24], (int)iOffset); - pLog->nBuf += 28; + put32bits(&zRec[16], size); + put32bits(&zRec[20], offset); + p->nBuf += 24; } 
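
Each call to vfslog_call() above appends one fixed-size 24-byte record to the write buffer: six 32-bit big-endian integers holding the event code, file id, elapsed microseconds, return code, size and offset, in that order. Records that carry a name or message (xOpen, xDelete, xAccess and annotations) are followed by a 4-byte length plus the string bytes emitted by vfslog_string(). A rough sketch of how a single record could be decoded offline; get32() simply mirrors the get32bits() helper used by the log reader further down, and the function names are invented:

#include <stdio.h>

/* Decode and print one 24-byte vfslog record (illustrative sketch only). */
static unsigned int get32(const unsigned char *p){
  return (p[0]<<24) + (p[1]<<16) + (p[2]<<8) + p[3];
}
static void print_record(const unsigned char *aRec){
  printf("event=%u fileid=%u usec=%u rc=%u size=%u offset=%u\n",
         get32(&aRec[0]),  get32(&aRec[4]),  get32(&aRec[8]),
         get32(&aRec[12]), get32(&aRec[16]), get32(&aRec[20]));
}

On disk these records follow the 20-byte "sqlite_ostrace1....." header that sqlite3_vfslog_new() writes before anything else.
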
-static void binarylog_xdel(void *p){ - /* Close the log file and free the memory allocated for the - ** InstVfsBinaryLog structure. - */ - InstVfsBinaryLog *pLog = (InstVfsBinaryLog *)p; - sqlite3_file *pFile = pLog->pOut; - if( pLog->nBuf ){ - binarylog_flush(pLog); +static void vfslog_string(sqlite3_vfs *pVfs, const char *zStr){ + VfslogVfs *p = (VfslogVfs *)pVfs; + unsigned char *zRec; + int nStr = zStr ? strlen(zStr) : 0; + if( (4+nStr+p->nBuf)>sizeof(p->aBuf) ){ + vfslog_flush(p); } - pFile->pMethods->xClose(pFile); - sqlite3_free(pLog->pOut); - sqlite3_free(pLog->zBuf); - sqlite3_free(pLog); -} - -static void binarylog_blob( - sqlite3_vfs *pVfs, - const char *zBlob, - int nBlob, - int isBinary -){ - InstVfsBinaryLog *pLog; - InstVfs *pInstVfs = (InstVfs *)pVfs; - - if( pVfs->xOpen!=instOpen || pInstVfs->xCall!=binarylog_xcall ){ - return; + zRec = (unsigned char *)&p->aBuf[p->nBuf]; + put32bits(&zRec[0], nStr); + if( zStr ){ + memcpy(&zRec[4], zStr, nStr); } - pLog = (InstVfsBinaryLog *)pInstVfs->pClient; - if( zBlob && (!isBinary || pLog->log_data) ){ - unsigned char *zRec; - int nWrite; + p->nBuf += (4 + nStr); +} - if( nBlob<0 ){ - nBlob = strlen(zBlob); - } - nWrite = nBlob + 28; - - if( (nWrite+pLog->nBuf)>BINARYLOG_BUFFERSIZE ){ - binarylog_flush(pLog); - } - - zRec = (unsigned char *)&pLog->zBuf[pLog->nBuf]; - memset(zRec, 0, nWrite); - put32bits(&zRec[0], BINARYLOG_STRING); - put32bits(&zRec[4], (int)nBlob); - put32bits(&zRec[8], (int)isBinary); - memcpy(&zRec[28], zBlob, nBlob); - pLog->nBuf += nWrite; +static void vfslog_finalize(VfslogVfs *p){ + if( p->pLog->pMethods ){ + vfslog_flush(p); + p->pLog->pMethods->xClose(p->pLog); } + sqlite3_free(p); } -void sqlite3_instvfs_binarylog_call( - sqlite3_vfs *pVfs, - int eEvent, - sqlite3_int64 nClick, - int return_code, - const char *zString -){ - InstVfs *pInstVfs = (InstVfs *)pVfs; - InstVfsBinaryLog *pLog = (InstVfsBinaryLog *)pInstVfs->pClient; - - if( zString ){ - binarylog_blob(pVfs, zString, -1, 0); - } - binarylog_xcall(pLog, eEvent, 0, nClick, return_code, 0, 0, 0, 0); -} - -void sqlite3_instvfs_binarylog_marker( - sqlite3_vfs *pVfs, - const char *zMarker -){ - InstVfs *pInstVfs = (InstVfs *)pVfs; - InstVfsBinaryLog *pLog = (InstVfsBinaryLog *)pInstVfs->pClient; - binarylog_blob(pVfs, zMarker, -1, 0); - binarylog_xcall(pLog, BINARYLOG_MARKER, 0, 0, 0, 0, 0, 0, 0); -} - -sqlite3_vfs *sqlite3_instvfs_binarylog( - const char *zVfs, - const char *zParentVfs, - const char *zLog, - int log_data -){ - InstVfsBinaryLog *p; +int sqlite3_vfslog_finalize(const char *zVfs){ sqlite3_vfs *pVfs; + pVfs = sqlite3_vfs_find(zVfs); + if( !pVfs || pVfs->xOpen!=vfslogOpen ){ + return SQLITE_ERROR; + } + sqlite3_vfs_unregister(pVfs); + vfslog_finalize((VfslogVfs *)pVfs); + return SQLITE_OK; +} + +int sqlite3_vfslog_new( + const char *zVfs, /* New VFS name */ + const char *zParentVfs, /* Parent VFS name (or NULL) */ + const char *zLog /* Log file name */ +){ + VfslogVfs *p; sqlite3_vfs *pParent; int nByte; int flags; int rc; + char *zFile; + int nVfs; pParent = sqlite3_vfs_find(zParentVfs); if( !pParent ){ - return 0; + return SQLITE_ERROR; } - nByte = sizeof(InstVfsBinaryLog) + pParent->mxPathname+1; - p = (InstVfsBinaryLog *)sqlite3_malloc(nByte); + nVfs = strlen(zVfs); + nByte = sizeof(VfslogVfs) + pParent->szOsFile + nVfs+1+pParent->mxPathname+1; + p = (VfslogVfs *)sqlite3_malloc(nByte); memset(p, 0, nByte); - p->zBuf = sqlite3_malloc(BINARYLOG_BUFFERSIZE); - p->zOut = (char *)&p[1]; - p->pOut = (sqlite3_file 
*)sqlite3_malloc(pParent->szOsFile); - p->log_data = log_data; - pParent->xFullPathname(pParent, zLog, pParent->mxPathname, p->zOut); + + p->pVfs = pParent; + p->pLog = (sqlite3_file *)&p[1]; + memcpy(&p->base, &vfslog_vfs, sizeof(sqlite3_vfs)); + p->base.zName = &((char *)p->pLog)[pParent->szOsFile]; + p->base.szOsFile += pParent->szOsFile; + memcpy((char *)p->base.zName, zVfs, nVfs); + + zFile = (char *)&p->base.zName[nVfs+1]; + pParent->xFullPathname(pParent, zLog, pParent->mxPathname, zFile); + flags = SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_MASTER_JOURNAL; - pParent->xDelete(pParent, p->zOut, 0); - rc = pParent->xOpen(pParent, p->zOut, p->pOut, flags, &flags); + pParent->xDelete(pParent, zFile, 0); + rc = pParent->xOpen(pParent, zFile, p->pLog, flags, &flags); if( rc==SQLITE_OK ){ - memcpy(p->zBuf, "sqlite_ostrace1.....", 20); + memcpy(p->aBuf, "sqlite_ostrace1.....", 20); p->iOffset = 0; p->nBuf = 20; + rc = sqlite3_vfs_register((sqlite3_vfs *)p, 1); } if( rc ){ - binarylog_xdel(p); - return 0; + vfslog_finalize(p); } - - pVfs = sqlite3_instvfs_create(zVfs, zParentVfs); - if( pVfs ){ - sqlite3_instvfs_configure(pVfs, binarylog_xcall, p, binarylog_xdel); - } - - return pVfs; + return rc; } -#endif /* SQLITE_ENABLE_INSTVFS */ + +int sqlite3_vfslog_annotate(const char *zVfs, const char *zMsg){ + sqlite3_vfs *pVfs; + pVfs = sqlite3_vfs_find(zVfs); + if( !pVfs || pVfs->xOpen!=vfslogOpen ){ + return SQLITE_ERROR; + } + vfslog_call(pVfs, OS_ANNOTATE, 0, 0, 0, 0, 0); + vfslog_string(pVfs, zMsg); + return SQLITE_OK; +} + +static const char *vfslog_eventname(int eEvent){ + const char *zEvent = 0; + + switch( eEvent ){ + case OS_CLOSE: zEvent = "xClose"; break; + case OS_READ: zEvent = "xRead"; break; + case OS_WRITE: zEvent = "xWrite"; break; + case OS_TRUNCATE: zEvent = "xTruncate"; break; + case OS_SYNC: zEvent = "xSync"; break; + case OS_FILESIZE: zEvent = "xFilesize"; break; + case OS_LOCK: zEvent = "xLock"; break; + case OS_UNLOCK: zEvent = "xUnlock"; break; + case OS_CHECKRESERVEDLOCK: zEvent = "xCheckResLock"; break; + case OS_FILECONTROL: zEvent = "xFileControl"; break; + case OS_SECTORSIZE: zEvent = "xSectorSize"; break; + case OS_DEVCHAR: zEvent = "xDeviceChar"; break; + case OS_OPEN: zEvent = "xOpen"; break; + case OS_DELETE: zEvent = "xDelete"; break; + case OS_ACCESS: zEvent = "xAccess"; break; + case OS_FULLPATHNAME: zEvent = "xFullPathname"; break; + case OS_RANDOMNESS: zEvent = "xRandomness"; break; + case OS_SLEEP: zEvent = "xSleep"; break; + case OS_CURRENTTIME: zEvent = "xCurrentTime"; break; + + case OS_SHMUNMAP: zEvent = "xShmUnmap"; break; + case OS_SHMLOCK: zEvent = "xShmLock"; break; + case OS_SHMBARRIER: zEvent = "xShmBarrier"; break; + case OS_SHMMAP: zEvent = "xShmMap"; break; + + case OS_ANNOTATE: zEvent = "annotation"; break; + } + + return zEvent; +} + +typedef struct VfslogVtab VfslogVtab; +typedef struct VfslogCsr VfslogCsr; + +/* +** Virtual table type for the vfslog reader module. +*/ +struct VfslogVtab { + sqlite3_vtab base; /* Base class */ + sqlite3_file *pFd; /* File descriptor open on vfslog file */ + sqlite3_int64 nByte; /* Size of file in bytes */ + char *zFile; /* File name for pFd */ +}; + +/* +** Virtual table cursor type for the vfslog reader module. +*/ +struct VfslogCsr { + sqlite3_vtab_cursor base; /* Base class */ + sqlite3_int64 iRowid; /* Current rowid. 
*/ + sqlite3_int64 iOffset; /* Offset of next record in file */ + char *zTransient; /* Transient 'file' string */ + int nFile; /* Size of array azFile[] */ + char **azFile; /* File strings */ + unsigned char aBuf[1024]; /* Current vfs log entry (read from file) */ +}; + +static unsigned int get32bits(unsigned char *p){ + return (p[0]<<24) + (p[1]<<16) + (p[2]<<8) + p[3]; +} + +/* +** The argument must point to a buffer containing a nul-terminated string. +** If the string begins with an SQL quote character it is overwritten by +** the dequoted version. Otherwise the buffer is left unmodified. +*/ +static void dequote(char *z){ + char quote; /* Quote character (if any ) */ + quote = z[0]; + if( quote=='[' || quote=='\'' || quote=='"' || quote=='`' ){ + int iIn = 1; /* Index of next byte to read from input */ + int iOut = 0; /* Index of next byte to write to output */ + if( quote=='[' ) quote = ']'; + while( z[iIn] ){ + if( z[iIn]==quote ){ + if( z[iIn+1]!=quote ) break; + z[iOut++] = quote; + iIn += 2; + }else{ + z[iOut++] = z[iIn++]; + } + } + z[iOut] = '\0'; + } +} + +#ifndef SQLITE_OMIT_VIRTUALTABLE +/* +** Connect to or create a vfslog virtual table. +*/ +static int vlogConnect( + sqlite3 *db, + void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVtab, + char **pzErr +){ + sqlite3_vfs *pVfs; /* VFS used to read log file */ + int flags; /* flags passed to pVfs->xOpen() */ + VfslogVtab *p; + int rc; + int nByte; + char *zFile; + + *ppVtab = 0; + pVfs = sqlite3_vfs_find(0); + nByte = sizeof(VfslogVtab) + pVfs->szOsFile + pVfs->mxPathname; + p = sqlite3_malloc(nByte); + if( p==0 ) return SQLITE_NOMEM; + memset(p, 0, nByte); + + p->pFd = (sqlite3_file *)&p[1]; + p->zFile = &((char *)p->pFd)[pVfs->szOsFile]; + + zFile = sqlite3_mprintf("%s", argv[3]); + if( !zFile ){ + sqlite3_free(p); + return SQLITE_NOMEM; + } + dequote(zFile); + pVfs->xFullPathname(pVfs, zFile, pVfs->mxPathname, p->zFile); + sqlite3_free(zFile); + + flags = SQLITE_OPEN_READWRITE|SQLITE_OPEN_MASTER_JOURNAL; + rc = pVfs->xOpen(pVfs, p->zFile, p->pFd, flags, &flags); + + if( rc==SQLITE_OK ){ + p->pFd->pMethods->xFileSize(p->pFd, &p->nByte); + sqlite3_declare_vtab(db, + "CREATE TABLE xxx(event, file, click, rc, size, offset)" + ); + *ppVtab = &p->base; + }else{ + sqlite3_free(p); + } + + return rc; +} + +/* +** There is no "best-index". This virtual table always does a linear +** scan of the binary VFS log file. +*/ +static int vlogBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ + pIdxInfo->estimatedCost = 10.0; + return SQLITE_OK; +} + +/* +** Disconnect from or destroy a vfslog virtual table. +*/ +static int vlogDisconnect(sqlite3_vtab *pVtab){ + VfslogVtab *p = (VfslogVtab *)pVtab; + if( p->pFd->pMethods ){ + p->pFd->pMethods->xClose(p->pFd); + p->pFd->pMethods = 0; + } + sqlite3_free(p); + return SQLITE_OK; +} + +/* +** Open a new vfslog cursor. +*/ +static int vlogOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ + VfslogCsr *pCsr; /* Newly allocated cursor object */ + + pCsr = sqlite3_malloc(sizeof(VfslogCsr)); + if( !pCsr ) return SQLITE_NOMEM; + memset(pCsr, 0, sizeof(VfslogCsr)); + *ppCursor = &pCsr->base; + return SQLITE_OK; +} + +/* +** Close a vfslog cursor. +*/ +static int vlogClose(sqlite3_vtab_cursor *pCursor){ + VfslogCsr *p = (VfslogCsr *)pCursor; + int i; + for(i=0; inFile; i++){ + sqlite3_free(p->azFile[i]); + } + sqlite3_free(p->azFile); + sqlite3_free(p->zTransient); + sqlite3_free(p); + return SQLITE_OK; +} + +/* +** Move a vfslog cursor to the next entry in the file. 
+*/ +static int vlogNext(sqlite3_vtab_cursor *pCursor){ + VfslogCsr *pCsr = (VfslogCsr *)pCursor; + VfslogVtab *p = (VfslogVtab *)pCursor->pVtab; + int rc = SQLITE_OK; + int nRead; + + sqlite3_free(pCsr->zTransient); + pCsr->zTransient = 0; + + nRead = 24; + if( pCsr->iOffset+nRead<=p->nByte ){ + int eEvent; + rc = p->pFd->pMethods->xRead(p->pFd, pCsr->aBuf, nRead, pCsr->iOffset); + + eEvent = get32bits(pCsr->aBuf); + if( (rc==SQLITE_OK) + && (eEvent==OS_OPEN || eEvent==OS_DELETE || eEvent==OS_ACCESS) + ){ + char buf[4]; + rc = p->pFd->pMethods->xRead(p->pFd, buf, 4, pCsr->iOffset+nRead); + nRead += 4; + if( rc==SQLITE_OK ){ + int nStr = get32bits((unsigned char *)buf); + char *zStr = sqlite3_malloc(nStr+1); + rc = p->pFd->pMethods->xRead(p->pFd, zStr, nStr, pCsr->iOffset+nRead); + zStr[nStr] = '\0'; + nRead += nStr; + + if( eEvent==OS_OPEN ){ + int iFileid = get32bits(&pCsr->aBuf[4]); + if( iFileid>=pCsr->nFile ){ + int nNew = sizeof(pCsr->azFile[0])*(iFileid+1); + pCsr->azFile = (char **)sqlite3_realloc(pCsr->azFile, nNew); + nNew -= sizeof(pCsr->azFile[0])*pCsr->nFile; + memset(&pCsr->azFile[pCsr->nFile], 0, nNew); + pCsr->nFile = iFileid+1; + } + sqlite3_free(pCsr->azFile[iFileid]); + pCsr->azFile[iFileid] = zStr; + }else{ + pCsr->zTransient = zStr; + } + } + } + } + + pCsr->iRowid += 1; + pCsr->iOffset += nRead; + return rc; +} + +static int vlogEof(sqlite3_vtab_cursor *pCursor){ + VfslogCsr *pCsr = (VfslogCsr *)pCursor; + VfslogVtab *p = (VfslogVtab *)pCursor->pVtab; + return (pCsr->iOffset>=p->nByte); +} + +static int vlogFilter( + sqlite3_vtab_cursor *pCursor, + int idxNum, const char *idxStr, + int argc, sqlite3_value **argv +){ + VfslogCsr *pCsr = (VfslogCsr *)pCursor; + pCsr->iRowid = 0; + pCsr->iOffset = 20; + return vlogNext(pCursor); +} + +static int vlogColumn( + sqlite3_vtab_cursor *pCursor, + sqlite3_context *ctx, + int i +){ + unsigned int val; + VfslogCsr *pCsr = (VfslogCsr *)pCursor; + + assert( i<7 ); + val = get32bits(&pCsr->aBuf[4*i]); + + switch( i ){ + case 0: { + sqlite3_result_text(ctx, vfslog_eventname(val), -1, SQLITE_STATIC); + break; + } + case 1: { + char *zStr = pCsr->zTransient; + if( val!=0 && valnFile ){ + zStr = pCsr->azFile[val]; + } + sqlite3_result_text(ctx, zStr, -1, SQLITE_TRANSIENT); + break; + } + default: + sqlite3_result_int(ctx, val); + break; + } + + return SQLITE_OK; +} + +static int vlogRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ + VfslogCsr *pCsr = (VfslogCsr *)pCursor; + *pRowid = pCsr->iRowid; + return SQLITE_OK; +} + +int sqlite3_vfslog_register(sqlite3 *db){ + static sqlite3_module vfslog_module = { + 0, /* iVersion */ + vlogConnect, /* xCreate */ + vlogConnect, /* xConnect */ + vlogBestIndex, /* xBestIndex */ + vlogDisconnect, /* xDisconnect */ + vlogDisconnect, /* xDestroy */ + vlogOpen, /* xOpen - open a cursor */ + vlogClose, /* xClose - close a cursor */ + vlogFilter, /* xFilter - configure scan constraints */ + vlogNext, /* xNext - advance a cursor */ + vlogEof, /* xEof - check for end of scan */ + vlogColumn, /* xColumn - read data */ + vlogRowid, /* xRowid - read data */ + 0, /* xUpdate */ + 0, /* xBegin */ + 0, /* xSync */ + 0, /* xCommit */ + 0, /* xRollback */ + 0, /* xFindMethod */ + 0, /* xRename */ + }; + + sqlite3_create_module(db, "vfslog", &vfslog_module, 0); + return SQLITE_OK; +} +#endif /* SQLITE_OMIT_VIRTUALTABLE */ /************************************************************************** *************************************************************************** ** Tcl interface starts here. 
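
The Tcl interface below wraps the C entry points defined above. For orientation, here is a hedged sketch of how those entry points fit together; the VFS name "oslog", the file names and the error handling are invented for illustration and are not part of the patch:

/* Illustrative only: log all VFS activity, then inspect it via the vtab. */
#include "sqlite3.h"

int run_with_oslog(void){
  sqlite3 *db;
  int rc;

  /* Wrap the default VFS in a logging VFS; it becomes the new default. */
  rc = sqlite3_vfslog_new("oslog", 0, "vfs.log");
  if( rc!=SQLITE_OK ) return rc;

  rc = sqlite3_open("test.db", &db);            /* opened via "oslog" */
  if( rc==SQLITE_OK ){
    sqlite3_vfslog_annotate("oslog", "create schema");
    rc = sqlite3_exec(db, "CREATE TABLE t1(x)", 0, 0, 0);
  }
  sqlite3_close(db);
  sqlite3_vfslog_finalize("oslog");             /* flush and close vfs.log */

  /* Read the binary log back through the vfslog virtual table. */
  if( sqlite3_open(":memory:", &db)==SQLITE_OK ){
    sqlite3_vfslog_register(db);
    rc = sqlite3_exec(db,
        "CREATE VIRTUAL TABLE temp.v USING vfslog('vfs.log');"
        "SELECT event, file, click, rc, size, offset FROM v;",
        0, 0, 0);
  }
  sqlite3_close(db);
  return rc;
}

The Tcl commands that follow expose the same four operations (new, annotate, finalize and register) to the test scripts.
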
*/ -#if SQLITE_TEST + +#if defined(SQLITE_TEST) || defined(TCLSH) #include -#ifdef SQLITE_ENABLE_INSTVFS -struct InstVfsCall { - Tcl_Interp *interp; - Tcl_Obj *pScript; -}; -typedef struct InstVfsCall InstVfsCall; - -static void test_instvfs_xcall( - void *p, - int eEvent, - int iFileId, - sqlite3_int64 nClick, - int return_code, - const char *zName, - int flags, - int nByte, - sqlite3_int64 iOffset -){ - int rc; - InstVfsCall *pCall = (InstVfsCall *)p; - Tcl_Obj *pObj = Tcl_DuplicateObj( pCall->pScript); - const char *zEvent = sqlite3_instvfs_name(eEvent); - - Tcl_IncrRefCount(pObj); - Tcl_ListObjAppendElement(0, pObj, Tcl_NewStringObj(zEvent, -1)); - Tcl_ListObjAppendElement(0, pObj, Tcl_NewWideIntObj(nClick)); - Tcl_ListObjAppendElement(0, pObj, Tcl_NewStringObj(zName, -1)); - Tcl_ListObjAppendElement(0, pObj, Tcl_NewIntObj(nByte)); - Tcl_ListObjAppendElement(0, pObj, Tcl_NewWideIntObj(iOffset)); - - rc = Tcl_EvalObjEx(pCall->interp, pObj, TCL_EVAL_GLOBAL|TCL_EVAL_DIRECT); - if( rc ){ - Tcl_BackgroundError(pCall->interp); - } - Tcl_DecrRefCount(pObj); -} - -static void test_instvfs_xdel(void *p){ - InstVfsCall *pCall = (InstVfsCall *)p; - Tcl_DecrRefCount(pCall->pScript); - sqlite3_free(pCall); -} - -static int test_sqlite3_instvfs( - void * clientData, +static int test_vfslog( + void *clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[] ){ - static const char *IV_strs[] = - { "create", "destroy", "reset", "report", "configure", "binarylog", "marker", 0 }; - enum IV_enum { IV_CREATE, IV_DESTROY, IV_RESET, IV_REPORT, IV_CONFIGURE, IV_BINARYLOG, IV_MARKER }; + struct SqliteDb { sqlite3 *db; }; + sqlite3 *db; + Tcl_CmdInfo cmdInfo; + int rc = SQLITE_ERROR; + + static const char *strs[] = { "annotate", "finalize", "new", "register", 0 }; + enum VL_enum { VL_ANNOTATE, VL_FINALIZE, VL_NEW, VL_REGISTER }; int iSub; if( objc<2 ){ Tcl_WrongNumArgs(interp, 1, objv, "SUB-COMMAND ..."); + return TCL_ERROR; } - if( Tcl_GetIndexFromObj(interp, objv[1], IV_strs, "sub-command", 0, &iSub) ){ + if( Tcl_GetIndexFromObj(interp, objv[1], strs, "sub-command", 0, &iSub) ){ return TCL_ERROR; } - switch( (enum IV_enum)iSub ){ - case IV_CREATE: { - char *zParent = 0; - sqlite3_vfs *p; - int isDefault = 0; - if( objc>2 && 0==strcmp("-default", Tcl_GetString(objv[2])) ){ - isDefault = 1; - } - if( (objc-isDefault)!=4 && (objc-isDefault)!=3 ){ - Tcl_WrongNumArgs(interp, 2, objv, "?-default? NAME ?PARENT-VFS?"); - return TCL_ERROR; - } - if( objc==(4+isDefault) ){ - zParent = Tcl_GetString(objv[3+isDefault]); - } - p = sqlite3_instvfs_create(Tcl_GetString(objv[2+isDefault]), zParent); - if( !p ){ - Tcl_AppendResult(interp, "error creating vfs ", 0); - return TCL_ERROR; - } - if( isDefault ){ - sqlite3_vfs_register(p, 1); - } - Tcl_SetObjResult(interp, objv[2]); - break; - } - case IV_BINARYLOG: { - char *zName = 0; - char *zLog = 0; - char *zParent = 0; - sqlite3_vfs *p; - int isDefault = 0; - int isLogdata = 0; - int argbase = 2; - - for(argbase=2; argbase<(objc-2); argbase++){ - if( 0==strcmp("-default", Tcl_GetString(objv[argbase])) ){ - isDefault = 1; - } - else if( 0==strcmp("-parent", Tcl_GetString(objv[argbase])) ){ - argbase++; - zParent = Tcl_GetString(objv[argbase]); - } - else if( 0==strcmp("-logdata", Tcl_GetString(objv[argbase])) ){ - isLogdata = 1; - }else{ - break; - } - } - - if( (objc-argbase)!=2 ){ - Tcl_WrongNumArgs( - interp, 2, objv, "?-default? ?-parent VFS? ?-logdata? 
NAME LOGFILE" - ); - return TCL_ERROR; - } - zName = Tcl_GetString(objv[argbase]); - zLog = Tcl_GetString(objv[argbase+1]); - p = sqlite3_instvfs_binarylog(zName, zParent, zLog, isLogdata); - if( !p ){ - Tcl_AppendResult(interp, "error creating vfs ", 0); - return TCL_ERROR; - } - if( isDefault ){ - sqlite3_vfs_register(p, 1); - } - Tcl_SetObjResult(interp, objv[2]); - break; - } - - case IV_MARKER: { - sqlite3_vfs *p; + switch( (enum VL_enum)iSub ){ + case VL_ANNOTATE: { + int rc; + char *zVfs; + char *zMsg; if( objc!=4 ){ - Tcl_WrongNumArgs(interp, 2, objv, "VFS MARKER"); + Tcl_WrongNumArgs(interp, 3, objv, "VFS"); return TCL_ERROR; } - p = sqlite3_vfs_find(Tcl_GetString(objv[2])); - if( !p || p->xOpen!=instOpen ){ - Tcl_AppendResult(interp, "no such vfs: ", Tcl_GetString(objv[2]), 0); + zVfs = Tcl_GetString(objv[2]); + zMsg = Tcl_GetString(objv[3]); + rc = sqlite3_vfslog_annotate(zVfs, zMsg); + if( rc!=SQLITE_OK ){ + Tcl_AppendResult(interp, "failed", 0); return TCL_ERROR; } - sqlite3_instvfs_binarylog_marker(p, Tcl_GetString(objv[3])); - Tcl_ResetResult(interp); - break; - } - - case IV_CONFIGURE: { - InstVfsCall *pCall; - - sqlite3_vfs *p; - if( objc!=4 ){ - Tcl_WrongNumArgs(interp, 2, objv, "NAME SCRIPT"); - return TCL_ERROR; - } - p = sqlite3_vfs_find(Tcl_GetString(objv[2])); - if( !p || p->xOpen!=instOpen ){ - Tcl_AppendResult(interp, "no such vfs: ", Tcl_GetString(objv[2]), 0); - return TCL_ERROR; - } - - if( strlen(Tcl_GetString(objv[3])) ){ - pCall = (InstVfsCall *)sqlite3_malloc(sizeof(InstVfsCall)); - pCall->interp = interp; - pCall->pScript = Tcl_DuplicateObj(objv[3]); - Tcl_IncrRefCount(pCall->pScript); - sqlite3_instvfs_configure(p, - test_instvfs_xcall, (void *)pCall, test_instvfs_xdel - ); - }else{ - sqlite3_instvfs_configure(p, 0, 0, 0); - } break; } - - case IV_REPORT: - case IV_DESTROY: - case IV_RESET: { - sqlite3_vfs *p; + case VL_FINALIZE: { + int rc; + char *zVfs; if( objc!=3 ){ - Tcl_WrongNumArgs(interp, 2, objv, "NAME"); + Tcl_WrongNumArgs(interp, 2, objv, "VFS"); return TCL_ERROR; } - p = sqlite3_vfs_find(Tcl_GetString(objv[2])); - if( !p || p->xOpen!=instOpen ){ - Tcl_AppendResult(interp, "no such vfs: ", Tcl_GetString(objv[2]), 0); + zVfs = Tcl_GetString(objv[2]); + rc = sqlite3_vfslog_finalize(zVfs); + if( rc!=SQLITE_OK ){ + Tcl_AppendResult(interp, "failed", 0); return TCL_ERROR; } - - if( ((enum IV_enum)iSub)==IV_DESTROY ){ - sqlite3_instvfs_destroy(p); - } - if( ((enum IV_enum)iSub)==IV_RESET ){ - sqlite3_instvfs_reset(p); - } - if( ((enum IV_enum)iSub)==IV_REPORT ){ - int ii; - Tcl_Obj *pRet = Tcl_NewObj(); - - const char *zName = (char *)1; - sqlite3_int64 nClick; - int nCall; - for(ii=1; zName; ii++){ - sqlite3_instvfs_get(p, ii, &zName, &nClick, &nCall); - if( zName ){ - Tcl_Obj *pElem = Tcl_NewObj(); - Tcl_ListObjAppendElement(0, pElem, Tcl_NewStringObj(zName, -1)); - Tcl_ListObjAppendElement(0, pElem, Tcl_NewIntObj(nCall)); - Tcl_ListObjAppendElement(0, pElem, Tcl_NewWideIntObj(nClick)); - Tcl_ListObjAppendElement(0, pRet, pElem); - } - } - - Tcl_SetObjResult(interp, pRet); - } - break; + }; + + case VL_NEW: { + int rc; + char *zVfs; + char *zParent; + char *zLog; + if( objc!=5 ){ + Tcl_WrongNumArgs(interp, 2, objv, "VFS PARENT LOGFILE"); + return TCL_ERROR; + } + zVfs = Tcl_GetString(objv[2]); + zParent = Tcl_GetString(objv[3]); + zLog = Tcl_GetString(objv[4]); + if( *zParent=='\0' ) zParent = 0; + rc = sqlite3_vfslog_new(zVfs, zParent, zLog); + if( rc!=SQLITE_OK ){ + Tcl_AppendResult(interp, "failed", 0); + return TCL_ERROR; + } + break; + }; + + 
case VL_REGISTER: { + char *zDb; + if( objc!=3 ){ + Tcl_WrongNumArgs(interp, 2, objv, "DB"); + return TCL_ERROR; + } +#ifdef SQLITE_OMIT_VIRTUALTABLE + Tcl_AppendResult(interp, "vfslog not available because of " + "SQLITE_OMIT_VIRTUALTABLE", (void*)0); + return TCL_ERROR; +#else + zDb = Tcl_GetString(objv[2]); + if( Tcl_GetCommandInfo(interp, zDb, &cmdInfo) ){ + db = ((struct SqliteDb*)cmdInfo.objClientData)->db; + rc = sqlite3_vfslog_register(db); + } + if( rc!=SQLITE_OK ){ + Tcl_AppendResult(interp, "bad sqlite3 handle: ", zDb, (void*)0); + return TCL_ERROR; + } + break; +#endif } } return TCL_OK; } -#endif /* SQLITE_ENABLE_INSTVFS */ - -/* Alternative implementation of sqlite3_instvfs when the real -** implementation is unavailable. -*/ -#ifndef SQLITE_ENABLE_INSTVFS -static int test_sqlite3_instvfs( - void * clientData, - Tcl_Interp *interp, - int objc, - Tcl_Obj *CONST objv[] -){ - Tcl_AppendResult(interp, - "not compiled with -DSQLITE_ENABLE_INSTVFS; sqlite3_instvfs is " - "unavailable", (char*)0); - return TCL_ERROR; -} -#endif /* !defined(SQLITE_ENABLE_INSTVFS) */ int SqlitetestOsinst_Init(Tcl_Interp *interp){ - Tcl_CreateObjCommand(interp, "sqlite3_instvfs", test_sqlite3_instvfs, 0, 0); + Tcl_CreateObjCommand(interp, "vfslog", test_vfslog, 0, 0); return TCL_OK; } diff --git a/src/test_stat.c b/src/test_stat.c new file mode 100644 index 0000000..894be80 --- /dev/null +++ b/src/test_stat.c @@ -0,0 +1,609 @@ +/* +** 2010 July 12 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains an implementation of the "dbstat" virtual table. +** +** The dbstat virtual table is used to extract low-level formatting +** information from an SQLite database in order to implement the +** "sqlite3_analyzer" utility. See the ../tool/spaceanal.tcl script +** for an example implementation. +*/ + +#include "sqliteInt.h" + +#ifndef SQLITE_OMIT_VIRTUALTABLE + +/* +** Page paths: +** +** The value of the 'path' column describes the path taken from the +** root-node of the b-tree structure to each page. The value of the +** root-node path is '/'. +** +** The value of the path for the left-most child page of the root of +** a b-tree is '/000/'. (Btrees store content ordered from left to right +** so the pages to the left have smaller keys than the pages to the right.) +** The next to left-most child of the root page is +** '/001', and so on, each sibling page identified by a 3-digit hex +** value. The children of the 451st left-most sibling have paths such +** as '/1c2/000/, '/1c2/001/' etc. +** +** Overflow pages are specified by appending a '+' character and a +** six-digit hexadecimal value to the path to the cell they are linked +** from. 
For example, the three overflow pages in a chain linked from +** the left-most cell of the 450th child of the root page are identified +** by the paths: +** +** '/1c2/000+000000' // First page in overflow chain +** '/1c2/000+000001' // Second page in overflow chain +** '/1c2/000+000002' // Third page in overflow chain +** +** If the paths are sorted using the BINARY collation sequence, then +** the overflow pages associated with a cell will appear earlier in the +** sort-order than its child page: +** +** '/1c2/000/' // Left-most child of 451st child of root +*/ +#define VTAB_SCHEMA \ + "CREATE TABLE xx( " \ + " name STRING, /* Name of table or index */" \ + " path INTEGER, /* Path to page from root */" \ + " pageno INTEGER, /* Page number */" \ + " pagetype STRING, /* 'internal', 'leaf' or 'overflow' */" \ + " ncell INTEGER, /* Cells on page (0 for overflow) */" \ + " payload INTEGER, /* Bytes of payload on this page */" \ + " unused INTEGER, /* Bytes of unused space on this page */" \ + " mx_payload INTEGER /* Largest payload size of all cells */" \ + ");" + +#if 0 +#define VTAB_SCHEMA2 \ + "CREATE TABLE yy( " \ + " pageno INTEGER, /* B-tree page number */" \ + " cellno INTEGER, /* Cell number within page */" \ + " local INTEGER, /* Bytes of content stored locally */" \ + " payload INTEGER, /* Total cell payload size */" \ + " novfl INTEGER /* Number of overflow pages */" \ + ");" +#endif + + +typedef struct StatTable StatTable; +typedef struct StatCursor StatCursor; +typedef struct StatPage StatPage; +typedef struct StatCell StatCell; + +struct StatCell { + int nLocal; /* Bytes of local payload */ + u32 iChildPg; /* Child node (or 0 if this is a leaf) */ + int nOvfl; /* Entries in aOvfl[] */ + u32 *aOvfl; /* Array of overflow page numbers */ + int nLastOvfl; /* Bytes of payload on final overflow page */ + int iOvfl; /* Iterates through aOvfl[] */ +}; + +struct StatPage { + u32 iPgno; + DbPage *pPg; + int iCell; + + char *zPath; /* Path to this page */ + + /* Variables populated by statDecodePage(): */ + u8 flags; /* Copy of flags byte */ + int nCell; /* Number of cells on page */ + int nUnused; /* Number of unused bytes on page */ + StatCell *aCell; /* Array of parsed cells */ + u32 iRightChildPg; /* Right-child page number (or 0) */ + int nMxPayload; /* Largest payload of any cell on this page */ +}; + +struct StatCursor { + sqlite3_vtab_cursor base; + sqlite3_stmt *pStmt; /* Iterates through set of root pages */ + int isEof; /* After pStmt has returned SQLITE_DONE */ + + StatPage aPage[32]; + int iPage; /* Current entry in aPage[] */ + + /* Values to return. */ + char *zName; /* Value of 'name' column */ + char *zPath; /* Value of 'path' column */ + u32 iPageno; /* Value of 'pageno' column */ + char *zPagetype; /* Value of 'pagetype' column */ + int nCell; /* Value of 'ncell' column */ + int nPayload; /* Value of 'payload' column */ + int nUnused; /* Value of 'unused' column */ + int nMxPayload; /* Value of 'mx_payload' column */ +}; + +struct StatTable { + sqlite3_vtab base; + sqlite3 *db; +}; + +#ifndef get2byte +# define get2byte(x) ((x)[0]<<8 | (x)[1]) +#endif + +/* +** Connect to or create a statvfs virtual table. 
+*/ +static int statConnect( + sqlite3 *db, + void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVtab, + char **pzErr +){ + StatTable *pTab; + + pTab = (StatTable *)sqlite3_malloc(sizeof(StatTable)); + memset(pTab, 0, sizeof(StatTable)); + pTab->db = db; + + sqlite3_declare_vtab(db, VTAB_SCHEMA); + *ppVtab = &pTab->base; + return SQLITE_OK; +} + +/* +** Disconnect from or destroy a statvfs virtual table. +*/ +static int statDisconnect(sqlite3_vtab *pVtab){ + sqlite3_free(pVtab); + return SQLITE_OK; +} + +/* +** There is no "best-index". This virtual table always does a linear +** scan of the binary VFS log file. +*/ +static int statBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ + + /* Records are always returned in ascending order of (name, path). + ** If this will satisfy the client, set the orderByConsumed flag so that + ** SQLite does not do an external sort. + */ + if( ( pIdxInfo->nOrderBy==1 + && pIdxInfo->aOrderBy[0].iColumn==0 + && pIdxInfo->aOrderBy[0].desc==0 + ) || + ( pIdxInfo->nOrderBy==2 + && pIdxInfo->aOrderBy[0].iColumn==0 + && pIdxInfo->aOrderBy[0].desc==0 + && pIdxInfo->aOrderBy[1].iColumn==1 + && pIdxInfo->aOrderBy[1].desc==0 + ) + ){ + pIdxInfo->orderByConsumed = 1; + } + + pIdxInfo->estimatedCost = 10.0; + return SQLITE_OK; +} + +/* +** Open a new statvfs cursor. +*/ +static int statOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ + StatTable *pTab = (StatTable *)pVTab; + StatCursor *pCsr; + int rc; + + pCsr = (StatCursor *)sqlite3_malloc(sizeof(StatCursor)); + memset(pCsr, 0, sizeof(StatCursor)); + pCsr->base.pVtab = pVTab; + + rc = sqlite3_prepare_v2(pTab->db, + "SELECT 'sqlite_master' AS name, 1 AS rootpage, 'table' AS type" + " UNION ALL " + "SELECT name, rootpage, type FROM sqlite_master WHERE rootpage!=0" + " ORDER BY name", -1, + &pCsr->pStmt, 0 + ); + if( rc!=SQLITE_OK ){ + sqlite3_free(pCsr); + return rc; + } + + *ppCursor = (sqlite3_vtab_cursor *)pCsr; + return SQLITE_OK; +} + +static void statClearPage(StatPage *p){ + int i; + for(i=0; inCell; i++){ + sqlite3_free(p->aCell[i].aOvfl); + } + sqlite3PagerUnref(p->pPg); + sqlite3_free(p->aCell); + sqlite3_free(p->zPath); + memset(p, 0, sizeof(StatPage)); +} + +static void statResetCsr(StatCursor *pCsr){ + int i; + sqlite3_reset(pCsr->pStmt); + for(i=0; iaPage); i++){ + statClearPage(&pCsr->aPage[i]); + } + pCsr->iPage = 0; + sqlite3_free(pCsr->zPath); + pCsr->zPath = 0; +} + +/* +** Close a statvfs cursor. +*/ +static int statClose(sqlite3_vtab_cursor *pCursor){ + StatCursor *pCsr = (StatCursor *)pCursor; + statResetCsr(pCsr); + sqlite3_finalize(pCsr->pStmt); + sqlite3_free(pCsr); + return SQLITE_OK; +} + +static void getLocalPayload( + int nUsable, /* Usable bytes per page */ + u8 flags, /* Page flags */ + int nTotal, /* Total record (payload) size */ + int *pnLocal /* OUT: Bytes stored locally */ +){ + int nLocal; + int nMinLocal; + int nMaxLocal; + + if( flags==0x0D ){ /* Table leaf node */ + nMinLocal = (nUsable - 12) * 32 / 255 - 23; + nMaxLocal = nUsable - 35; + }else{ /* Index interior and leaf nodes */ + nMinLocal = (nUsable - 12) * 32 / 255 - 23; + nMaxLocal = (nUsable - 12) * 64 / 255 - 23; + } + + nLocal = nMinLocal + (nTotal - nMinLocal) % (nUsable - 4); + if( nLocal>nMaxLocal ) nLocal = nMinLocal; + *pnLocal = nLocal; +} + +static int statDecodePage(Btree *pBt, StatPage *p){ + int nUnused; + int iOff; + int nHdr; + int isLeaf; + + u8 *aData = sqlite3PagerGetData(p->pPg); + u8 *aHdr = &aData[p->iPgno==1 ? 
100 : 0];
+
+  p->flags = aHdr[0];
+  p->nCell = get2byte(&aHdr[3]);
+  p->nMxPayload = 0;
+
+  isLeaf = (p->flags==0x0A || p->flags==0x0D);
+  nHdr = 12 - isLeaf*4 + (p->iPgno==1)*100;
+
+  nUnused = get2byte(&aHdr[5]) - nHdr - 2*p->nCell;
+  nUnused += (int)aHdr[7];
+  iOff = get2byte(&aHdr[1]);
+  while( iOff ){
+    nUnused += get2byte(&aData[iOff+2]);
+    iOff = get2byte(&aData[iOff]);
+  }
+  p->nUnused = nUnused;
+  p->iRightChildPg = isLeaf ? 0 : sqlite3Get4byte(&aHdr[8]);
+
+  if( p->nCell ){
+    int i;                        /* Used to iterate through cells */
+    int nUsable = sqlite3BtreeGetPageSize(pBt) - sqlite3BtreeGetReserve(pBt);
+
+    p->aCell = sqlite3_malloc((p->nCell+1) * sizeof(StatCell));
+    memset(p->aCell, 0, (p->nCell+1) * sizeof(StatCell));
+
+    for(i=0; i<p->nCell; i++){
+      StatCell *pCell = &p->aCell[i];
+
+      iOff = get2byte(&aData[nHdr+i*2]);
+      if( !isLeaf ){
+        pCell->iChildPg = sqlite3Get4byte(&aData[iOff]);
+        iOff += 4;
+      }
+      if( p->flags==0x05 ){
+        /* A table interior node. nPayload==0. */
+      }else{
+        u32 nPayload;             /* Bytes of payload total (local+overflow) */
+        int nLocal;               /* Bytes of payload stored locally */
+        iOff += getVarint32(&aData[iOff], nPayload);
+        if( p->flags==0x0D ){
+          u64 dummy;
+          iOff += sqlite3GetVarint(&aData[iOff], &dummy);
+        }
+        if( nPayload>p->nMxPayload ) p->nMxPayload = nPayload;
+        getLocalPayload(nUsable, p->flags, nPayload, &nLocal);
+        pCell->nLocal = nLocal;
+        assert( nPayload>=nLocal );
+        assert( nLocal<=(nUsable-35) );
+        if( nPayload>nLocal ){
+          int j;
+          int nOvfl = ((nPayload - nLocal) + nUsable-4 - 1) / (nUsable - 4);
+          pCell->nLastOvfl = (nPayload-nLocal) - (nOvfl-1) * (nUsable-4);
+          pCell->nOvfl = nOvfl;
+          pCell->aOvfl = sqlite3_malloc(sizeof(u32)*nOvfl);
+          pCell->aOvfl[0] = sqlite3Get4byte(&aData[iOff+nLocal]);
+          for(j=1; j<nOvfl; j++){
+            int rc;
+            u32 iPrev = pCell->aOvfl[j-1];
+            DbPage *pPg = 0;
+            rc = sqlite3PagerGet(sqlite3BtreePager(pBt), iPrev, &pPg);
+            if( rc!=SQLITE_OK ){
+              assert( pPg==0 );
+              return rc;
+            }
+            pCell->aOvfl[j] = sqlite3Get4byte(sqlite3PagerGetData(pPg));
+            sqlite3PagerUnref(pPg);
+          }
+        }
+      }
+    }
+  }
+
+  return SQLITE_OK;
+}
+
+/*
+** Move a statvfs cursor to the next entry in the file.
+*/
+static int statNext(sqlite3_vtab_cursor *pCursor){
+  int rc;
+  int nPayload;
+  StatCursor *pCsr = (StatCursor *)pCursor;
+  StatTable *pTab = (StatTable *)pCursor->pVtab;
+  Btree *pBt = pTab->db->aDb[0].pBt;
+  Pager *pPager = sqlite3BtreePager(pBt);
+
+  sqlite3_free(pCsr->zPath);
+  pCsr->zPath = 0;
+
+  if( pCsr->aPage[0].pPg==0 ){
+    rc = sqlite3_step(pCsr->pStmt);
+    if( rc==SQLITE_ROW ){
+      u32 iRoot = sqlite3_column_int64(pCsr->pStmt, 1);
+      rc = sqlite3PagerGet(pPager, iRoot, &pCsr->aPage[0].pPg);
+      pCsr->aPage[0].iPgno = iRoot;
+      pCsr->aPage[0].iCell = 0;
+      pCsr->aPage[0].zPath = sqlite3_mprintf("/");
+      pCsr->iPage = 0;
+    }else{
+      pCsr->isEof = 1;
+      return sqlite3_reset(pCsr->pStmt);
+    }
+  }else{
+
+    /* Page p itself has already been visited. */
+    StatPage *p = &pCsr->aPage[pCsr->iPage];
+
+    while( p->iCell<p->nCell ){
+      StatCell *pCell = &p->aCell[p->iCell];
+      if( pCell->iOvfl<pCell->nOvfl ){
+        int nUsable = sqlite3BtreeGetPageSize(pBt)-sqlite3BtreeGetReserve(pBt);
+        pCsr->zName = (char *)sqlite3_column_text(pCsr->pStmt, 0);
+        pCsr->iPageno = pCell->aOvfl[pCell->iOvfl];
+        pCsr->zPagetype = "overflow";
+        pCsr->nCell = 0;
+        pCsr->nMxPayload = 0;
+        pCsr->zPath = sqlite3_mprintf(
+            "%s%.3x+%.6x", p->zPath, p->iCell, pCell->iOvfl
+        );
+        if( pCell->iOvfl<pCell->nOvfl-1 ){
+          pCsr->nUnused = 0;
+          pCsr->nPayload = nUsable - 4;
+        }else{
+          pCsr->nPayload = pCell->nLastOvfl;
+          pCsr->nUnused = nUsable - 4 - pCsr->nPayload;
+        }
+        pCell->iOvfl++;
+        return SQLITE_OK;
+      }
+      if( p->iRightChildPg ) break;
+      p->iCell++;
+    }
+
+    while( !p->iRightChildPg || p->iCell>p->nCell ){
+      statClearPage(p);
+      if( pCsr->iPage==0 ) return statNext(pCursor);
+      pCsr->iPage--;
+      p = &pCsr->aPage[pCsr->iPage];
+    }
+    pCsr->iPage++;
+    assert( p==&pCsr->aPage[pCsr->iPage-1] );
+
+    if( p->iCell==p->nCell ){
+      p[1].iPgno = p->iRightChildPg;
+    }else{
+      p[1].iPgno = p->aCell[p->iCell].iChildPg;
+    }
+    rc = sqlite3PagerGet(pPager, p[1].iPgno, &p[1].pPg);
+    p[1].iCell = 0;
+    p[1].zPath = sqlite3_mprintf("%s%.3x/", p->zPath, p->iCell);
+    p->iCell++;
+  }
+
+
+  /* Populate the StatCursor fields with the values to be returned
+  ** by the xColumn() and xRowid() methods.
+  */
+  if( rc==SQLITE_OK ){
+    int i;
+    StatPage *p = &pCsr->aPage[pCsr->iPage];
+    pCsr->zName = (char *)sqlite3_column_text(pCsr->pStmt, 0);
+    pCsr->iPageno = p->iPgno;
+
+    statDecodePage(pBt, p);
+
+    switch( p->flags ){
+      case 0x05:             /* table internal */
+      case 0x02:             /* index internal */
+        pCsr->zPagetype = "internal";
+        break;
+      case 0x0D:             /* table leaf */
+      case 0x0A:             /* index leaf */
+        pCsr->zPagetype = "leaf";
+        break;
+      default:
+        pCsr->zPagetype = "corrupted";
+        break;
+    }
+    pCsr->nCell = p->nCell;
+    pCsr->nUnused = p->nUnused;
+    pCsr->nMxPayload = p->nMxPayload;
+    pCsr->zPath = sqlite3_mprintf("%s", p->zPath);
+    nPayload = 0;
+    for(i=0; i<p->nCell; i++){
+      nPayload += p->aCell[i].nLocal;
+    }
+    pCsr->nPayload = nPayload;
+  }
+
+  return rc;
+}
+
+static int statEof(sqlite3_vtab_cursor *pCursor){
+  StatCursor *pCsr = (StatCursor *)pCursor;
+  return pCsr->isEof;
+}
+
+static int statFilter(
+  sqlite3_vtab_cursor *pCursor,
+  int idxNum, const char *idxStr,
+  int argc, sqlite3_value **argv
+){
+  sqlite3 *db = ((StatTable *)(pCursor->pVtab))->db;
+  StatCursor *pCsr = (StatCursor *)pCursor;
+  int nPage = 0;
+
+  statResetCsr((StatCursor *)pCursor);
+  sqlite3PagerPagecount(sqlite3BtreePager(db->aDb[0].pBt), &nPage);
+  if( nPage==0 ){
+    pCsr->isEof = 1;
+    return SQLITE_OK;
+  }
+
+  return statNext(pCursor);
+}
+
+static int statColumn(
+  sqlite3_vtab_cursor *pCursor,
+  sqlite3_context *ctx,
+  int i
+){
+  StatCursor *pCsr = (StatCursor *)pCursor;
+  switch( i ){
+    case 0:            /* name */
+      sqlite3_result_text(ctx, pCsr->zName, -1, SQLITE_STATIC);
+      break;
+    case 1:            /* path */
+      sqlite3_result_text(ctx, pCsr->zPath, -1, SQLITE_TRANSIENT);
+      break;
+    case 2:            /* pageno */
+      sqlite3_result_int64(ctx, pCsr->iPageno);
+      break;
+    case 3:            /* pagetype */
+      sqlite3_result_text(ctx, pCsr->zPagetype, -1, SQLITE_STATIC);
+      break;
+    case 4:            /* ncell */
+      sqlite3_result_int(ctx, pCsr->nCell);
+      break;
+    case 5:            /* payload */
+      sqlite3_result_int(ctx, pCsr->nPayload);
+      break;
+    case 6:            /* unused */
+      sqlite3_result_int(ctx, pCsr->nUnused);
+      break;
+    case 7:            /* mx_payload */
+      sqlite3_result_int(ctx, pCsr->nMxPayload);
+      break;
+  }
+  return SQLITE_OK;
+}
+
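The xColumn() method above maps the cursor fields onto the eight columns declared by VTAB_SCHEMA. A minimal sketch of how the module can be exercised from the Tcl test harness, using the register_dbstat_vtab command defined further below (the database file name is illustrative):

    sqlite3 db test.db
    register_dbstat_vtab db     ;# make the "dbstat" module available to [db]
    db eval { CREATE VIRTUAL TABLE temp.stat USING dbstat }
    db eval {
      SELECT name, path, pageno, pagetype, ncell, payload, unused, mx_payload
      FROM temp.stat ORDER BY name, path
    } row { puts "$row(name) $row(path) $row(pageno) $row(pagetype)" }
    db close

The ORDER BY above matches the (name, path) ordering that statBestIndex() reports as already consumed, so SQLite does not add an external sort.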
+static int statRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ + StatCursor *pCsr = (StatCursor *)pCursor; + *pRowid = pCsr->iPageno; + return SQLITE_OK; +} + +int sqlite3_dbstat_register(sqlite3 *db){ + static sqlite3_module dbstat_module = { + 0, /* iVersion */ + statConnect, /* xCreate */ + statConnect, /* xConnect */ + statBestIndex, /* xBestIndex */ + statDisconnect, /* xDisconnect */ + statDisconnect, /* xDestroy */ + statOpen, /* xOpen - open a cursor */ + statClose, /* xClose - close a cursor */ + statFilter, /* xFilter - configure scan constraints */ + statNext, /* xNext - advance a cursor */ + statEof, /* xEof - check for end of scan */ + statColumn, /* xColumn - read data */ + statRowid, /* xRowid - read data */ + 0, /* xUpdate */ + 0, /* xBegin */ + 0, /* xSync */ + 0, /* xCommit */ + 0, /* xRollback */ + 0, /* xFindMethod */ + 0, /* xRename */ + }; + sqlite3_create_module(db, "dbstat", &dbstat_module, 0); + return SQLITE_OK; +} + +#endif + +#ifdef SQLITE_TEST +#include + +static int test_dbstat( + void *clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ +#ifdef SQLITE_OMIT_VIRTUALTABLE + Tcl_AppendResult(interp, "dbstat not available because of " + "SQLITE_OMIT_VIRTUALTABLE", (void*)0); + return TCL_ERROR; +#else + struct SqliteDb { sqlite3 *db; }; + char *zDb; + Tcl_CmdInfo cmdInfo; + + if( objc!=2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "DB"); + return TCL_ERROR; + } + + zDb = Tcl_GetString(objv[1]); + if( Tcl_GetCommandInfo(interp, zDb, &cmdInfo) ){ + sqlite3* db = ((struct SqliteDb*)cmdInfo.objClientData)->db; + sqlite3_dbstat_register(db); + } + return TCL_OK; +#endif +} + +int SqlitetestStat_Init(Tcl_Interp *interp){ + Tcl_CreateObjCommand(interp, "register_dbstat_vtab", test_dbstat, 0, 0); + return TCL_OK; +} +#endif diff --git a/src/test_thread.c b/src/test_thread.c index 6a319fa..af38d91 100644 --- a/src/test_thread.c +++ b/src/test_thread.c @@ -58,6 +58,7 @@ static Tcl_ObjCmdProc blocking_step_proc; static Tcl_ObjCmdProc blocking_prepare_v2_proc; #endif int Sqlitetest1_Init(Tcl_Interp *); +int Sqlite3_Init(Tcl_Interp *); /* Functions from test1.c */ void *sqlite3TestTextToPtr(const char *); @@ -124,6 +125,7 @@ static Tcl_ThreadCreateType tclScriptThread(ClientData pSqlThread){ #endif Sqlitetest1_Init(interp); Sqlitetest_mutex_Init(interp); + Sqlite3_Init(interp); rc = Tcl_Eval(interp, p->zScript); pRes = Tcl_GetObjResult(interp); @@ -148,6 +150,8 @@ static Tcl_ThreadCreateType tclScriptThread(ClientData pSqlThread){ Tcl_DecrRefCount(pList); Tcl_DecrRefCount(pRes); Tcl_DeleteInterp(interp); + while( Tcl_DoOneEvent(TCL_ALL_EVENTS|TCL_DONT_WAIT) ); + Tcl_ExitThread(0); TCL_THREAD_CREATE_RETURN; } diff --git a/src/test_vfs.c b/src/test_vfs.c new file mode 100644 index 0000000..f577bf5 --- /dev/null +++ b/src/test_vfs.c @@ -0,0 +1,1408 @@ +/* +** 2010 May 05 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +*/ +#if SQLITE_TEST /* This file is used for testing only */ + +/* +** This file contains the implementation of the Tcl [testvfs] command, +** used to create SQLite VFS implementations with various properties and +** instrumentation to support testing SQLite. +** +** testvfs VFSNAME ?OPTIONS? 
+** +** Available options are: +** +** -noshm BOOLEAN (True to omit shm methods. Default false) +** -default BOOLEAN (True to make the vfs default. Default false) +** -szosfile INTEGER (Value for sqlite3_vfs.szOsFile) +** -mxpathname INTEGER (Value for sqlite3_vfs.mxPathname) +*/ + +#include "sqlite3.h" +#include "sqliteInt.h" + +typedef struct Testvfs Testvfs; +typedef struct TestvfsShm TestvfsShm; +typedef struct TestvfsBuffer TestvfsBuffer; +typedef struct TestvfsFile TestvfsFile; +typedef struct TestvfsFd TestvfsFd; + +/* +** An open file handle. +*/ +struct TestvfsFile { + sqlite3_file base; /* Base class. Must be first */ + TestvfsFd *pFd; /* File data */ +}; +#define tvfsGetFd(pFile) (((TestvfsFile *)pFile)->pFd) + +struct TestvfsFd { + sqlite3_vfs *pVfs; /* The VFS */ + const char *zFilename; /* Filename as passed to xOpen() */ + sqlite3_file *pReal; /* The real, underlying file descriptor */ + Tcl_Obj *pShmId; /* Shared memory id for Tcl callbacks */ + + TestvfsBuffer *pShm; /* Shared memory buffer */ + u32 excllock; /* Mask of exclusive locks */ + u32 sharedlock; /* Mask of shared locks */ + TestvfsFd *pNext; /* Next handle opened on the same file */ +}; + + +#define FAULT_INJECT_NONE 0 +#define FAULT_INJECT_TRANSIENT 1 +#define FAULT_INJECT_PERSISTENT 2 + +typedef struct TestFaultInject TestFaultInject; +struct TestFaultInject { + int iCnt; /* Remaining calls before fault injection */ + int eFault; /* A FAULT_INJECT_* value */ + int nFail; /* Number of faults injected */ +}; + +/* +** An instance of this structure is allocated for each VFS created. The +** sqlite3_vfs.pAppData field of the VFS structure registered with SQLite +** is set to point to it. +*/ +struct Testvfs { + char *zName; /* Name of this VFS */ + sqlite3_vfs *pParent; /* The VFS to use for file IO */ + sqlite3_vfs *pVfs; /* The testvfs registered with SQLite */ + Tcl_Interp *interp; /* Interpreter to run script in */ + Tcl_Obj *pScript; /* Script to execute */ + int nScript; /* Number of elements in array apScript */ + Tcl_Obj **apScript; /* Array version of pScript */ + TestvfsBuffer *pBuffer; /* List of shared buffers */ + int isNoshm; + + int mask; /* Mask controlling [script] and [ioerr] */ + + TestFaultInject ioerr_err; + TestFaultInject full_err; + TestFaultInject cantopen_err; + +#if 0 + int iIoerrCnt; + int ioerr; + int nIoerrFail; + int iFullCnt; + int fullerr; + int nFullFail; +#endif + + int iDevchar; + int iSectorsize; +}; + +/* +** The Testvfs.mask variable is set to a combination of the following. +** If a bit is clear in Testvfs.mask, then calls made by SQLite to the +** corresponding VFS method is ignored for purposes of: +** +** + Simulating IO errors, and +** + Invoking the Tcl callback script. +*/ +#define TESTVFS_SHMOPEN_MASK 0x00000001 +#define TESTVFS_SHMLOCK_MASK 0x00000010 +#define TESTVFS_SHMMAP_MASK 0x00000020 +#define TESTVFS_SHMBARRIER_MASK 0x00000040 +#define TESTVFS_SHMCLOSE_MASK 0x00000080 + +#define TESTVFS_OPEN_MASK 0x00000100 +#define TESTVFS_SYNC_MASK 0x00000200 +#define TESTVFS_DELETE_MASK 0x00000400 +#define TESTVFS_CLOSE_MASK 0x00000800 +#define TESTVFS_WRITE_MASK 0x00001000 +#define TESTVFS_TRUNCATE_MASK 0x00002000 +#define TESTVFS_ACCESS_MASK 0x00004000 +#define TESTVFS_ALL_MASK 0x00007FFF + + +#define TESTVFS_MAX_PAGES 1024 + +/* +** A shared-memory buffer. There is one of these objects for each shared +** memory region opened by clients. If two clients open the same file, +** there are two TestvfsFile structures but only one TestvfsBuffer structure. 
+*/ +struct TestvfsBuffer { + char *zFile; /* Associated file name */ + int pgsz; /* Page size */ + u8 *aPage[TESTVFS_MAX_PAGES]; /* Array of ckalloc'd pages */ + TestvfsFd *pFile; /* List of open handles */ + TestvfsBuffer *pNext; /* Next in linked list of all buffers */ +}; + + +#define PARENTVFS(x) (((Testvfs *)((x)->pAppData))->pParent) + +#define TESTVFS_MAX_ARGS 12 + + +/* +** Method declarations for TestvfsFile. +*/ +static int tvfsClose(sqlite3_file*); +static int tvfsRead(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); +static int tvfsWrite(sqlite3_file*,const void*,int iAmt, sqlite3_int64 iOfst); +static int tvfsTruncate(sqlite3_file*, sqlite3_int64 size); +static int tvfsSync(sqlite3_file*, int flags); +static int tvfsFileSize(sqlite3_file*, sqlite3_int64 *pSize); +static int tvfsLock(sqlite3_file*, int); +static int tvfsUnlock(sqlite3_file*, int); +static int tvfsCheckReservedLock(sqlite3_file*, int *); +static int tvfsFileControl(sqlite3_file*, int op, void *pArg); +static int tvfsSectorSize(sqlite3_file*); +static int tvfsDeviceCharacteristics(sqlite3_file*); + +/* +** Method declarations for tvfs_vfs. +*/ +static int tvfsOpen(sqlite3_vfs*, const char *, sqlite3_file*, int , int *); +static int tvfsDelete(sqlite3_vfs*, const char *zName, int syncDir); +static int tvfsAccess(sqlite3_vfs*, const char *zName, int flags, int *); +static int tvfsFullPathname(sqlite3_vfs*, const char *zName, int, char *zOut); +#ifndef SQLITE_OMIT_LOAD_EXTENSION +static void *tvfsDlOpen(sqlite3_vfs*, const char *zFilename); +static void tvfsDlError(sqlite3_vfs*, int nByte, char *zErrMsg); +static void (*tvfsDlSym(sqlite3_vfs*,void*, const char *zSymbol))(void); +static void tvfsDlClose(sqlite3_vfs*, void*); +#endif /* SQLITE_OMIT_LOAD_EXTENSION */ +static int tvfsRandomness(sqlite3_vfs*, int nByte, char *zOut); +static int tvfsSleep(sqlite3_vfs*, int microseconds); +static int tvfsCurrentTime(sqlite3_vfs*, double*); + +static int tvfsShmOpen(sqlite3_file*); +static int tvfsShmLock(sqlite3_file*, int , int, int); +static int tvfsShmMap(sqlite3_file*,int,int,int, void volatile **); +static void tvfsShmBarrier(sqlite3_file*); +static int tvfsShmUnmap(sqlite3_file*, int); + +static sqlite3_io_methods tvfs_io_methods = { + 2, /* iVersion */ + tvfsClose, /* xClose */ + tvfsRead, /* xRead */ + tvfsWrite, /* xWrite */ + tvfsTruncate, /* xTruncate */ + tvfsSync, /* xSync */ + tvfsFileSize, /* xFileSize */ + tvfsLock, /* xLock */ + tvfsUnlock, /* xUnlock */ + tvfsCheckReservedLock, /* xCheckReservedLock */ + tvfsFileControl, /* xFileControl */ + tvfsSectorSize, /* xSectorSize */ + tvfsDeviceCharacteristics, /* xDeviceCharacteristics */ + tvfsShmMap, /* xShmMap */ + tvfsShmLock, /* xShmLock */ + tvfsShmBarrier, /* xShmBarrier */ + tvfsShmUnmap /* xShmUnmap */ +}; + +static int tvfsResultCode(Testvfs *p, int *pRc){ + struct errcode { + int eCode; + const char *zCode; + } aCode[] = { + { SQLITE_OK, "SQLITE_OK" }, + { SQLITE_ERROR, "SQLITE_ERROR" }, + { SQLITE_IOERR, "SQLITE_IOERR" }, + { SQLITE_LOCKED, "SQLITE_LOCKED" }, + { SQLITE_BUSY, "SQLITE_BUSY" }, + }; + + const char *z; + int i; + + z = Tcl_GetStringResult(p->interp); + for(i=0; ieFault ){ + p->iCnt--; + if( p->iCnt==0 || (p->iCnt<0 && p->eFault==FAULT_INJECT_PERSISTENT ) ){ + ret = 1; + p->nFail++; + } + } + return ret; +} + + +static int tvfsInjectIoerr(Testvfs *p){ + return tvfsInjectFault(&p->ioerr_err); +} + +static int tvfsInjectFullerr(Testvfs *p){ + return tvfsInjectFault(&p->full_err); +} +static int tvfsInjectCantopenerr(Testvfs *p){ + 
return tvfsInjectFault(&p->cantopen_err); +} + + +static void tvfsExecTcl( + Testvfs *p, + const char *zMethod, + Tcl_Obj *arg1, + Tcl_Obj *arg2, + Tcl_Obj *arg3 +){ + int rc; /* Return code from Tcl_EvalObj() */ + int nArg; /* Elements in eval'd list */ + int nScript; + Tcl_Obj ** ap; + + assert( p->pScript ); + + if( !p->apScript ){ + int nByte; + int i; + if( TCL_OK!=Tcl_ListObjGetElements(p->interp, p->pScript, &nScript, &ap) ){ + Tcl_BackgroundError(p->interp); + Tcl_ResetResult(p->interp); + return; + } + p->nScript = nScript; + nByte = (nScript+TESTVFS_MAX_ARGS)*sizeof(Tcl_Obj *); + p->apScript = (Tcl_Obj **)ckalloc(nByte); + memset(p->apScript, 0, nByte); + for(i=0; iapScript[i] = ap[i]; + } + } + + p->apScript[p->nScript] = Tcl_NewStringObj(zMethod, -1); + p->apScript[p->nScript+1] = arg1; + p->apScript[p->nScript+2] = arg2; + p->apScript[p->nScript+3] = arg3; + + for(nArg=p->nScript; p->apScript[nArg]; nArg++){ + Tcl_IncrRefCount(p->apScript[nArg]); + } + + rc = Tcl_EvalObjv(p->interp, nArg, p->apScript, TCL_EVAL_GLOBAL); + if( rc!=TCL_OK ){ + Tcl_BackgroundError(p->interp); + Tcl_ResetResult(p->interp); + } + + for(nArg=p->nScript; p->apScript[nArg]; nArg++){ + Tcl_DecrRefCount(p->apScript[nArg]); + p->apScript[nArg] = 0; + } +} + + +/* +** Close an tvfs-file. +*/ +static int tvfsClose(sqlite3_file *pFile){ + int rc; + TestvfsFile *pTestfile = (TestvfsFile *)pFile; + TestvfsFd *pFd = pTestfile->pFd; + Testvfs *p = (Testvfs *)pFd->pVfs->pAppData; + + if( p->pScript && p->mask&TESTVFS_CLOSE_MASK ){ + tvfsExecTcl(p, "xClose", + Tcl_NewStringObj(pFd->zFilename, -1), pFd->pShmId, 0 + ); + } + + if( pFd->pShmId ){ + Tcl_DecrRefCount(pFd->pShmId); + pFd->pShmId = 0; + } + if( pFile->pMethods ){ + ckfree((char *)pFile->pMethods); + } + rc = sqlite3OsClose(pFd->pReal); + ckfree((char *)pFd); + pTestfile->pFd = 0; + return rc; +} + +/* +** Read data from an tvfs-file. +*/ +static int tvfsRead( + sqlite3_file *pFile, + void *zBuf, + int iAmt, + sqlite_int64 iOfst +){ + TestvfsFd *p = tvfsGetFd(pFile); + return sqlite3OsRead(p->pReal, zBuf, iAmt, iOfst); +} + +/* +** Write data to an tvfs-file. +*/ +static int tvfsWrite( + sqlite3_file *pFile, + const void *zBuf, + int iAmt, + sqlite_int64 iOfst +){ + int rc = SQLITE_OK; + TestvfsFd *pFd = tvfsGetFd(pFile); + Testvfs *p = (Testvfs *)pFd->pVfs->pAppData; + + if( p->pScript && p->mask&TESTVFS_WRITE_MASK ){ + tvfsExecTcl(p, "xWrite", + Tcl_NewStringObj(pFd->zFilename, -1), pFd->pShmId, 0 + ); + tvfsResultCode(p, &rc); + } + + if( rc==SQLITE_OK && tvfsInjectFullerr(p) ){ + rc = SQLITE_FULL; + } + if( rc==SQLITE_OK && p->mask&TESTVFS_WRITE_MASK && tvfsInjectIoerr(p) ){ + rc = SQLITE_IOERR; + } + + if( rc==SQLITE_OK ){ + rc = sqlite3OsWrite(pFd->pReal, zBuf, iAmt, iOfst); + } + return rc; +} + +/* +** Truncate an tvfs-file. +*/ +static int tvfsTruncate(sqlite3_file *pFile, sqlite_int64 size){ + int rc = SQLITE_OK; + TestvfsFd *pFd = tvfsGetFd(pFile); + Testvfs *p = (Testvfs *)pFd->pVfs->pAppData; + + if( p->pScript && p->mask&TESTVFS_TRUNCATE_MASK ){ + tvfsExecTcl(p, "xTruncate", + Tcl_NewStringObj(pFd->zFilename, -1), pFd->pShmId, 0 + ); + tvfsResultCode(p, &rc); + } + + if( rc==SQLITE_OK ){ + rc = sqlite3OsTruncate(pFd->pReal, size); + } + return rc; +} + +/* +** Sync an tvfs-file. 
+*/ +static int tvfsSync(sqlite3_file *pFile, int flags){ + int rc = SQLITE_OK; + TestvfsFd *pFd = tvfsGetFd(pFile); + Testvfs *p = (Testvfs *)pFd->pVfs->pAppData; + + if( p->pScript && p->mask&TESTVFS_SYNC_MASK ){ + char *zFlags; + + switch( flags ){ + case SQLITE_SYNC_NORMAL: + zFlags = "normal"; + break; + case SQLITE_SYNC_FULL: + zFlags = "full"; + break; + case SQLITE_SYNC_NORMAL|SQLITE_SYNC_DATAONLY: + zFlags = "normal|dataonly"; + break; + case SQLITE_SYNC_FULL|SQLITE_SYNC_DATAONLY: + zFlags = "full|dataonly"; + break; + default: + assert(0); + } + + tvfsExecTcl(p, "xSync", + Tcl_NewStringObj(pFd->zFilename, -1), pFd->pShmId, + Tcl_NewStringObj(zFlags, -1) + ); + tvfsResultCode(p, &rc); + } + + if( rc==SQLITE_OK && tvfsInjectFullerr(p) ) rc = SQLITE_FULL; + + if( rc==SQLITE_OK ){ + rc = sqlite3OsSync(pFd->pReal, flags); + } + + return rc; +} + +/* +** Return the current file-size of an tvfs-file. +*/ +static int tvfsFileSize(sqlite3_file *pFile, sqlite_int64 *pSize){ + TestvfsFd *p = tvfsGetFd(pFile); + return sqlite3OsFileSize(p->pReal, pSize); +} + +/* +** Lock an tvfs-file. +*/ +static int tvfsLock(sqlite3_file *pFile, int eLock){ + TestvfsFd *p = tvfsGetFd(pFile); + return sqlite3OsLock(p->pReal, eLock); +} + +/* +** Unlock an tvfs-file. +*/ +static int tvfsUnlock(sqlite3_file *pFile, int eLock){ + TestvfsFd *p = tvfsGetFd(pFile); + return sqlite3OsUnlock(p->pReal, eLock); +} + +/* +** Check if another file-handle holds a RESERVED lock on an tvfs-file. +*/ +static int tvfsCheckReservedLock(sqlite3_file *pFile, int *pResOut){ + TestvfsFd *p = tvfsGetFd(pFile); + return sqlite3OsCheckReservedLock(p->pReal, pResOut); +} + +/* +** File control method. For custom operations on an tvfs-file. +*/ +static int tvfsFileControl(sqlite3_file *pFile, int op, void *pArg){ + TestvfsFd *p = tvfsGetFd(pFile); + return sqlite3OsFileControl(p->pReal, op, pArg); +} + +/* +** Return the sector-size in bytes for an tvfs-file. +*/ +static int tvfsSectorSize(sqlite3_file *pFile){ + TestvfsFd *pFd = tvfsGetFd(pFile); + Testvfs *p = (Testvfs *)pFd->pVfs->pAppData; + if( p->iSectorsize>=0 ){ + return p->iSectorsize; + } + return sqlite3OsSectorSize(pFd->pReal); +} + +/* +** Return the device characteristic flags supported by an tvfs-file. +*/ +static int tvfsDeviceCharacteristics(sqlite3_file *pFile){ + TestvfsFd *pFd = tvfsGetFd(pFile); + Testvfs *p = (Testvfs *)pFd->pVfs->pAppData; + if( p->iDevchar>=0 ){ + return p->iDevchar; + } + return sqlite3OsDeviceCharacteristics(pFd->pReal); +} + +/* +** Open an tvfs file handle. +*/ +static int tvfsOpen( + sqlite3_vfs *pVfs, + const char *zName, + sqlite3_file *pFile, + int flags, + int *pOutFlags +){ + int rc; + TestvfsFile *pTestfile = (TestvfsFile *)pFile; + TestvfsFd *pFd; + Tcl_Obj *pId = 0; + Testvfs *p = (Testvfs *)pVfs->pAppData; + + pFd = (TestvfsFd *)ckalloc(sizeof(TestvfsFd) + PARENTVFS(pVfs)->szOsFile); + memset(pFd, 0, sizeof(TestvfsFd) + PARENTVFS(pVfs)->szOsFile); + pFd->pShm = 0; + pFd->pShmId = 0; + pFd->zFilename = zName; + pFd->pVfs = pVfs; + pFd->pReal = (sqlite3_file *)&pFd[1]; + pTestfile->pFd = pFd; + + /* Evaluate the Tcl script: + ** + ** SCRIPT xOpen FILENAME + ** + ** If the script returns an SQLite error code other than SQLITE_OK, an + ** error is returned to the caller. If it returns SQLITE_OK, the new + ** connection is named "anon". Otherwise, the value returned by the + ** script is used as the connection name. 
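+  **
+  ** For example (an illustrative sketch only; the proc name is arbitrary),
+  ** a callback registered with the testvfs object's [script] method could
+  ** name every new connection "db1":
+  **
+  **   proc vfs_callback {method args} {
+  **     if {$method eq "xOpen"} { return db1 }
+  **     return SQLITE_OK
+  **   }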
+ */ + Tcl_ResetResult(p->interp); + if( p->pScript && p->mask&TESTVFS_OPEN_MASK ){ + tvfsExecTcl(p, "xOpen", Tcl_NewStringObj(pFd->zFilename, -1), 0, 0); + if( tvfsResultCode(p, &rc) ){ + if( rc!=SQLITE_OK ) return rc; + }else{ + pId = Tcl_GetObjResult(p->interp); + } + } + + if( (p->mask&TESTVFS_OPEN_MASK) && tvfsInjectIoerr(p) ) return SQLITE_IOERR; + if( tvfsInjectCantopenerr(p) ) return SQLITE_CANTOPEN; + if( tvfsInjectFullerr(p) ) return SQLITE_FULL; + + if( !pId ){ + pId = Tcl_NewStringObj("anon", -1); + } + Tcl_IncrRefCount(pId); + pFd->pShmId = pId; + Tcl_ResetResult(p->interp); + + rc = sqlite3OsOpen(PARENTVFS(pVfs), zName, pFd->pReal, flags, pOutFlags); + if( pFd->pReal->pMethods ){ + sqlite3_io_methods *pMethods; + int nByte; + + if( pVfs->iVersion>1 ){ + nByte = sizeof(sqlite3_io_methods); + }else{ + nByte = offsetof(sqlite3_io_methods, xShmMap); + } + + pMethods = (sqlite3_io_methods *)ckalloc(nByte); + memcpy(pMethods, &tvfs_io_methods, nByte); + pMethods->iVersion = pVfs->iVersion; + if( pVfs->iVersion>1 && ((Testvfs *)pVfs->pAppData)->isNoshm ){ + pMethods->xShmUnmap = 0; + pMethods->xShmLock = 0; + pMethods->xShmBarrier = 0; + pMethods->xShmMap = 0; + } + pFile->pMethods = pMethods; + } + + return rc; +} + +/* +** Delete the file located at zPath. If the dirSync argument is true, +** ensure the file-system modifications are synced to disk before +** returning. +*/ +static int tvfsDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){ + int rc = SQLITE_OK; + Testvfs *p = (Testvfs *)pVfs->pAppData; + + if( p->pScript && p->mask&TESTVFS_DELETE_MASK ){ + tvfsExecTcl(p, "xDelete", + Tcl_NewStringObj(zPath, -1), Tcl_NewIntObj(dirSync), 0 + ); + tvfsResultCode(p, &rc); + } + if( rc==SQLITE_OK ){ + rc = sqlite3OsDelete(PARENTVFS(pVfs), zPath, dirSync); + } + return rc; +} + +/* +** Test for access permissions. Return true if the requested permission +** is available, or false otherwise. +*/ +static int tvfsAccess( + sqlite3_vfs *pVfs, + const char *zPath, + int flags, + int *pResOut +){ + Testvfs *p = (Testvfs *)pVfs->pAppData; + if( p->pScript && p->mask&TESTVFS_ACCESS_MASK ){ + int rc; + char *zArg = 0; + if( flags==SQLITE_ACCESS_EXISTS ) zArg = "SQLITE_ACCESS_EXISTS"; + if( flags==SQLITE_ACCESS_READWRITE ) zArg = "SQLITE_ACCESS_READWRITE"; + if( flags==SQLITE_ACCESS_READ ) zArg = "SQLITE_ACCESS_READ"; + tvfsExecTcl(p, "xAccess", + Tcl_NewStringObj(zPath, -1), Tcl_NewStringObj(zArg, -1), 0 + ); + if( tvfsResultCode(p, &rc) ){ + if( rc!=SQLITE_OK ) return rc; + }else{ + Tcl_Interp *interp = p->interp; + if( TCL_OK==Tcl_GetBooleanFromObj(0, Tcl_GetObjResult(interp), pResOut) ){ + return SQLITE_OK; + } + } + } + return sqlite3OsAccess(PARENTVFS(pVfs), zPath, flags, pResOut); +} + +/* +** Populate buffer zOut with the full canonical pathname corresponding +** to the pathname in zPath. zOut is guaranteed to point to a buffer +** of at least (DEVSYM_MAX_PATHNAME+1) bytes. +*/ +static int tvfsFullPathname( + sqlite3_vfs *pVfs, + const char *zPath, + int nOut, + char *zOut +){ + return sqlite3OsFullPathname(PARENTVFS(pVfs), zPath, nOut, zOut); +} + +#ifndef SQLITE_OMIT_LOAD_EXTENSION +/* +** Open the dynamic library located at zPath and return a handle. +*/ +static void *tvfsDlOpen(sqlite3_vfs *pVfs, const char *zPath){ + return sqlite3OsDlOpen(PARENTVFS(pVfs), zPath); +} + +/* +** Populate the buffer zErrMsg (size nByte bytes) with a human readable +** utf-8 string describing the most recent error encountered associated +** with dynamic libraries. 
+*/ +static void tvfsDlError(sqlite3_vfs *pVfs, int nByte, char *zErrMsg){ + sqlite3OsDlError(PARENTVFS(pVfs), nByte, zErrMsg); +} + +/* +** Return a pointer to the symbol zSymbol in the dynamic library pHandle. +*/ +static void (*tvfsDlSym(sqlite3_vfs *pVfs, void *p, const char *zSym))(void){ + return sqlite3OsDlSym(PARENTVFS(pVfs), p, zSym); +} + +/* +** Close the dynamic library handle pHandle. +*/ +static void tvfsDlClose(sqlite3_vfs *pVfs, void *pHandle){ + sqlite3OsDlClose(PARENTVFS(pVfs), pHandle); +} +#endif /* SQLITE_OMIT_LOAD_EXTENSION */ + +/* +** Populate the buffer pointed to by zBufOut with nByte bytes of +** random data. +*/ +static int tvfsRandomness(sqlite3_vfs *pVfs, int nByte, char *zBufOut){ + return sqlite3OsRandomness(PARENTVFS(pVfs), nByte, zBufOut); +} + +/* +** Sleep for nMicro microseconds. Return the number of microseconds +** actually slept. +*/ +static int tvfsSleep(sqlite3_vfs *pVfs, int nMicro){ + return sqlite3OsSleep(PARENTVFS(pVfs), nMicro); +} + +/* +** Return the current time as a Julian Day number in *pTimeOut. +*/ +static int tvfsCurrentTime(sqlite3_vfs *pVfs, double *pTimeOut){ + return PARENTVFS(pVfs)->xCurrentTime(PARENTVFS(pVfs), pTimeOut); +} + +static int tvfsShmOpen(sqlite3_file *pFile){ + Testvfs *p; + int rc = SQLITE_OK; /* Return code */ + TestvfsBuffer *pBuffer; /* Buffer to open connection to */ + TestvfsFd *pFd; /* The testvfs file structure */ + + pFd = tvfsGetFd(pFile); + p = (Testvfs *)pFd->pVfs->pAppData; + assert( pFd->pShmId && pFd->pShm==0 && pFd->pNext==0 ); + + /* Evaluate the Tcl script: + ** + ** SCRIPT xShmOpen FILENAME + */ + Tcl_ResetResult(p->interp); + if( p->pScript && p->mask&TESTVFS_SHMOPEN_MASK ){ + tvfsExecTcl(p, "xShmOpen", Tcl_NewStringObj(pFd->zFilename, -1), 0, 0); + if( tvfsResultCode(p, &rc) ){ + if( rc!=SQLITE_OK ) return rc; + } + } + + assert( rc==SQLITE_OK ); + if( p->mask&TESTVFS_SHMOPEN_MASK && tvfsInjectIoerr(p) ){ + return SQLITE_IOERR; + } + + /* Search for a TestvfsBuffer. Create a new one if required. */ + for(pBuffer=p->pBuffer; pBuffer; pBuffer=pBuffer->pNext){ + if( 0==strcmp(pFd->zFilename, pBuffer->zFile) ) break; + } + if( !pBuffer ){ + int nByte = sizeof(TestvfsBuffer) + strlen(pFd->zFilename) + 1; + pBuffer = (TestvfsBuffer *)ckalloc(nByte); + memset(pBuffer, 0, nByte); + pBuffer->zFile = (char *)&pBuffer[1]; + strcpy(pBuffer->zFile, pFd->zFilename); + pBuffer->pNext = p->pBuffer; + p->pBuffer = pBuffer; + } + + /* Connect the TestvfsBuffer to the new TestvfsShm handle and return. 
*/ + pFd->pNext = pBuffer->pFile; + pBuffer->pFile = pFd; + pFd->pShm = pBuffer; + return SQLITE_OK; +} + +static void tvfsAllocPage(TestvfsBuffer *p, int iPage, int pgsz){ + assert( iPageaPage[iPage]==0 ){ + p->aPage[iPage] = (u8 *)ckalloc(pgsz); + memset(p->aPage[iPage], 0, pgsz); + p->pgsz = pgsz; + } +} + +static int tvfsShmMap( + sqlite3_file *pFile, /* Handle open on database file */ + int iPage, /* Page to retrieve */ + int pgsz, /* Size of pages */ + int isWrite, /* True to extend file if necessary */ + void volatile **pp /* OUT: Mapped memory */ +){ + int rc = SQLITE_OK; + TestvfsFd *pFd = tvfsGetFd(pFile); + Testvfs *p = (Testvfs *)(pFd->pVfs->pAppData); + + if( 0==pFd->pShm ){ + rc = tvfsShmOpen(pFile); + if( rc!=SQLITE_OK ){ + return rc; + } + } + + if( p->pScript && p->mask&TESTVFS_SHMMAP_MASK ){ + Tcl_Obj *pArg = Tcl_NewObj(); + Tcl_IncrRefCount(pArg); + Tcl_ListObjAppendElement(p->interp, pArg, Tcl_NewIntObj(iPage)); + Tcl_ListObjAppendElement(p->interp, pArg, Tcl_NewIntObj(pgsz)); + Tcl_ListObjAppendElement(p->interp, pArg, Tcl_NewIntObj(isWrite)); + tvfsExecTcl(p, "xShmMap", + Tcl_NewStringObj(pFd->pShm->zFile, -1), pFd->pShmId, pArg + ); + tvfsResultCode(p, &rc); + Tcl_DecrRefCount(pArg); + } + if( rc==SQLITE_OK && p->mask&TESTVFS_SHMMAP_MASK && tvfsInjectIoerr(p) ){ + rc = SQLITE_IOERR; + } + + if( rc==SQLITE_OK && isWrite && !pFd->pShm->aPage[iPage] ){ + tvfsAllocPage(pFd->pShm, iPage, pgsz); + } + *pp = (void volatile *)pFd->pShm->aPage[iPage]; + + return rc; +} + + +static int tvfsShmLock( + sqlite3_file *pFile, + int ofst, + int n, + int flags +){ + int rc = SQLITE_OK; + TestvfsFd *pFd = tvfsGetFd(pFile); + Testvfs *p = (Testvfs *)(pFd->pVfs->pAppData); + int nLock; + char zLock[80]; + + if( p->pScript && p->mask&TESTVFS_SHMLOCK_MASK ){ + sqlite3_snprintf(sizeof(zLock), zLock, "%d %d", ofst, n); + nLock = strlen(zLock); + if( flags & SQLITE_SHM_LOCK ){ + strcpy(&zLock[nLock], " lock"); + }else{ + strcpy(&zLock[nLock], " unlock"); + } + nLock += strlen(&zLock[nLock]); + if( flags & SQLITE_SHM_SHARED ){ + strcpy(&zLock[nLock], " shared"); + }else{ + strcpy(&zLock[nLock], " exclusive"); + } + tvfsExecTcl(p, "xShmLock", + Tcl_NewStringObj(pFd->pShm->zFile, -1), pFd->pShmId, + Tcl_NewStringObj(zLock, -1) + ); + tvfsResultCode(p, &rc); + } + + if( rc==SQLITE_OK && p->mask&TESTVFS_SHMLOCK_MASK && tvfsInjectIoerr(p) ){ + rc = SQLITE_IOERR; + } + + if( rc==SQLITE_OK ){ + int isLock = (flags & SQLITE_SHM_LOCK); + int isExcl = (flags & SQLITE_SHM_EXCLUSIVE); + u32 mask = (((1<pShm->pFile; p2; p2=p2->pNext){ + if( p2==pFd ) continue; + if( (p2->excllock&mask) || (isExcl && p2->sharedlock&mask) ){ + rc = SQLITE_BUSY; + break; + } + } + if( rc==SQLITE_OK ){ + if( isExcl ) pFd->excllock |= mask; + if( !isExcl ) pFd->sharedlock |= mask; + } + }else{ + if( isExcl ) pFd->excllock &= (~mask); + if( !isExcl ) pFd->sharedlock &= (~mask); + } + } + + return rc; +} + +static void tvfsShmBarrier(sqlite3_file *pFile){ + TestvfsFd *pFd = tvfsGetFd(pFile); + Testvfs *p = (Testvfs *)(pFd->pVfs->pAppData); + + if( p->pScript && p->mask&TESTVFS_SHMBARRIER_MASK ){ + tvfsExecTcl(p, "xShmBarrier", + Tcl_NewStringObj(pFd->pShm->zFile, -1), pFd->pShmId, 0 + ); + } +} + +static int tvfsShmUnmap( + sqlite3_file *pFile, + int deleteFlag +){ + int rc = SQLITE_OK; + TestvfsFd *pFd = tvfsGetFd(pFile); + Testvfs *p = (Testvfs *)(pFd->pVfs->pAppData); + TestvfsBuffer *pBuffer = pFd->pShm; + TestvfsFd **ppFd; + + if( !pBuffer ) return SQLITE_OK; + assert( pFd->pShmId && pFd->pShm ); + + if( p->pScript && 
p->mask&TESTVFS_SHMCLOSE_MASK ){ + tvfsExecTcl(p, "xShmUnmap", + Tcl_NewStringObj(pFd->pShm->zFile, -1), pFd->pShmId, 0 + ); + tvfsResultCode(p, &rc); + } + + for(ppFd=&pBuffer->pFile; *ppFd!=pFd; ppFd=&((*ppFd)->pNext)); + assert( (*ppFd)==pFd ); + *ppFd = pFd->pNext; + pFd->pNext = 0; + + if( pBuffer->pFile==0 ){ + int i; + TestvfsBuffer **pp; + for(pp=&p->pBuffer; *pp!=pBuffer; pp=&((*pp)->pNext)); + *pp = (*pp)->pNext; + for(i=0; pBuffer->aPage[i]; i++){ + ckfree((char *)pBuffer->aPage[i]); + } + ckfree((char *)pBuffer); + } + pFd->pShm = 0; + + return rc; +} + +static int testvfs_obj_cmd( + ClientData cd, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + Testvfs *p = (Testvfs *)cd; + + enum DB_enum { + CMD_SHM, CMD_DELETE, CMD_FILTER, CMD_IOERR, CMD_SCRIPT, + CMD_DEVCHAR, CMD_SECTORSIZE, CMD_FULLERR, CMD_CANTOPENERR + }; + struct TestvfsSubcmd { + char *zName; + enum DB_enum eCmd; + } aSubcmd[] = { + { "shm", CMD_SHM }, + { "delete", CMD_DELETE }, + { "filter", CMD_FILTER }, + { "ioerr", CMD_IOERR }, + { "fullerr", CMD_FULLERR }, + { "cantopenerr", CMD_CANTOPENERR }, + { "script", CMD_SCRIPT }, + { "devchar", CMD_DEVCHAR }, + { "sectorsize", CMD_SECTORSIZE }, + { 0, 0 } + }; + int i; + + if( objc<2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "SUBCOMMAND ..."); + return TCL_ERROR; + } + if( Tcl_GetIndexFromObjStruct( + interp, objv[1], aSubcmd, sizeof(aSubcmd[0]), "subcommand", 0, &i) + ){ + return TCL_ERROR; + } + Tcl_ResetResult(interp); + + switch( aSubcmd[i].eCmd ){ + case CMD_SHM: { + Tcl_Obj *pObj; + int i; + TestvfsBuffer *pBuffer; + char *zName; + if( objc!=3 && objc!=4 ){ + Tcl_WrongNumArgs(interp, 2, objv, "FILE ?VALUE?"); + return TCL_ERROR; + } + zName = ckalloc(p->pParent->mxPathname); + p->pParent->xFullPathname( + p->pParent, Tcl_GetString(objv[2]), + p->pParent->mxPathname, zName + ); + for(pBuffer=p->pBuffer; pBuffer; pBuffer=pBuffer->pNext){ + if( 0==strcmp(pBuffer->zFile, zName) ) break; + } + ckfree(zName); + if( !pBuffer ){ + Tcl_AppendResult(interp, "no such file: ", Tcl_GetString(objv[2]), 0); + return TCL_ERROR; + } + if( objc==4 ){ + int n; + u8 *a = Tcl_GetByteArrayFromObj(objv[3], &n); + int pgsz = pBuffer->pgsz; + if( pgsz==0 ) pgsz = 32768; + for(i=0; i*pgszaPage[i], &a[i*pgsz], nByte); + } + } + + pObj = Tcl_NewObj(); + for(i=0; pBuffer->aPage[i]; i++){ + int pgsz = pBuffer->pgsz; + if( pgsz==0 ) pgsz = 32768; + Tcl_AppendObjToObj(pObj, Tcl_NewByteArrayObj(pBuffer->aPage[i], pgsz)); + } + Tcl_SetObjResult(interp, pObj); + break; + } + + case CMD_FILTER: { + static struct VfsMethod { + char *zName; + int mask; + } vfsmethod [] = { + { "xShmOpen", TESTVFS_SHMOPEN_MASK }, + { "xShmLock", TESTVFS_SHMLOCK_MASK }, + { "xShmBarrier", TESTVFS_SHMBARRIER_MASK }, + { "xShmUnmap", TESTVFS_SHMCLOSE_MASK }, + { "xShmMap", TESTVFS_SHMMAP_MASK }, + { "xSync", TESTVFS_SYNC_MASK }, + { "xDelete", TESTVFS_DELETE_MASK }, + { "xWrite", TESTVFS_WRITE_MASK }, + { "xTruncate", TESTVFS_TRUNCATE_MASK }, + { "xOpen", TESTVFS_OPEN_MASK }, + { "xClose", TESTVFS_CLOSE_MASK }, + { "xAccess", TESTVFS_ACCESS_MASK }, + }; + Tcl_Obj **apElem = 0; + int nElem = 0; + int i; + int mask = 0; + if( objc!=3 ){ + Tcl_WrongNumArgs(interp, 2, objv, "LIST"); + return TCL_ERROR; + } + if( Tcl_ListObjGetElements(interp, objv[2], &nElem, &apElem) ){ + return TCL_ERROR; + } + Tcl_ResetResult(interp); + for(i=0; imask = mask; + break; + } + + case CMD_SCRIPT: { + if( objc==3 ){ + int nByte; + if( p->pScript ){ + Tcl_DecrRefCount(p->pScript); + ckfree((char *)p->apScript); + p->apScript = 0; + 
p->nScript = 0; + p->pScript = 0; + } + Tcl_GetStringFromObj(objv[2], &nByte); + if( nByte>0 ){ + p->pScript = Tcl_DuplicateObj(objv[2]); + Tcl_IncrRefCount(p->pScript); + } + }else if( objc!=2 ){ + Tcl_WrongNumArgs(interp, 2, objv, "?SCRIPT?"); + return TCL_ERROR; + } + + Tcl_ResetResult(interp); + if( p->pScript ) Tcl_SetObjResult(interp, p->pScript); + + break; + } + + /* + ** TESTVFS ioerr ?IFAIL PERSIST? + ** + ** Where IFAIL is an integer and PERSIST is boolean. + */ + case CMD_CANTOPENERR: + case CMD_IOERR: + case CMD_FULLERR: { + TestFaultInject *pTest; + int iRet; + + switch( aSubcmd[i].eCmd ){ + case CMD_IOERR: pTest = &p->ioerr_err; break; + case CMD_FULLERR: pTest = &p->full_err; break; + case CMD_CANTOPENERR: pTest = &p->cantopen_err; break; + default: assert(0); + } + iRet = pTest->nFail; + pTest->nFail = 0; + pTest->eFault = 0; + pTest->iCnt = 0; + + if( objc==4 ){ + int iCnt, iPersist; + if( TCL_OK!=Tcl_GetIntFromObj(interp, objv[2], &iCnt) + || TCL_OK!=Tcl_GetBooleanFromObj(interp, objv[3], &iPersist) + ){ + return TCL_ERROR; + } + pTest->eFault = iPersist?FAULT_INJECT_PERSISTENT:FAULT_INJECT_TRANSIENT; + pTest->iCnt = iCnt; + }else if( objc!=2 ){ + Tcl_WrongNumArgs(interp, 2, objv, "?CNT PERSIST?"); + return TCL_ERROR; + } + Tcl_SetObjResult(interp, Tcl_NewIntObj(iRet)); + break; + } + + case CMD_DELETE: { + Tcl_DeleteCommand(interp, Tcl_GetString(objv[0])); + break; + } + + case CMD_DEVCHAR: { + struct DeviceFlag { + char *zName; + int iValue; + } aFlag[] = { + { "default", -1 }, + { "atomic", SQLITE_IOCAP_ATOMIC }, + { "atomic512", SQLITE_IOCAP_ATOMIC512 }, + { "atomic1k", SQLITE_IOCAP_ATOMIC1K }, + { "atomic2k", SQLITE_IOCAP_ATOMIC2K }, + { "atomic4k", SQLITE_IOCAP_ATOMIC4K }, + { "atomic8k", SQLITE_IOCAP_ATOMIC8K }, + { "atomic16k", SQLITE_IOCAP_ATOMIC16K }, + { "atomic32k", SQLITE_IOCAP_ATOMIC32K }, + { "atomic64k", SQLITE_IOCAP_ATOMIC64K }, + { "sequential", SQLITE_IOCAP_SEQUENTIAL }, + { "safe_append", SQLITE_IOCAP_SAFE_APPEND }, + { "undeletable_when_open", SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN }, + { 0, 0 } + }; + Tcl_Obj *pRet; + int iFlag; + + if( objc>3 ){ + Tcl_WrongNumArgs(interp, 2, objv, "?ATTR-LIST?"); + return TCL_ERROR; + } + if( objc==3 ){ + int j; + int iNew = 0; + Tcl_Obj **flags = 0; + int nFlags = 0; + + if( Tcl_ListObjGetElements(interp, objv[2], &nFlags, &flags) ){ + return TCL_ERROR; + } + + for(j=0; j1 ){ + Tcl_AppendResult(interp, "bad flags: ", Tcl_GetString(objv[2]), 0); + return TCL_ERROR; + } + iNew |= aFlag[idx].iValue; + } + + p->iDevchar = iNew; + } + + pRet = Tcl_NewObj(); + for(iFlag=0; iFlagiDevchar & aFlag[iFlag].iValue ){ + Tcl_ListObjAppendElement( + interp, pRet, Tcl_NewStringObj(aFlag[iFlag].zName, -1) + ); + } + } + Tcl_SetObjResult(interp, pRet); + + break; + } + + case CMD_SECTORSIZE: { + if( objc>3 ){ + Tcl_WrongNumArgs(interp, 2, objv, "?VALUE?"); + return TCL_ERROR; + } + if( objc==3 ){ + int iNew = 0; + if( Tcl_GetIntFromObj(interp, objv[2], &iNew) ){ + return TCL_ERROR; + } + p->iSectorsize = iNew; + } + Tcl_SetObjResult(interp, Tcl_NewIntObj(p->iSectorsize)); + break; + } + } + + return TCL_OK; +} + +static void testvfs_obj_del(ClientData cd){ + Testvfs *p = (Testvfs *)cd; + if( p->pScript ) Tcl_DecrRefCount(p->pScript); + sqlite3_vfs_unregister(p->pVfs); + ckfree((char *)p->apScript); + ckfree((char *)p->pVfs); + ckfree((char *)p); +} + +/* +** Usage: testvfs VFSNAME ?SWITCHES? +** +** Switches are: +** +** -noshm BOOLEAN (True to omit shm methods. Default false) +** -default BOOLEAN (True to make the vfs default. 
Default false) +** +** This command creates two things when it is invoked: an SQLite VFS, and +** a Tcl command. Both are named VFSNAME. The VFS is installed. It is not +** installed as the default VFS. +** +** The VFS passes all file I/O calls through to the underlying VFS. +** +** Whenever the xShmMap method of the VFS +** is invoked, the SCRIPT is executed as follows: +** +** SCRIPT xShmMap FILENAME ID +** +** The value returned by the invocation of SCRIPT above is interpreted as +** an SQLite error code and returned to SQLite. Either a symbolic +** "SQLITE_OK" or numeric "0" value may be returned. +** +** The contents of the shared-memory buffer associated with a given file +** may be read and set using the following command: +** +** VFSNAME shm FILENAME ?NEWVALUE? +** +** When the xShmLock method is invoked by SQLite, the following script is +** run: +** +** SCRIPT xShmLock FILENAME ID LOCK +** +** where LOCK is of the form "OFFSET NBYTE lock/unlock shared/exclusive" +*/ +static int testvfs_cmd( + ClientData cd, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + static sqlite3_vfs tvfs_vfs = { + 2, /* iVersion */ + 0, /* szOsFile */ + 0, /* mxPathname */ + 0, /* pNext */ + 0, /* zName */ + 0, /* pAppData */ + tvfsOpen, /* xOpen */ + tvfsDelete, /* xDelete */ + tvfsAccess, /* xAccess */ + tvfsFullPathname, /* xFullPathname */ +#ifndef SQLITE_OMIT_LOAD_EXTENSION + tvfsDlOpen, /* xDlOpen */ + tvfsDlError, /* xDlError */ + tvfsDlSym, /* xDlSym */ + tvfsDlClose, /* xDlClose */ +#else + 0, /* xDlOpen */ + 0, /* xDlError */ + 0, /* xDlSym */ + 0, /* xDlClose */ +#endif /* SQLITE_OMIT_LOAD_EXTENSION */ + tvfsRandomness, /* xRandomness */ + tvfsSleep, /* xSleep */ + tvfsCurrentTime, /* xCurrentTime */ + 0, /* xGetLastError */ + 0, /* xCurrentTimeInt64 */ + }; + + Testvfs *p; /* New object */ + sqlite3_vfs *pVfs; /* New VFS */ + char *zVfs; + int nByte; /* Bytes of space to allocate at p */ + + int i; + int isNoshm = 0; /* True if -noshm is passed */ + int isDefault = 0; /* True if -default is passed */ + int szOsFile = 0; /* Value passed to -szosfile */ + int mxPathname = -1; /* Value passed to -mxpathname */ + int iVersion = 2; /* Value passed to -iversion */ + + if( objc<2 || 0!=(objc%2) ) goto bad_args; + for(i=2; i2 && 0==strncmp("-noshm", zSwitch, nSwitch) ){ + if( Tcl_GetBooleanFromObj(interp, objv[i+1], &isNoshm) ){ + return TCL_ERROR; + } + } + else if( nSwitch>2 && 0==strncmp("-default", zSwitch, nSwitch) ){ + if( Tcl_GetBooleanFromObj(interp, objv[i+1], &isDefault) ){ + return TCL_ERROR; + } + } + else if( nSwitch>2 && 0==strncmp("-szosfile", zSwitch, nSwitch) ){ + if( Tcl_GetIntFromObj(interp, objv[i+1], &szOsFile) ){ + return TCL_ERROR; + } + } + else if( nSwitch>2 && 0==strncmp("-mxpathname", zSwitch, nSwitch) ){ + if( Tcl_GetIntFromObj(interp, objv[i+1], &mxPathname) ){ + return TCL_ERROR; + } + } + else if( nSwitch>2 && 0==strncmp("-iversion", zSwitch, nSwitch) ){ + if( Tcl_GetIntFromObj(interp, objv[i+1], &iVersion) ){ + return TCL_ERROR; + } + } + else{ + goto bad_args; + } + } + + if( szOsFileiDevchar = -1; + p->iSectorsize = -1; + + /* Create the new object command before querying SQLite for a default VFS + ** to use for 'real' IO operations. This is because creating the new VFS + ** may delete an existing [testvfs] VFS of the same name. If such a VFS + ** is currently the default, the new [testvfs] may end up calling the + ** methods of a deleted object. 
+ */ + Tcl_CreateObjCommand(interp, zVfs, testvfs_obj_cmd, p, testvfs_obj_del); + p->pParent = sqlite3_vfs_find(0); + p->interp = interp; + + p->zName = (char *)&p[1]; + memcpy(p->zName, zVfs, strlen(zVfs)+1); + + pVfs = (sqlite3_vfs *)ckalloc(sizeof(sqlite3_vfs)); + memcpy(pVfs, &tvfs_vfs, sizeof(sqlite3_vfs)); + pVfs->pAppData = (void *)p; + pVfs->iVersion = iVersion; + pVfs->zName = p->zName; + pVfs->mxPathname = p->pParent->mxPathname; + if( mxPathname>=0 && mxPathnamemxPathname ){ + pVfs->mxPathname = mxPathname; + } + pVfs->szOsFile = szOsFile; + p->pVfs = pVfs; + p->isNoshm = isNoshm; + p->mask = TESTVFS_ALL_MASK; + + sqlite3_vfs_register(pVfs, isDefault); + + return TCL_OK; + + bad_args: + Tcl_WrongNumArgs(interp, 1, objv, "VFSNAME ?-noshm BOOL? ?-default BOOL? ?-mxpathname INT? ?-szosfile INT? ?-iversion INT?"); + return TCL_ERROR; +} + +int Sqlitetestvfs_Init(Tcl_Interp *interp){ + Tcl_CreateObjCommand(interp, "testvfs", testvfs_cmd, 0, 0); + return TCL_OK; +} + +#endif diff --git a/src/trigger.c b/src/trigger.c index f57d560..27fc708 100644 --- a/src/trigger.c +++ b/src/trigger.c @@ -496,6 +496,7 @@ void sqlite3DropTrigger(Parse *pParse, SrcList *pName, int noErr){ if( !noErr ){ sqlite3ErrorMsg(pParse, "no such trigger: %S", pName, 0); } + pParse->checkSchema = 1; goto drop_trigger_cleanup; } sqlite3DropTriggerPtr(pParse, pTrigger); @@ -826,6 +827,7 @@ static TriggerPrg *codeRowTrigger( pSubParse->pToplevel = pTop; pSubParse->zAuthContext = pTrigger->zName; pSubParse->eTriggerOp = pTrigger->op; + pSubParse->nQueryLoop = pParse->nQueryLoop; v = sqlite3GetVdbe(pSubParse); if( v ){ diff --git a/src/update.c b/src/update.c index 66bf8ca..fe8344c 100644 --- a/src/update.c +++ b/src/update.c @@ -8,7 +8,7 @@ ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** -************************************************************************* +sqlite************************************************************************* ** This file contains C code routines that are called by the parser ** to handle UPDATE statements. */ @@ -212,6 +212,7 @@ void sqlite3Update( pRowidExpr = pChanges->a[i].pExpr; }else{ sqlite3ErrorMsg(pParse, "no such column: %s", pChanges->a[i].zName); + pParse->checkSchema = 1; goto update_cleanup; } } @@ -396,8 +397,7 @@ void sqlite3Update( ); for(i=0; inCol; i++){ if( aXRef[i]<0 || oldmask==0xffffffff || (oldmask & (1<xTrace */ Db *pDb = 0; /* Database to detach at end of vacuum */ int isMemDb; /* True if vacuuming a :memory: database */ - int nRes; + int nRes; /* Bytes of reserved space at the end of each page */ + int nDb; /* Number of attached databases */ if( !db->autoCommit ){ sqlite3SetString(pzErrMsg, db, "cannot VACUUM from within a transaction"); @@ -117,7 +118,7 @@ int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){ saved_nChange = db->nChange; saved_nTotalChange = db->nTotalChange; saved_xTrace = db->xTrace; - db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks; + db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks | SQLITE_PreferBuiltin; db->flags &= ~(SQLITE_ForeignKeys | SQLITE_ReverseOrder); db->xTrace = 0; @@ -138,15 +139,18 @@ int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){ ** time to parse and run the PRAGMA to turn journalling off than it does ** to write the journal header file. 
*/ + nDb = db->nDb; if( sqlite3TempInMemory(db) ){ zSql = "ATTACH ':memory:' AS vacuum_db;"; }else{ zSql = "ATTACH '' AS vacuum_db;"; } rc = execSql(db, pzErrMsg, zSql); + if( db->nDb>nDb ){ + pDb = &db->aDb[db->nDb-1]; + assert( strcmp(pDb->zName,"vacuum_db")==0 ); + } if( rc!=SQLITE_OK ) goto end_of_vacuum; - pDb = &db->aDb[db->nDb-1]; - assert( strcmp(db->aDb[db->nDb-1].zName,"vacuum_db")==0 ); pTemp = db->aDb[db->nDb-1].pBt; /* The call to execSql() to attach the temp database has left the file @@ -168,6 +172,12 @@ int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){ } #endif + /* Do not attempt to change the page size for a WAL database */ + if( sqlite3PagerGetJournalMode(sqlite3BtreePager(pMain)) + ==PAGER_JOURNALMODE_WAL ){ + db->nextPagesize = 0; + } + if( sqlite3BtreeSetPageSize(pTemp, sqlite3BtreeGetPageSize(pMain), nRes, 0) || (!isMemDb && sqlite3BtreeSetPageSize(pTemp, db->nextPagesize, nRes, 0)) || NEVER(db->mallocFailed) @@ -304,6 +314,7 @@ end_of_vacuum: db->nChange = saved_nChange; db->nTotalChange = saved_nTotalChange; db->xTrace = saved_xTrace; + sqlite3BtreeSetPageSize(pMain, -1, -1, 1); /* Currently there is an SQL level transaction open on the vacuum ** database. No locks are held on any other files (since the main file diff --git a/src/vdbe.c b/src/vdbe.c index 33dd2da..6813837 100644 --- a/src/vdbe.c +++ b/src/vdbe.c @@ -480,22 +480,6 @@ static void registerTrace(FILE *out, int iReg, Mem *p){ #define CHECK_FOR_INTERRUPT \ if( db->u1.isInterrupted ) goto abort_due_to_interrupt; -#ifdef SQLITE_DEBUG -static int fileExists(sqlite3 *db, const char *zFile){ - int res = 0; - int rc = SQLITE_OK; -#ifdef SQLITE_TEST - /* If we are currently testing IO errors, then do not call OsAccess() to - ** test for the presence of zFile. This is because any IO error that - ** occurs here will not be reported, causing the test to fail. - */ - extern int sqlite3_io_error_pending; - if( sqlite3_io_error_pending<=0 ) -#endif - rc = sqlite3OsAccess(db->pVfs, zFile, SQLITE_ACCESS_EXISTS, &res); - return (res && rc==SQLITE_OK); -} -#endif #ifndef NDEBUG /* @@ -594,9 +578,7 @@ int sqlite3VdbeExec( #endif #ifdef SQLITE_DEBUG sqlite3BeginBenignMalloc(); - if( p->pc==0 - && ((p->db->flags & SQLITE_VdbeListing) || fileExists(db, "vdbe_explain")) - ){ + if( p->pc==0 && (p->db->flags & SQLITE_VdbeListing)!=0 ){ int i; printf("VDBE Program Listing:\n"); sqlite3VdbePrintSql(p); @@ -604,9 +586,6 @@ int sqlite3VdbeExec( sqlite3VdbePrintOp(stdout, i, &aOp[i]); } } - if( fileExists(db, "vdbe_trace") ){ - p->trace = stdout; - } sqlite3EndBenignMalloc(); #endif for(pc=p->pc; rc==SQLITE_OK; pc++){ @@ -628,13 +607,6 @@ int sqlite3VdbeExec( } sqlite3VdbePrintOp(p->trace, pc, pOp); } - if( p->trace==0 && pc==0 ){ - sqlite3BeginBenignMalloc(); - if( fileExists(db, "vdbe_sqltrace") ){ - sqlite3VdbePrintSql(p); - } - sqlite3EndBenignMalloc(); - } #endif @@ -989,38 +961,23 @@ case OP_Blob: { /* out2-prerelease */ break; } -/* Opcode: Variable P1 P2 P3 P4 * +/* Opcode: Variable P1 P2 * P4 * ** -** Transfer the values of bound parameters P1..P1+P3-1 into registers -** P2..P2+P3-1. +** Transfer the values of bound parameter P1 into register P2 ** ** If the parameter is named, then its name appears in P4 and P3==1. ** The P4 value is used by sqlite3_bind_parameter_name(). 
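** For example (an illustrative sketch using the public API; db is an open
** database handle, and the table t1 and the value 42 are hypothetical):
**
**   sqlite3_stmt *pStmt;
**   sqlite3_prepare_v2(db, "SELECT a FROM t1 WHERE b=?1", -1, &pStmt, 0);
**   sqlite3_bind_int(pStmt, 1, 42);
**   while( sqlite3_step(pStmt)==SQLITE_ROW ){ ... }
**   sqlite3_finalize(pStmt);
**
** The value bound above is held in Vdbe.aVar[0]; OP_Variable copies it into
** register P2 when the statement runs.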
*/ -case OP_Variable: { - int p1; /* Variable to copy from */ - int p2; /* Register to copy to */ - int n; /* Number of values left to copy */ +case OP_Variable: { /* out2-prerelease */ Mem *pVar; /* Value being transferred */ - p1 = pOp->p1 - 1; - p2 = pOp->p2; - n = pOp->p3; - assert( p1>=0 && p1+n<=p->nVar ); - assert( p2>=1 && p2+n-1<=p->nMem ); - assert( pOp->p4.z==0 || pOp->p3==1 || pOp->p3==0 ); - - while( n-- > 0 ){ - pVar = &p->aVar[p1++]; - if( sqlite3VdbeMemTooBig(pVar) ){ - goto too_big; - } - pOut = &aMem[p2++]; - sqlite3VdbeMemReleaseExternal(pOut); - pOut->flags = MEM_Null; - sqlite3VdbeMemShallowCopy(pOut, pVar, MEM_Static); - UPDATE_MAX_BLOBSIZE(pOut); + assert( pOp->p1>0 && pOp->p1<=p->nVar ); + pVar = &p->aVar[pOp->p1 - 1]; + if( sqlite3VdbeMemTooBig(pVar) ){ + goto too_big; } + sqlite3VdbeMemShallowCopy(pOut, pVar, MEM_Static); + UPDATE_MAX_BLOBSIZE(pOut); break; } @@ -1383,7 +1340,7 @@ case OP_Function: { for(i=0; ip2, pArg); + REGISTER_TRACE(pOp->p2+i, pArg); } assert( pOp->p4type==P4_FUNCDEF || pOp->p4type==P4_VDBEFUNC ); @@ -3068,10 +3025,10 @@ case OP_OpenWrite: { ** ** Open a new cursor P1 to a transient table. ** The cursor is always opened read/write even if -** the main database is read-only. The transient or virtual +** the main database is read-only. The ephemeral ** table is deleted automatically when the cursor is closed. ** -** P2 is the number of columns in the virtual table. +** P2 is the number of columns in the ephemeral table. ** The cursor points to a BTree table if P4==0 and to a BTree index ** if P4 is not 0. If P4 is not NULL, it points to a KeyInfo structure ** that defines the format of keys in the index. @@ -3082,6 +3039,14 @@ case OP_OpenWrite: { ** this opcode. Then this opcode was call OpenVirtual. But ** that created confusion with the whole virtual-table idea. */ +/* Opcode: OpenAutoindex P1 P2 * P4 * +** +** This opcode works the same as OP_OpenEphemeral. It has a +** different name to distinguish its use. Tables created using +** by this opcode will be used for automatically created transient +** indices in joins. +*/ +case OP_OpenAutoindex: case OP_OpenEphemeral: { VdbeCursor *pCx; static const int openFlags = @@ -4173,14 +4138,13 @@ case OP_Rewind: { /* jump */ assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); + res = 1; if( (pCrsr = pC->pCursor)!=0 ){ rc = sqlite3BtreeFirst(pCrsr, &res); pC->atFirst = res==0 ?1:0; pC->deferredMoveto = 0; pC->cacheStatus = CACHE_STALE; pC->rowidIsValid = 0; - }else{ - res = 1; } pC->nullRow = (u8)res; assert( pOp->p2>0 && pOp->p2nOp ); @@ -4190,7 +4154,7 @@ case OP_Rewind: { /* jump */ break; } -/* Opcode: Next P1 P2 * * * +/* Opcode: Next P1 P2 * * P5 ** ** Advance cursor P1 so that it points to the next key/data pair in its ** table or index. If there are no more key/value pairs then fall through @@ -4199,9 +4163,12 @@ case OP_Rewind: { /* jump */ ** ** The P1 cursor must be for a real table, not a pseudo-table. ** +** If P5 is positive and the jump is taken, then event counter +** number P5-1 in the prepared statement is incremented. +** ** See also: Prev */ -/* Opcode: Prev P1 P2 * * * +/* Opcode: Prev P1 P2 * * P5 ** ** Back up cursor P1 so that it points to the previous key/data pair in its ** table or index. If there is no previous key/value pairs then fall through @@ -4209,6 +4176,9 @@ case OP_Rewind: { /* jump */ ** jump immediately to P2. ** ** The P1 cursor must be for a real table, not a pseudo-table. 
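** Event counters of this kind, including the one described for Next above,
** are read back through the public sqlite3_stmt_status() interface. A
** minimal sketch, reading one such counter without resetting it:
**
**   int nFullScan = sqlite3_stmt_status(pStmt,
**                                       SQLITE_STMTSTATUS_FULLSCAN_STEP, 0);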
+** +** If P5 is positive and the jump is taken, then event counter +** number P5-1 in the prepared statement is incremented. */ case OP_Prev: /* jump */ case OP_Next: { /* jump */ @@ -4218,6 +4188,7 @@ case OP_Next: { /* jump */ CHECK_FOR_INTERRUPT; assert( pOp->p1>=0 && pOp->p1nCursor ); + assert( pOp->p5<=ArraySize(p->aCounter) ); pC = p->apCsr[pOp->p1]; if( pC==0 ){ break; /* See ticket #2273 */ @@ -5171,6 +5142,139 @@ case OP_AggFinal: { break; } +#ifndef SQLITE_OMIT_WAL +/* Opcode: Checkpoint P1 * * * * +** +** Checkpoint database P1. This is a no-op if P1 is not currently in +** WAL mode. +*/ +case OP_Checkpoint: { + rc = sqlite3Checkpoint(db, pOp->p1); + break; +}; +#endif + +#ifndef SQLITE_OMIT_PRAGMA +/* Opcode: JournalMode P1 P2 P3 * P5 +** +** Change the journal mode of database P1 to P3. P3 must be one of the +** PAGER_JOURNALMODE_XXX values. If changing between the various rollback +** modes (delete, truncate, persist, off and memory), this is a simple +** operation. No IO is required. +** +** If changing into or out of WAL mode the procedure is more complicated. +** +** Write a string containing the final journal-mode to register P2. +*/ +case OP_JournalMode: { /* out2-prerelease */ + Btree *pBt; /* Btree to change journal mode of */ + Pager *pPager; /* Pager associated with pBt */ + int eNew; /* New journal mode */ + int eOld; /* The old journal mode */ + const char *zFilename; /* Name of database file for pPager */ + + eNew = pOp->p3; + assert( eNew==PAGER_JOURNALMODE_DELETE + || eNew==PAGER_JOURNALMODE_TRUNCATE + || eNew==PAGER_JOURNALMODE_PERSIST + || eNew==PAGER_JOURNALMODE_OFF + || eNew==PAGER_JOURNALMODE_MEMORY + || eNew==PAGER_JOURNALMODE_WAL + || eNew==PAGER_JOURNALMODE_QUERY + ); + assert( pOp->p1>=0 && pOp->p1nDb ); + + /* This opcode is used in two places: PRAGMA journal_mode and ATTACH. + ** In PRAGMA journal_mode, the sqlite3VdbeUsesBtree() routine is called + ** when the statment is prepared and so p->aMutex.nMutex>0. All mutexes + ** are already acquired. But when used in ATTACH, sqlite3VdbeUsesBtree() + ** is not called when the statement is prepared because it requires the + ** iDb index of the database as a parameter, and the database has not + ** yet been attached so that index is unavailable. We have to wait + ** until runtime (now) to get the mutex on the newly attached database. + ** No other mutexes are required by the ATTACH command so this is safe + ** to do. + */ + assert( (p->btreeMask & (1<p1))!=0 || p->aMutex.nMutex==0 ); + if( p->aMutex.nMutex==0 ){ + /* This occurs right after ATTACH. Get a mutex on the newly ATTACHed + ** database. 
*/ + sqlite3VdbeUsesBtree(p, pOp->p1); + sqlite3VdbeMutexArrayEnter(p); + } + + pBt = db->aDb[pOp->p1].pBt; + pPager = sqlite3BtreePager(pBt); + eOld = sqlite3PagerGetJournalMode(pPager); + if( eNew==PAGER_JOURNALMODE_QUERY ) eNew = eOld; + if( !sqlite3PagerOkToChangeJournalMode(pPager) ) eNew = eOld; + +#ifndef SQLITE_OMIT_WAL + zFilename = sqlite3PagerFilename(pPager); + + /* Do not allow a transition to journal_mode=WAL for a database + ** in temporary storage or if the VFS does not support shared memory + */ + if( eNew==PAGER_JOURNALMODE_WAL + && (zFilename[0]==0 /* Temp file */ + || !sqlite3PagerWalSupported(pPager)) /* No shared-memory support */ + ){ + eNew = eOld; + } + + if( (eNew!=eOld) + && (eOld==PAGER_JOURNALMODE_WAL || eNew==PAGER_JOURNALMODE_WAL) + ){ + if( !db->autoCommit || db->activeVdbeCnt>1 ){ + rc = SQLITE_ERROR; + sqlite3SetString(&p->zErrMsg, db, + "cannot change %s wal mode from within a transaction", + (eNew==PAGER_JOURNALMODE_WAL ? "into" : "out of") + ); + break; + }else{ + + if( eOld==PAGER_JOURNALMODE_WAL ){ + /* If leaving WAL mode, close the log file. If successful, the call + ** to PagerCloseWal() checkpoints and deletes the write-ahead-log + ** file. An EXCLUSIVE lock may still be held on the database file + ** after a successful return. + */ + rc = sqlite3PagerCloseWal(pPager); + if( rc==SQLITE_OK ){ + sqlite3PagerSetJournalMode(pPager, eNew); + } + }else if( eOld==PAGER_JOURNALMODE_MEMORY ){ + /* Cannot transition directly from MEMORY to WAL. Use mode OFF + ** as an intermediate */ + sqlite3PagerSetJournalMode(pPager, PAGER_JOURNALMODE_OFF); + } + + /* Open a transaction on the database file. Regardless of the journal + ** mode, this transaction always uses a rollback journal. + */ + assert( sqlite3BtreeIsInTrans(pBt)==0 ); + if( rc==SQLITE_OK ){ + rc = sqlite3BtreeSetVersion(pBt, (eNew==PAGER_JOURNALMODE_WAL ? 2 : 1)); + } + } + } +#endif /* ifndef SQLITE_OMIT_WAL */ + + if( rc ){ + eNew = eOld; + } + eNew = sqlite3PagerSetJournalMode(pPager, eNew); + + pOut = &aMem[pOp->p2]; + pOut->flags = MEM_Str|MEM_Static|MEM_Term; + pOut->z = (char *)sqlite3JournalModename(eNew); + pOut->n = sqlite3Strlen30(pOut->z); + pOut->enc = SQLITE_UTF8; + sqlite3VdbeChangeEncoding(pOut, encoding); + break; +}; +#endif /* SQLITE_OMIT_PRAGMA */ #if !defined(SQLITE_OMIT_VACUUM) && !defined(SQLITE_OMIT_ATTACH) /* Opcode: Vacuum * * * * * @@ -5616,19 +5720,7 @@ case OP_VUpdate: { ** Write the current number of pages in database P1 to memory cell P2. */ case OP_Pagecount: { /* out2-prerelease */ - int p1; - int nPage; - Pager *pPager; - - p1 = pOp->p1; - pPager = sqlite3BtreePager(db->aDb[p1].pBt); - rc = sqlite3PagerPagecount(pPager, &nPage); - /* OP_Pagecount is always called from within a read transaction. The - ** page count has already been successfully read and cached. So the - ** sqlite3PagerPagecount() call above cannot fail. 
*/ - if( ALWAYS(rc==SQLITE_OK) ){ - pOut->u.i = nPage; - } + pOut->u.i = sqlite3BtreeLastPage(db->aDb[pOp->p1].pBt); break; } #endif diff --git a/src/vdbeInt.h b/src/vdbeInt.h index 489e8da..e8e1585 100644 --- a/src/vdbeInt.h +++ b/src/vdbeInt.h @@ -311,7 +311,7 @@ struct Vdbe { int btreeMask; /* Bitmask of db->aDb[] entries referenced */ i64 startTime; /* Time when query started - used for profiling */ BtreeMutexArray aMutex; /* An array of Btree used here and needing locks */ - int aCounter[2]; /* Counters used by sqlite3_stmt_status() */ + int aCounter[3]; /* Counters used by sqlite3_stmt_status() */ char *zSql; /* Text of the SQL statement that generated this */ void *pFree; /* Free this when deleting the vdbe */ i64 nFkConstraint; /* Number of imm. FK constraints this VM */ diff --git a/src/vdbeapi.c b/src/vdbeapi.c index 125f325..2f5aaa3 100644 --- a/src/vdbeapi.c +++ b/src/vdbeapi.c @@ -306,6 +306,27 @@ void sqlite3_result_error_nomem(sqlite3_context *pCtx){ pCtx->s.db->mallocFailed = 1; } +/* +** This function is called after a transaction has been committed. It +** invokes callbacks registered with sqlite3_wal_hook() as required. +*/ +static int doWalCallbacks(sqlite3 *db){ + int rc = SQLITE_OK; +#ifndef SQLITE_OMIT_WAL + int i; + for(i=0; inDb; i++){ + Btree *pBt = db->aDb[i].pBt; + if( pBt ){ + int nEntry = sqlite3PagerWalCallback(sqlite3BtreePager(pBt)); + if( db->xWalCallback && nEntry>0 && rc==SQLITE_OK ){ + rc = db->xWalCallback(db->pWalArg, db, db->aDb[i].zName, nEntry); + } + } + } +#endif + return rc; +} + /* ** Execute the statement pStmt, either until a row of data is ready, the ** statement is completely executed or an error occurs. @@ -321,9 +342,12 @@ static int sqlite3Step(Vdbe *p){ assert(p); if( p->magic!=VDBE_MAGIC_RUN ){ - sqlite3_log(SQLITE_MISUSE, - "attempt to step a halted statement: [%s]", p->zSql); - return SQLITE_MISUSE_BKPT; + /* We used to require that sqlite3_reset() be called before retrying + ** sqlite3_step() after any error. But after 3.6.23, we changed this + ** so that sqlite3_reset() would be called automatically instead of + ** throwing the error. + */ + sqlite3_reset((sqlite3_stmt*)p); } /* Check that malloc() has not failed. If it has, return early. 
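  ** The callbacks run by doWalCallbacks() above are those registered through
  ** the public sqlite3_wal_hook() interface. A minimal sketch of such a
  ** hook; the checkpoint-after-1000-frames policy shown is illustrative only:
  **
  **   static int xWalHook(void *p, sqlite3 *db, const char *zDb, int nFrame){
  **     if( nFrame>=1000 ) sqlite3_wal_checkpoint(db, zDb);
  **     return SQLITE_OK;
  **   }
  **   sqlite3_wal_hook(db, xWalHook, 0);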
*/ @@ -351,9 +375,7 @@ static int sqlite3Step(Vdbe *p){ #ifndef SQLITE_OMIT_TRACE if( db->xProfile && !db->init.busy ){ - double rNow; - sqlite3OsCurrentTime(db->pVfs, &rNow); - p->startTime = (u64)((rNow - (int)rNow)*3600.0*24.0*1000000000.0); + sqlite3OsCurrentTimeInt64(db->pVfs, &p->startTime); } #endif @@ -374,16 +396,20 @@ static int sqlite3Step(Vdbe *p){ /* Invoke the profile callback if there is one */ if( rc!=SQLITE_ROW && db->xProfile && !db->init.busy && p->zSql ){ - double rNow; - u64 elapseTime; - - sqlite3OsCurrentTime(db->pVfs, &rNow); - elapseTime = (u64)((rNow - (int)rNow)*3600.0*24.0*1000000000.0); - elapseTime -= p->startTime; - db->xProfile(db->pProfileArg, p->zSql, elapseTime); + sqlite3_int64 iNow; + sqlite3OsCurrentTimeInt64(db->pVfs, &iNow); + db->xProfile(db->pProfileArg, p->zSql, iNow - p->startTime); } #endif + if( rc==SQLITE_DONE ){ + assert( p->rc==SQLITE_OK ); + p->rc = doWalCallbacks(db); + if( p->rc!=SQLITE_OK ){ + rc = SQLITE_ERROR; + } + } + db->errCode = rc; if( SQLITE_NOMEM==sqlite3ApiExit(p->db, p->rc) ){ p->rc = SQLITE_NOMEM; diff --git a/src/vdbeaux.c b/src/vdbeaux.c index 40a46ff..cf53f90 100644 --- a/src/vdbeaux.c +++ b/src/vdbeaux.c @@ -743,7 +743,7 @@ void sqlite3VdbeChangeP4(Vdbe *p, int addr, const char *zP4, int n){ pOp->p4.pKeyInfo = pKeyInfo; if( pKeyInfo ){ u8 *aSortOrder; - memcpy(pKeyInfo, zP4, nByte); + memcpy((char*)pKeyInfo, zP4, nByte - nField); aSortOrder = pKeyInfo->aSortOrder; if( aSortOrder ){ pKeyInfo->aSortOrder = (unsigned char*)&pKeyInfo->aColl[nField]; @@ -814,9 +814,12 @@ void sqlite3VdbeNoopComment(Vdbe *p, const char *zFormat, ...){ ** ** If a memory allocation error has occurred prior to the calling of this ** routine, then a pointer to a dummy VdbeOp will be returned. That opcode -** is readable and writable, but it has no effect. The return of a dummy -** opcode allows the call to continue functioning after a OOM fault without -** having to check to see if the return from this routine is a valid pointer. +** is readable but not writable, though it is cast to a writable value. +** The return of a dummy opcode allows the call to continue functioning +** after a OOM fault without having to check to see if the return from +** this routine is a valid pointer. But because the dummy.opcode is 0, +** dummy will never be written to. This is verified by code inspection and +** by running with Valgrind. ** ** About the #ifdef SQLITE_OMIT_TRACE: Normally, this routine is never called ** unless p->nOp>0. This is because in the absense of SQLITE_OMIT_TRACE, @@ -827,17 +830,19 @@ void sqlite3VdbeNoopComment(Vdbe *p, const char *zFormat, ...){ ** check the value of p->nOp-1 before continuing. */ VdbeOp *sqlite3VdbeGetOp(Vdbe *p, int addr){ - static VdbeOp dummy; + /* C89 specifies that the constant "dummy" will be initialized to all + ** zeros, which is correct. MSVC generates a warning, nevertheless. */ + static const VdbeOp dummy; /* Ignore the MSVC warning about no initializer */ assert( p->magic==VDBE_MAGIC_INIT ); if( addr<0 ){ #ifdef SQLITE_OMIT_TRACE - if( p->nOp==0 ) return &dummy; + if( p->nOp==0 ) return (VdbeOp*)&dummy; #endif addr = p->nOp - 1; } assert( (addr>=0 && addrnOp) || p->db->mallocFailed ); if( p->db->mallocFailed ){ - return &dummy; + return (VdbeOp*)&dummy; }else{ return &p->aOp[addr]; } @@ -950,6 +955,11 @@ static char *displayP4(Op *pOp, char *zTemp, int nTemp){ /* ** Declare to the Vdbe that the BTree object at db->aDb[i] is used. 
+** +** The prepared statement has to know in advance which Btree objects +** will be used so that it can acquire mutexes on them all in sorted +** order (via sqlite3VdbeMutexArrayEnter(). Mutexes are acquired +** in order (and released in reverse order) to avoid deadlocks. */ void sqlite3VdbeUsesBtree(Vdbe *p, int i){ int mask; @@ -1449,6 +1459,7 @@ void sqlite3VdbeMakeReady( p->cacheCtr = 1; p->minWriteFileFormat = 255; p->iStatement = 0; + p->nFkConstraint = 0; #ifdef VDBE_PROFILE { int i; @@ -2137,10 +2148,17 @@ int sqlite3VdbeHalt(Vdbe *p){ */ if( eStatementOp ){ rc = sqlite3VdbeCloseStatement(p, eStatementOp); - if( rc && (NEVER(p->rc==SQLITE_OK) || p->rc==SQLITE_CONSTRAINT) ){ - p->rc = rc; - sqlite3DbFree(db, p->zErrMsg); - p->zErrMsg = 0; + if( rc ){ + assert( eStatementOp==SAVEPOINT_ROLLBACK ); + if( NEVER(p->rc==SQLITE_OK) || p->rc==SQLITE_CONSTRAINT ){ + p->rc = rc; + sqlite3DbFree(db, p->zErrMsg); + p->zErrMsg = 0; + } + invalidateCursorsOnModifiedBtrees(db); + sqlite3RollbackAll(db); + sqlite3CloseSavepoints(db); + db->autoCommit = 1; } } diff --git a/src/vdbeblob.c b/src/vdbeblob.c index 829b6de..b2b9f0e 100644 --- a/src/vdbeblob.c +++ b/src/vdbeblob.c @@ -191,10 +191,14 @@ int sqlite3_blob_open( sqlite3VdbeUsesBtree(v, iDb); /* Configure the OP_TableLock instruction */ +#ifdef SQLITE_OMIT_SHARED_CACHE + sqlite3VdbeChangeToNoop(v, 2, 1); +#else sqlite3VdbeChangeP1(v, 2, iDb); sqlite3VdbeChangeP2(v, 2, pTab->tnum); sqlite3VdbeChangeP3(v, 2, flags); sqlite3VdbeChangeP4(v, 2, pTab->zName, P4_TRANSIENT); +#endif /* Remove either the OP_OpenWrite or OpenRead. Set the P2 ** parameter of the other to pTab->tnum. */ diff --git a/src/vdbemem.c b/src/vdbemem.c index 8217c4d..622b617 100644 --- a/src/vdbemem.c +++ b/src/vdbemem.c @@ -1015,9 +1015,16 @@ int sqlite3ValueFromExpr( return SQLITE_OK; } op = pExpr->op; - if( op==TK_REGISTER ){ - op = pExpr->op2; /* This only happens with SQLITE_ENABLE_STAT2 */ - } + + /* op can only be TK_REGISTER is we have compiled with SQLITE_ENABLE_STAT2. + ** The ifdef here is to enable us to achieve 100% branch test coverage even + ** when SQLITE_ENABLE_STAT2 is omitted. + */ +#ifdef SQLITE_ENABLE_STAT2 + if( op==TK_REGISTER ) op = pExpr->op2; +#else + if( NEVER(op==TK_REGISTER) ) op = pExpr->op2; +#endif if( op==TK_STRING || op==TK_FLOAT || op==TK_INTEGER ){ pVal = sqlite3ValueNew(db); diff --git a/src/vtab.c b/src/vtab.c index cbb7523..24e922e 100644 --- a/src/vtab.c +++ b/src/vtab.c @@ -657,6 +657,7 @@ int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ }else{ pParse->declareVtab = 1; pParse->db = db; + pParse->nQueryLoop = 1; if( SQLITE_OK==sqlite3RunParser(pParse, zCreateTable, &zErr) && pParse->pNewTable diff --git a/src/wal.c b/src/wal.c new file mode 100644 index 0000000..5427464 --- /dev/null +++ b/src/wal.c @@ -0,0 +1,2661 @@ +/* +** 2010 February 1 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains the implementation of a write-ahead log (WAL) used in +** "journal_mode=WAL" mode. +** +** WRITE-AHEAD LOG (WAL) FILE FORMAT +** +** A WAL file consists of a header followed by zero or more "frames". +** Each frame records the revised content of a single page from the +** database file. 
All changes to the database are recorded by writing +** frames into the WAL. Transactions commit when a frame is written that +** contains a commit marker. A single WAL can and usually does record +** multiple transactions. Periodically, the content of the WAL is +** transferred back into the database file in an operation called a +** "checkpoint". +** +** A single WAL file can be used multiple times. In other words, the +** WAL can fill up with frames and then be checkpointed and then new +** frames can overwrite the old ones. A WAL always grows from beginning +** toward the end. Checksums and counters attached to each frame are +** used to determine which frames within the WAL are valid and which +** are leftovers from prior checkpoints. +** +** The WAL header is 32 bytes in size and consists of the following eight +** big-endian 32-bit unsigned integer values: +** +** 0: Magic number. 0x377f0682 or 0x377f0683 +** 4: File format version. Currently 3007000 +** 8: Database page size. Example: 1024 +** 12: Checkpoint sequence number +** 16: Salt-1, random integer incremented with each checkpoint +** 20: Salt-2, a different random integer changing with each ckpt +** 24: Checksum-1 (first part of checksum for first 24 bytes of header). +** 28: Checksum-2 (second part of checksum for first 24 bytes of header). +** +** Immediately following the wal-header are zero or more frames. Each +** frame consists of a 24-byte frame-header followed by a bytes +** of page data. The frame-header is six big-endian 32-bit unsigned +** integer values, as follows: +** +** 0: Page number. +** 4: For commit records, the size of the database image in pages +** after the commit. For all other records, zero. +** 8: Salt-1 (copied from the header) +** 12: Salt-2 (copied from the header) +** 16: Checksum-1. +** 20: Checksum-2. +** +** A frame is considered valid if and only if the following conditions are +** true: +** +** (1) The salt-1 and salt-2 values in the frame-header match +** salt values in the wal-header +** +** (2) The checksum values in the final 8 bytes of the frame-header +** exactly match the checksum computed consecutively on the +** WAL header and the first 8 bytes and the content of all frames +** up to and including the current frame. +** +** The checksum is computed using 32-bit big-endian integers if the +** magic number in the first 4 bytes of the WAL is 0x377f0683 and it +** is computed using little-endian if the magic number is 0x377f0682. +** The checksum values are always stored in the frame header in a +** big-endian format regardless of which byte order is used to compute +** the checksum. The checksum is computed by interpreting the input as +** an even number of unsigned 32-bit integers: x[0] through x[N]. The +** algorithm used for the checksum is as follows: +** +** for i from 0 to n-1 step 2: +** s0 += x[i] + s1; +** s1 += x[i+1] + s0; +** endfor +** +** Note that s0 and s1 are both weighted checksums using fibonacci weights +** in reverse order (the largest fibonacci weight occurs on the first element +** of the sequence being summed.) The s1 value spans all 32-bit +** terms of the sequence whereas s0 omits the final term. +** +** On a checkpoint, the WAL is first VFS.xSync-ed, then valid content of the +** WAL is transferred into the database, then the database is VFS.xSync-ed. +** The VFS.xSync operations serve as write barriers - all writes launched +** before the xSync must complete before any write that launches after the +** xSync begins. 
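+** As a minimal sketch of decoding the big-endian header fields listed above
+** from a 32-byte buffer aHdr[] (the real code later in this file uses
+** sqlite3Get4byte() for the same job; getBE32() here is a hypothetical
+** helper):
+**
+**   static u32 getBE32(const unsigned char *p){
+**     return ((u32)p[0]<<24)|((u32)p[1]<<16)|((u32)p[2]<<8)|(u32)p[3];
+**   }
+**   u32 magic    = getBE32(&aHdr[0]);
+**   u32 version  = getBE32(&aHdr[4]);
+**   u32 szDbPage = getBE32(&aHdr[8]);
+**   u32 nCkpt    = getBE32(&aHdr[12]);
+**
+** and so on for the two salt and two checksum values at offsets 16..31.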
+** +** After each checkpoint, the salt-1 value is incremented and the salt-2 +** value is randomized. This prevents old and new frames in the WAL from +** being considered valid at the same time and being checkpointing together +** following a crash. +** +** READER ALGORITHM +** +** To read a page from the database (call it page number P), a reader +** first checks the WAL to see if it contains page P. If so, then the +** last valid instance of page P that is a followed by a commit frame +** or is a commit frame itself becomes the value read. If the WAL +** contains no copies of page P that are valid and which are a commit +** frame or are followed by a commit frame, then page P is read from +** the database file. +** +** To start a read transaction, the reader records the index of the last +** valid frame in the WAL. The reader uses this recorded "mxFrame" value +** for all subsequent read operations. New transactions can be appended +** to the WAL, but as long as the reader uses its original mxFrame value +** and ignores the newly appended content, it will see a consistent snapshot +** of the database from a single point in time. This technique allows +** multiple concurrent readers to view different versions of the database +** content simultaneously. +** +** The reader algorithm in the previous paragraphs works correctly, but +** because frames for page P can appear anywhere within the WAL, the +** reader has to scan the entire WAL looking for page P frames. If the +** WAL is large (multiple megabytes is typical) that scan can be slow, +** and read performance suffers. To overcome this problem, a separate +** data structure called the wal-index is maintained to expedite the +** search for frames of a particular page. +** +** WAL-INDEX FORMAT +** +** Conceptually, the wal-index is shared memory, though VFS implementations +** might choose to implement the wal-index using a mmapped file. Because +** the wal-index is shared memory, SQLite does not support journal_mode=WAL +** on a network filesystem. All users of the database must be able to +** share memory. +** +** The wal-index is transient. After a crash, the wal-index can (and should +** be) reconstructed from the original WAL file. In fact, the VFS is required +** to either truncate or zero the header of the wal-index when the last +** connection to it closes. Because the wal-index is transient, it can +** use an architecture-specific format; it does not have to be cross-platform. +** Hence, unlike the database and WAL file formats which store all values +** as big endian, the wal-index can store multi-byte values in the native +** byte order of the host computer. +** +** The purpose of the wal-index is to answer this question quickly: Given +** a page number P, return the index of the last frame for page P in the WAL, +** or return NULL if there are no frames for page P in the WAL. +** +** The wal-index consists of a header region, followed by an one or +** more index blocks. +** +** The wal-index header contains the total number of frames within the WAL +** in the the mxFrame field. +** +** Each index block except for the first contains information on +** HASHTABLE_NPAGE frames. The first index block contains information on +** HASHTABLE_NPAGE_ONE frames. The values of HASHTABLE_NPAGE_ONE and +** HASHTABLE_NPAGE are selected so that together the wal-index header and +** first index block are the same size as all other index blocks in the +** wal-index. 
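+** Restated as deliberately naive code, the question the wal-index answers
+** is equivalent to the following scan, where walFramePgno(pWal, i), defined
+** later in this file, returns the page number stored in frame i:
+**
+**   u32 i, iRead = 0;
+**   for(i=1; i<=pWal->hdr.mxFrame; i++){
+**     if( walFramePgno(pWal, i)==P ) iRead = i;
+**   }
+**
+** If iRead is still zero after the scan, page P is read from the database
+** file instead of the WAL. The index blocks described below exist so that
+** readers almost never have to perform this full scan.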
+** +** Each index block contains two sections, a page-mapping that contains the +** database page number associated with each wal frame, and a hash-table +** that allows readers to query an index block for a specific page number. +** The page-mapping is an array of HASHTABLE_NPAGE (or HASHTABLE_NPAGE_ONE +** for the first index block) 32-bit page numbers. The first entry in the +** first index-block contains the database page number corresponding to the +** first frame in the WAL file. The first entry in the second index block +** in the WAL file corresponds to the (HASHTABLE_NPAGE_ONE+1)th frame in +** the log, and so on. +** +** The last index block in a wal-index usually contains less than the full +** complement of HASHTABLE_NPAGE (or HASHTABLE_NPAGE_ONE) page-numbers, +** depending on the contents of the WAL file. This does not change the +** allocated size of the page-mapping array - the page-mapping array merely +** contains unused entries. +** +** Even without using the hash table, the last frame for page P +** can be found by scanning the page-mapping sections of each index block +** starting with the last index block and moving toward the first, and +** within each index block, starting at the end and moving toward the +** beginning. The first entry that equals P corresponds to the frame +** holding the content for that page. +** +** The hash table consists of HASHTABLE_NSLOT 16-bit unsigned integers. +** HASHTABLE_NSLOT = 2*HASHTABLE_NPAGE, and there is one entry in the +** hash table for each page number in the mapping section, so the hash +** table is never more than half full. The expected number of collisions +** prior to finding a match is 1. Each entry of the hash table is an +** 1-based index of an entry in the mapping section of the same +** index block. Let K be the 1-based index of the largest entry in +** the mapping section. (For index blocks other than the last, K will +** always be exactly HASHTABLE_NPAGE (4096) and for the last index block +** K will be (mxFrame%HASHTABLE_NPAGE).) Unused slots of the hash table +** contain a value of 0. +** +** To look for page P in the hash table, first compute a hash iKey on +** P as follows: +** +** iKey = (P * 383) % HASHTABLE_NSLOT +** +** Then start scanning entries of the hash table, starting with iKey +** (wrapping around to the beginning when the end of the hash table is +** reached) until an unused hash slot is found. Let the first unused slot +** be at index iUnused. (iUnused might be less than iKey if there was +** wrap-around.) Because the hash table is never more than half full, +** the search is guaranteed to eventually hit an unused entry. Let +** iMax be the value between iKey and iUnused, closest to iUnused, +** where aHash[iMax]==P. If there is no iMax entry (if there exists +** no hash slot such that aHash[i]==p) then page P is not in the +** current index block. Otherwise the iMax-th mapping entry of the +** current index block corresponds to the last entry that references +** page P. +** +** A hash search begins with the last index block and moves toward the +** first index block, looking for entries corresponding to page P. On +** average, only two or three slots in each index block need to be +** examined in order to either find the last entry for page P, or to +** establish that no such entry exists in the block. Each index block +** holds over 4000 entries. So two or three index blocks are sufficient +** to cover a typical 10 megabyte WAL file, assuming 1K pages. 
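+** In code, the probe of a single index block described above looks roughly
+** like this, where aHash, aPgno and iZero come from walHashGet(), defined
+** later in this file, and where a real reader also ignores any match beyond
+** its mxFrame snapshot:
+**
+**   int iKey, iMax = 0;
+**   for(iKey=walHash(P); aHash[iKey]; iKey=walNextHash(iKey)){
+**     if( aPgno[aHash[iKey]]==P ) iMax = aHash[iKey];
+**   }
+**
+** If iMax is non-zero when the loop exits, frame (iZero+iMax) holds the
+** last copy of page P in this index block; otherwise the block holds no
+** frame for page P.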
8 or 10 +** comparisons (on average) suffice to either locate a frame in the +** WAL or to establish that the frame does not exist in the WAL. This +** is much faster than scanning the entire 10MB WAL. +** +** Note that entries are added in order of increasing K. Hence, one +** reader might be using some value K0 and a second reader that started +** at a later time (after additional transactions were added to the WAL +** and to the wal-index) might be using a different value K1, where K1>K0. +** Both readers can use the same hash table and mapping section to get +** the correct result. There may be entries in the hash table with +** K>K0 but to the first reader, those entries will appear to be unused +** slots in the hash table and so the first reader will get an answer as +** if no values greater than K0 had ever been inserted into the hash table +** in the first place - which is what reader one wants. Meanwhile, the +** second reader using K1 will see additional values that were inserted +** later, which is exactly what reader two wants. +** +** When a rollback occurs, the value of K is decreased. Hash table entries +** that correspond to frames greater than the new K value are removed +** from the hash table at this point. +*/ +#ifndef SQLITE_OMIT_WAL + +#include "wal.h" + +/* +** Trace output macros +*/ +#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG) +int sqlite3WalTrace = 0; +# define WALTRACE(X) if(sqlite3WalTrace) sqlite3DebugPrintf X +#else +# define WALTRACE(X) +#endif + +/* +** The maximum (and only) versions of the wal and wal-index formats +** that may be interpreted by this version of SQLite. +** +** If a client begins recovering a WAL file and finds that (a) the checksum +** values in the wal-header are correct and (b) the version field is not +** WAL_MAX_VERSION, recovery fails and SQLite returns SQLITE_CANTOPEN. +** +** Similarly, if a client successfully reads a wal-index header (i.e. the +** checksum test is successful) and finds that the version field is not +** WALINDEX_MAX_VERSION, then no read-transaction is opened and SQLite +** returns SQLITE_CANTOPEN. +*/ +#define WAL_MAX_VERSION 3007000 +#define WALINDEX_MAX_VERSION 3007000 + +/* +** Indices of various locking bytes. WAL_NREADER is the number +** of available reader locks and should be at least 3. +*/ +#define WAL_WRITE_LOCK 0 +#define WAL_ALL_BUT_WRITE 1 +#define WAL_CKPT_LOCK 1 +#define WAL_RECOVER_LOCK 2 +#define WAL_READ_LOCK(I) (3+(I)) +#define WAL_NREADER (SQLITE_SHM_NLOCK-3) + + +/* Object declarations */ +typedef struct WalIndexHdr WalIndexHdr; +typedef struct WalIterator WalIterator; +typedef struct WalCkptInfo WalCkptInfo; + + +/* +** The following object holds a copy of the wal-index header content. +** +** The actual header in the wal-index consists of two copies of this +** object. +*/ +struct WalIndexHdr { + u32 iVersion; /* Wal-index version */ + u32 unused; /* Unused (padding) field */ + u32 iChange; /* Counter incremented each transaction */ + u8 isInit; /* 1 when initialized */ + u8 bigEndCksum; /* True if checksums in WAL are big-endian */ + u16 szPage; /* Database page size in bytes */ + u32 mxFrame; /* Index of last valid frame in the WAL */ + u32 nPage; /* Size of database in pages */ + u32 aFrameCksum[2]; /* Checksum of last frame in log */ + u32 aSalt[2]; /* Two salt values copied from WAL header */ + u32 aCksum[2]; /* Checksum over all prior fields */ +}; + +/* +** A copy of the following object occurs in the wal-index immediately +** following the second copy of the WalIndexHdr. 
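+** (On a typical build, where WalIndexHdr is 48 bytes and there are five
+** read-marks, the two header copies occupy bytes 0..95 of the wal-index,
+** this object occupies bytes 96..119, and the 16 reserved lock bytes
+** defined below occupy bytes 120..135, giving WALINDEX_HDR_SIZE==136.)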
This object stores +** information used by checkpoint. +** +** nBackfill is the number of frames in the WAL that have been written +** back into the database. (We call the act of moving content from WAL to +** database "backfilling".) The nBackfill number is never greater than +** WalIndexHdr.mxFrame. nBackfill can only be increased by threads +** holding the WAL_CKPT_LOCK lock (which includes a recovery thread). +** However, a WAL_WRITE_LOCK thread can move the value of nBackfill from +** mxFrame back to zero when the WAL is reset. +** +** There is one entry in aReadMark[] for each reader lock. If a reader +** holds read-lock K, then the value in aReadMark[K] is no greater than +** the mxFrame for that reader. The value READMARK_NOT_USED (0xffffffff) +** for any aReadMark[] means that entry is unused. aReadMark[0] is +** a special case; its value is never used and it exists as a place-holder +** to avoid having to offset aReadMark[] indexs by one. Readers holding +** WAL_READ_LOCK(0) always ignore the entire WAL and read all content +** directly from the database. +** +** The value of aReadMark[K] may only be changed by a thread that +** is holding an exclusive lock on WAL_READ_LOCK(K). Thus, the value of +** aReadMark[K] cannot changed while there is a reader is using that mark +** since the reader will be holding a shared lock on WAL_READ_LOCK(K). +** +** The checkpointer may only transfer frames from WAL to database where +** the frame numbers are less than or equal to every aReadMark[] that is +** in use (that is, every aReadMark[j] for which there is a corresponding +** WAL_READ_LOCK(j)). New readers (usually) pick the aReadMark[] with the +** largest value and will increase an unused aReadMark[] to mxFrame if there +** is not already an aReadMark[] equal to mxFrame. The exception to the +** previous sentence is when nBackfill equals mxFrame (meaning that everything +** in the WAL has been backfilled into the database) then new readers +** will choose aReadMark[0] which has value 0 and hence such reader will +** get all their all content directly from the database file and ignore +** the WAL. +** +** Writers normally append new frames to the end of the WAL. However, +** if nBackfill equals mxFrame (meaning that all WAL content has been +** written back into the database) and if no readers are using the WAL +** (in other words, if there are no WAL_READ_LOCK(i) where i>0) then +** the writer will first "reset" the WAL back to the beginning and start +** writing new content beginning at frame 1. +** +** We assume that 32-bit loads are atomic and so no locks are needed in +** order to read from any aReadMark[] entries. +*/ +struct WalCkptInfo { + u32 nBackfill; /* Number of WAL frames backfilled into DB */ + u32 aReadMark[WAL_NREADER]; /* Reader marks */ +}; +#define READMARK_NOT_USED 0xffffffff + + +/* A block of WALINDEX_LOCK_RESERVED bytes beginning at +** WALINDEX_LOCK_OFFSET is reserved for locks. Since some systems +** only support mandatory file-locks, we do not read or write data +** from the region of the file on which locks are applied. +*/ +#define WALINDEX_LOCK_OFFSET (sizeof(WalIndexHdr)*2 + sizeof(WalCkptInfo)) +#define WALINDEX_LOCK_RESERVED 16 +#define WALINDEX_HDR_SIZE (WALINDEX_LOCK_OFFSET+WALINDEX_LOCK_RESERVED) + +/* Size of header before each frame in wal */ +#define WAL_FRAME_HDRSIZE 24 + +/* Size of write ahead log header, including checksum. */ +/* #define WAL_HDRSIZE 24 */ +#define WAL_HDRSIZE 32 + +/* WAL magic value. 
Either this value, or the same value with the least +** significant bit also set (WAL_MAGIC | 0x00000001) is stored in 32-bit +** big-endian format in the first 4 bytes of a WAL file. +** +** If the LSB is set, then the checksums for each frame within the WAL +** file are calculated by treating all data as an array of 32-bit +** big-endian words. Otherwise, they are calculated by interpreting +** all data as 32-bit little-endian words. +*/ +#define WAL_MAGIC 0x377f0682 + +/* +** Return the offset of frame iFrame in the write-ahead log file, +** assuming a database page size of szPage bytes. The offset returned +** is to the start of the write-ahead log frame-header. +*/ +#define walFrameOffset(iFrame, szPage) ( \ + WAL_HDRSIZE + ((iFrame)-1)*(i64)((szPage)+WAL_FRAME_HDRSIZE) \ +) + +/* +** An open write-ahead log file is represented by an instance of the +** following object. +*/ +struct Wal { + sqlite3_vfs *pVfs; /* The VFS used to create pDbFd */ + sqlite3_file *pDbFd; /* File handle for the database file */ + sqlite3_file *pWalFd; /* File handle for WAL file */ + u32 iCallback; /* Value to pass to log callback (or 0) */ + int nWiData; /* Size of array apWiData */ + volatile u32 **apWiData; /* Pointer to wal-index content in memory */ + u16 szPage; /* Database page size */ + i16 readLock; /* Which read lock is being held. -1 for none */ + u8 exclusiveMode; /* Non-zero if connection is in exclusive mode */ + u8 writeLock; /* True if in a write transaction */ + u8 ckptLock; /* True if holding a checkpoint lock */ + u8 readOnly; /* True if the WAL file is open read-only */ + WalIndexHdr hdr; /* Wal-index header for current transaction */ + const char *zWalName; /* Name of WAL file */ + u32 nCkpt; /* Checkpoint sequence counter in the wal-header */ +#ifdef SQLITE_DEBUG + u8 lockError; /* True if a locking error has occurred */ +#endif +}; + +/* +** Each page of the wal-index mapping contains a hash-table made up of +** an array of HASHTABLE_NSLOT elements of the following type. +*/ +typedef u16 ht_slot; + +/* +** This structure is used to implement an iterator that loops through +** all frames in the WAL in database page order. Where two or more frames +** correspond to the same database page, the iterator visits only the +** frame most recently written to the WAL (in other words, the frame with +** the largest index). +** +** The internals of this structure are only accessed by: +** +** walIteratorInit() - Create a new iterator, +** walIteratorNext() - Step an iterator, +** walIteratorFree() - Free an iterator. +** +** This functionality is used by the checkpoint code (see walCheckpoint()). +*/ +struct WalIterator { + int iPrior; /* Last result returned from the iterator */ + int nSegment; /* Size of the aSegment[] array */ + struct WalSegment { + int iNext; /* Next slot in aIndex[] not yet returned */ + ht_slot *aIndex; /* i0, i1, i2... such that aPgno[iN] ascend */ + u32 *aPgno; /* Array of page numbers. */ + int nEntry; /* Max size of aPgno[] and aIndex[] arrays */ + int iZero; /* Frame number associated with aPgno[0] */ + } aSegment[1]; /* One for every 32KB page in the WAL */ +}; + +/* +** Define the parameters of the hash tables in the wal-index file. There +** is a hash-table following every HASHTABLE_NPAGE page numbers in the +** wal-index. +** +** Changing any of these constants will alter the wal-index format and +** create incompatibilities. 
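+** With the values defined below (HASHTABLE_NPAGE==4096 page numbers of four
+** bytes each, plus HASHTABLE_NSLOT==8192 two-byte hash slots), each
+** wal-index page works out to 4096*4 + 8192*2 = 32768 bytes, which is the
+** aligned 32KB unit referred to throughout this file.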
+*/ +#define HASHTABLE_NPAGE 4096 /* Must be power of 2 */ +#define HASHTABLE_HASH_1 383 /* Should be prime */ +#define HASHTABLE_NSLOT (HASHTABLE_NPAGE*2) /* Must be a power of 2 */ + +/* +** The block of page numbers associated with the first hash-table in a +** wal-index is smaller than usual. This is so that there is a complete +** hash-table on each aligned 32KB page of the wal-index. +*/ +#define HASHTABLE_NPAGE_ONE (HASHTABLE_NPAGE - (WALINDEX_HDR_SIZE/sizeof(u32))) + +/* The wal-index is divided into pages of WALINDEX_PGSZ bytes each. */ +#define WALINDEX_PGSZ ( \ + sizeof(ht_slot)*HASHTABLE_NSLOT + HASHTABLE_NPAGE*sizeof(u32) \ +) + +/* +** Obtain a pointer to the iPage'th page of the wal-index. The wal-index +** is broken into pages of WALINDEX_PGSZ bytes. Wal-index pages are +** numbered from zero. +** +** If this call is successful, *ppPage is set to point to the wal-index +** page and SQLITE_OK is returned. If an error (an OOM or VFS error) occurs, +** then an SQLite error code is returned and *ppPage is set to 0. +*/ +static int walIndexPage(Wal *pWal, int iPage, volatile u32 **ppPage){ + int rc = SQLITE_OK; + + /* Enlarge the pWal->apWiData[] array if required */ + if( pWal->nWiData<=iPage ){ + int nByte = sizeof(u32*)*(iPage+1); + volatile u32 **apNew; + apNew = (volatile u32 **)sqlite3_realloc((void *)pWal->apWiData, nByte); + if( !apNew ){ + *ppPage = 0; + return SQLITE_NOMEM; + } + memset((void*)&apNew[pWal->nWiData], 0, + sizeof(u32*)*(iPage+1-pWal->nWiData)); + pWal->apWiData = apNew; + pWal->nWiData = iPage+1; + } + + /* Request a pointer to the required page from the VFS */ + if( pWal->apWiData[iPage]==0 ){ + rc = sqlite3OsShmMap(pWal->pDbFd, iPage, WALINDEX_PGSZ, + pWal->writeLock, (void volatile **)&pWal->apWiData[iPage] + ); + } + + *ppPage = pWal->apWiData[iPage]; + assert( iPage==0 || *ppPage || rc!=SQLITE_OK ); + return rc; +} + +/* +** Return a pointer to the WalCkptInfo structure in the wal-index. +*/ +static volatile WalCkptInfo *walCkptInfo(Wal *pWal){ + assert( pWal->nWiData>0 && pWal->apWiData[0] ); + return (volatile WalCkptInfo*)&(pWal->apWiData[0][sizeof(WalIndexHdr)/2]); +} + +/* +** Return a pointer to the WalIndexHdr structure in the wal-index. +*/ +static volatile WalIndexHdr *walIndexHdr(Wal *pWal){ + assert( pWal->nWiData>0 && pWal->apWiData[0] ); + return (volatile WalIndexHdr*)pWal->apWiData[0]; +} + +/* +** The argument to this macro must be of type u32. On a little-endian +** architecture, it returns the u32 value that results from interpreting +** the 4 bytes as a big-endian value. On a big-endian architecture, it +** returns the value that would be produced by intepreting the 4 bytes +** of the input value as a little-endian integer. +*/ +#define BYTESWAP32(x) ( \ + (((x)&0x000000FF)<<24) + (((x)&0x0000FF00)<<8) \ + + (((x)&0x00FF0000)>>8) + (((x)&0xFF000000)>>24) \ +) + +/* +** Generate or extend an 8 byte checksum based on the data in +** array aByte[] and the initial values of aIn[0] and aIn[1] (or +** initial values of 0 and 0 if aIn==NULL). +** +** The checksum is written back into aOut[] before returning. +** +** nByte must be a positive multiple of 8. +*/ +static void walChecksumBytes( + int nativeCksum, /* True for native byte-order, false for non-native */ + u8 *a, /* Content to be checksummed */ + int nByte, /* Bytes of content in a[]. Must be a multiple of 8. 
*/ + const u32 *aIn, /* Initial checksum value input */ + u32 *aOut /* OUT: Final checksum value output */ +){ + u32 s1, s2; + u32 *aData = (u32 *)a; + u32 *aEnd = (u32 *)&a[nByte]; + + if( aIn ){ + s1 = aIn[0]; + s2 = aIn[1]; + }else{ + s1 = s2 = 0; + } + + assert( nByte>=8 ); + assert( (nByte&0x00000007)==0 ); + + if( nativeCksum ){ + do { + s1 += *aData++ + s2; + s2 += *aData++ + s1; + }while( aDatahdr into the wal-index. +** +** The checksum on pWal->hdr is updated before it is written. +*/ +static void walIndexWriteHdr(Wal *pWal){ + volatile WalIndexHdr *aHdr = walIndexHdr(pWal); + const int nCksum = offsetof(WalIndexHdr, aCksum); + + assert( pWal->writeLock ); + pWal->hdr.isInit = 1; + pWal->hdr.iVersion = WALINDEX_MAX_VERSION; + walChecksumBytes(1, (u8*)&pWal->hdr, nCksum, 0, pWal->hdr.aCksum); + memcpy((void *)&aHdr[1], (void *)&pWal->hdr, sizeof(WalIndexHdr)); + sqlite3OsShmBarrier(pWal->pDbFd); + memcpy((void *)&aHdr[0], (void *)&pWal->hdr, sizeof(WalIndexHdr)); +} + +/* +** This function encodes a single frame header and writes it to a buffer +** supplied by the caller. A frame-header is made up of a series of +** 4-byte big-endian integers, as follows: +** +** 0: Page number. +** 4: For commit records, the size of the database image in pages +** after the commit. For all other records, zero. +** 8: Salt-1 (copied from the wal-header) +** 12: Salt-2 (copied from the wal-header) +** 16: Checksum-1. +** 20: Checksum-2. +*/ +static void walEncodeFrame( + Wal *pWal, /* The write-ahead log */ + u32 iPage, /* Database page number for frame */ + u32 nTruncate, /* New db size (or 0 for non-commit frames) */ + u8 *aData, /* Pointer to page data */ + u8 *aFrame /* OUT: Write encoded frame here */ +){ + int nativeCksum; /* True for native byte-order checksums */ + u32 *aCksum = pWal->hdr.aFrameCksum; + assert( WAL_FRAME_HDRSIZE==24 ); + sqlite3Put4byte(&aFrame[0], iPage); + sqlite3Put4byte(&aFrame[4], nTruncate); + memcpy(&aFrame[8], pWal->hdr.aSalt, 8); + + nativeCksum = (pWal->hdr.bigEndCksum==SQLITE_BIGENDIAN); + walChecksumBytes(nativeCksum, aFrame, 8, aCksum, aCksum); + walChecksumBytes(nativeCksum, aData, pWal->szPage, aCksum, aCksum); + + sqlite3Put4byte(&aFrame[16], aCksum[0]); + sqlite3Put4byte(&aFrame[20], aCksum[1]); +} + +/* +** Check to see if the frame with header in aFrame[] and content +** in aData[] is valid. If it is a valid frame, fill *piPage and +** *pnTruncate and return true. Return if the frame is not valid. +*/ +static int walDecodeFrame( + Wal *pWal, /* The write-ahead log */ + u32 *piPage, /* OUT: Database page number for frame */ + u32 *pnTruncate, /* OUT: New db size (or 0 if not commit) */ + u8 *aData, /* Pointer to page data (for checksum) */ + u8 *aFrame /* Frame data */ +){ + int nativeCksum; /* True for native byte-order checksums */ + u32 *aCksum = pWal->hdr.aFrameCksum; + u32 pgno; /* Page number of the frame */ + assert( WAL_FRAME_HDRSIZE==24 ); + + /* A frame is only valid if the salt values in the frame-header + ** match the salt values in the wal-header. + */ + if( memcmp(&pWal->hdr.aSalt, &aFrame[8], 8)!=0 ){ + return 0; + } + + /* A frame is only valid if the page number is creater than zero. + */ + pgno = sqlite3Get4byte(&aFrame[0]); + if( pgno==0 ){ + return 0; + } + + /* A frame is only valid if a checksum of the WAL header, + ** all prior frams, the first 16 bytes of this frame-header, + ** and the frame-data matches the checksum in the last 8 + ** bytes of this frame-header. 
+ */ + nativeCksum = (pWal->hdr.bigEndCksum==SQLITE_BIGENDIAN); + walChecksumBytes(nativeCksum, aFrame, 8, aCksum, aCksum); + walChecksumBytes(nativeCksum, aData, pWal->szPage, aCksum, aCksum); + if( aCksum[0]!=sqlite3Get4byte(&aFrame[16]) + || aCksum[1]!=sqlite3Get4byte(&aFrame[20]) + ){ + /* Checksum failed. */ + return 0; + } + + /* If we reach this point, the frame is valid. Return the page number + ** and the new database size. + */ + *piPage = pgno; + *pnTruncate = sqlite3Get4byte(&aFrame[4]); + return 1; +} + + +#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG) +/* +** Names of locks. This routine is used to provide debugging output and is not +** a part of an ordinary build. +*/ +static const char *walLockName(int lockIdx){ + if( lockIdx==WAL_WRITE_LOCK ){ + return "WRITE-LOCK"; + }else if( lockIdx==WAL_CKPT_LOCK ){ + return "CKPT-LOCK"; + }else if( lockIdx==WAL_RECOVER_LOCK ){ + return "RECOVER-LOCK"; + }else{ + static char zName[15]; + sqlite3_snprintf(sizeof(zName), zName, "READ-LOCK[%d]", + lockIdx-WAL_READ_LOCK(0)); + return zName; + } +} +#endif /*defined(SQLITE_TEST) || defined(SQLITE_DEBUG) */ + + +/* +** Set or release locks on the WAL. Locks are either shared or exclusive. +** A lock cannot be moved directly between shared and exclusive - it must go +** through the unlocked state first. +** +** In locking_mode=EXCLUSIVE, all of these routines become no-ops. +*/ +static int walLockShared(Wal *pWal, int lockIdx){ + int rc; + if( pWal->exclusiveMode ) return SQLITE_OK; + rc = sqlite3OsShmLock(pWal->pDbFd, lockIdx, 1, + SQLITE_SHM_LOCK | SQLITE_SHM_SHARED); + WALTRACE(("WAL%p: acquire SHARED-%s %s\n", pWal, + walLockName(lockIdx), rc ? "failed" : "ok")); + VVA_ONLY( pWal->lockError = (u8)(rc!=SQLITE_OK && rc!=SQLITE_BUSY); ) + return rc; +} +static void walUnlockShared(Wal *pWal, int lockIdx){ + if( pWal->exclusiveMode ) return; + (void)sqlite3OsShmLock(pWal->pDbFd, lockIdx, 1, + SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED); + WALTRACE(("WAL%p: release SHARED-%s\n", pWal, walLockName(lockIdx))); +} +static int walLockExclusive(Wal *pWal, int lockIdx, int n){ + int rc; + if( pWal->exclusiveMode ) return SQLITE_OK; + rc = sqlite3OsShmLock(pWal->pDbFd, lockIdx, n, + SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE); + WALTRACE(("WAL%p: acquire EXCLUSIVE-%s cnt=%d %s\n", pWal, + walLockName(lockIdx), n, rc ? "failed" : "ok")); + VVA_ONLY( pWal->lockError = (u8)(rc!=SQLITE_OK && rc!=SQLITE_BUSY); ) + return rc; +} +static void walUnlockExclusive(Wal *pWal, int lockIdx, int n){ + if( pWal->exclusiveMode ) return; + (void)sqlite3OsShmLock(pWal->pDbFd, lockIdx, n, + SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE); + WALTRACE(("WAL%p: release EXCLUSIVE-%s cnt=%d\n", pWal, + walLockName(lockIdx), n)); +} + +/* +** Compute a hash on a page number. The resulting hash value must land +** between 0 and (HASHTABLE_NSLOT-1). The walHashNext() function advances +** the hash to the next value in the event of a collision. +*/ +static int walHash(u32 iPage){ + assert( iPage>0 ); + assert( (HASHTABLE_NSLOT & (HASHTABLE_NSLOT-1))==0 ); + return (iPage*HASHTABLE_HASH_1) & (HASHTABLE_NSLOT-1); +} +static int walNextHash(int iPriorHash){ + return (iPriorHash+1)&(HASHTABLE_NSLOT-1); +} + +/* +** Return pointers to the hash table and page number array stored on +** page iHash of the wal-index. The wal-index is broken into 32KB pages +** numbered starting from 0. +** +** Set output variable *paHash to point to the start of the hash table +** in the wal-index file. 
Set *piZero to one less than the frame +** number of the first frame indexed by this hash table. If a +** slot in the hash table is set to N, it refers to frame number +** (*piZero+N) in the log. +** +** Finally, set *paPgno so that *paPgno[1] is the page number of the +** first frame indexed by the hash table, frame (*piZero+1). +*/ +static int walHashGet( + Wal *pWal, /* WAL handle */ + int iHash, /* Find the iHash'th table */ + volatile ht_slot **paHash, /* OUT: Pointer to hash index */ + volatile u32 **paPgno, /* OUT: Pointer to page number array */ + u32 *piZero /* OUT: Frame associated with *paPgno[0] */ +){ + int rc; /* Return code */ + volatile u32 *aPgno; + + rc = walIndexPage(pWal, iHash, &aPgno); + assert( rc==SQLITE_OK || iHash>0 ); + + if( rc==SQLITE_OK ){ + u32 iZero; + volatile ht_slot *aHash; + + aHash = (volatile ht_slot *)&aPgno[HASHTABLE_NPAGE]; + if( iHash==0 ){ + aPgno = &aPgno[WALINDEX_HDR_SIZE/sizeof(u32)]; + iZero = 0; + }else{ + iZero = HASHTABLE_NPAGE_ONE + (iHash-1)*HASHTABLE_NPAGE; + } + + *paPgno = &aPgno[-1]; + *paHash = aHash; + *piZero = iZero; + } + return rc; +} + +/* +** Return the number of the wal-index page that contains the hash-table +** and page-number array that contain entries corresponding to WAL frame +** iFrame. The wal-index is broken up into 32KB pages. Wal-index pages +** are numbered starting from 0. +*/ +static int walFramePage(u32 iFrame){ + int iHash = (iFrame+HASHTABLE_NPAGE-HASHTABLE_NPAGE_ONE-1) / HASHTABLE_NPAGE; + assert( (iHash==0 || iFrame>HASHTABLE_NPAGE_ONE) + && (iHash>=1 || iFrame<=HASHTABLE_NPAGE_ONE) + && (iHash<=1 || iFrame>(HASHTABLE_NPAGE_ONE+HASHTABLE_NPAGE)) + && (iHash>=2 || iFrame<=HASHTABLE_NPAGE_ONE+HASHTABLE_NPAGE) + && (iHash<=2 || iFrame>(HASHTABLE_NPAGE_ONE+2*HASHTABLE_NPAGE)) + ); + return iHash; +} + +/* +** Return the page number associated with frame iFrame in this WAL. +*/ +static u32 walFramePgno(Wal *pWal, u32 iFrame){ + int iHash = walFramePage(iFrame); + if( iHash==0 ){ + return pWal->apWiData[0][WALINDEX_HDR_SIZE/sizeof(u32) + iFrame - 1]; + } + return pWal->apWiData[iHash][(iFrame-1-HASHTABLE_NPAGE_ONE)%HASHTABLE_NPAGE]; +} + +/* +** Remove entries from the hash table that point to WAL slots greater +** than pWal->hdr.mxFrame. +** +** This function is called whenever pWal->hdr.mxFrame is decreased due +** to a rollback or savepoint. +** +** At most only the hash table containing pWal->hdr.mxFrame needs to be +** updated. Any later hash tables will be automatically cleared when +** pWal->hdr.mxFrame advances to the point where those hash tables are +** actually needed. +*/ +static void walCleanupHash(Wal *pWal){ + volatile ht_slot *aHash = 0; /* Pointer to hash table to clear */ + volatile u32 *aPgno = 0; /* Page number array for hash table */ + u32 iZero = 0; /* frame == (aHash[x]+iZero) */ + int iLimit = 0; /* Zero values greater than this */ + int nByte; /* Number of bytes to zero in aPgno[] */ + int i; /* Used to iterate through aHash[] */ + + assert( pWal->writeLock ); + testcase( pWal->hdr.mxFrame==HASHTABLE_NPAGE_ONE-1 ); + testcase( pWal->hdr.mxFrame==HASHTABLE_NPAGE_ONE ); + testcase( pWal->hdr.mxFrame==HASHTABLE_NPAGE_ONE+1 ); + + if( pWal->hdr.mxFrame==0 ) return; + + /* Obtain pointers to the hash-table and page-number array containing + ** the entry that corresponds to frame pWal->hdr.mxFrame. It is guaranteed + ** that the page said hash-table and array reside on is already mapped. 
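+  ** (As a worked example of walFramePage(): on a typical build where
+  ** WALINDEX_HDR_SIZE works out to 136 bytes, HASHTABLE_NPAGE_ONE is
+  ** 4096-34 == 4062, so frames 1..4062 live on wal-index page 0, frames
+  ** 4063..8158 on page 1, frames 8159..12254 on page 2, and so on.)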
+ */ + assert( pWal->nWiData>walFramePage(pWal->hdr.mxFrame) ); + assert( pWal->apWiData[walFramePage(pWal->hdr.mxFrame)] ); + walHashGet(pWal, walFramePage(pWal->hdr.mxFrame), &aHash, &aPgno, &iZero); + + /* Zero all hash-table entries that correspond to frame numbers greater + ** than pWal->hdr.mxFrame. + */ + iLimit = pWal->hdr.mxFrame - iZero; + assert( iLimit>0 ); + for(i=0; iiLimit ){ + aHash[i] = 0; + } + } + + /* Zero the entries in the aPgno array that correspond to frames with + ** frame numbers greater than pWal->hdr.mxFrame. + */ + nByte = (int)((char *)aHash - (char *)&aPgno[iLimit+1]); + memset((void *)&aPgno[iLimit+1], 0, nByte); + +#ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT + /* Verify that the every entry in the mapping region is still reachable + ** via the hash table even after the cleanup. + */ + if( iLimit ){ + int i; /* Loop counter */ + int iKey; /* Hash key */ + for(i=1; i<=iLimit; i++){ + for(iKey=walHash(aPgno[i]); aHash[iKey]; iKey=walNextHash(iKey)){ + if( aHash[iKey]==i ) break; + } + assert( aHash[iKey]==i ); + } + } +#endif /* SQLITE_ENABLE_EXPENSIVE_ASSERT */ +} + + +/* +** Set an entry in the wal-index that will map database page number +** pPage into WAL frame iFrame. +*/ +static int walIndexAppend(Wal *pWal, u32 iFrame, u32 iPage){ + int rc; /* Return code */ + u32 iZero = 0; /* One less than frame number of aPgno[1] */ + volatile u32 *aPgno = 0; /* Page number array */ + volatile ht_slot *aHash = 0; /* Hash table */ + + rc = walHashGet(pWal, walFramePage(iFrame), &aHash, &aPgno, &iZero); + + /* Assuming the wal-index file was successfully mapped, populate the + ** page number array and hash table entry. + */ + if( rc==SQLITE_OK ){ + int iKey; /* Hash table key */ + int idx; /* Value to write to hash-table slot */ + int nCollide; /* Number of hash collisions */ + + idx = iFrame - iZero; + assert( idx <= HASHTABLE_NSLOT/2 + 1 ); + + /* If this is the first entry to be added to this hash-table, zero the + ** entire hash table and aPgno[] array before proceding. + */ + if( idx==1 ){ + int nByte = (int)((u8 *)&aHash[HASHTABLE_NSLOT] - (u8 *)&aPgno[1]); + memset((void*)&aPgno[1], 0, nByte); + } + + /* If the entry in aPgno[] is already set, then the previous writer + ** must have exited unexpectedly in the middle of a transaction (after + ** writing one or more dirty pages to the WAL to free up memory). + ** Remove the remnants of that writers uncommitted transaction from + ** the hash-table before writing any new entries. + */ + if( aPgno[idx] ){ + walCleanupHash(pWal); + assert( !aPgno[idx] ); + } + + /* Write the aPgno[] array entry and the hash-table slot. */ + nCollide = idx; + for(iKey=walHash(iPage); aHash[iKey]; iKey=walNextHash(iKey)){ + if( (nCollide--)==0 ) return SQLITE_CORRUPT_BKPT; + } + aPgno[idx] = iPage; + aHash[iKey] = (ht_slot)idx; + +#ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT + /* Verify that the number of entries in the hash table exactly equals + ** the number of entries in the mapping region. 
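+    ** (The two counts must match because the code above stores exactly one
+    ** hash slot for each aPgno[] entry it fills in. And because at most
+    ** HASHTABLE_NPAGE entries are ever added to a table of
+    ** HASHTABLE_NSLOT==2*HASHTABLE_NPAGE slots, the probe loop is guaranteed
+    ** to find a free slot before nCollide reaches zero in a well-formed
+    ** wal-index; nCollide running out therefore indicates corruption.)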
+ */ + { + int i; /* Loop counter */ + int nEntry = 0; /* Number of entries in the hash table */ + for(i=0; ickptLock==1 || pWal->ckptLock==0 ); + assert( WAL_ALL_BUT_WRITE==WAL_WRITE_LOCK+1 ); + assert( WAL_CKPT_LOCK==WAL_ALL_BUT_WRITE ); + assert( pWal->writeLock ); + iLock = WAL_ALL_BUT_WRITE + pWal->ckptLock; + nLock = SQLITE_SHM_NLOCK - iLock; + rc = walLockExclusive(pWal, iLock, nLock); + if( rc ){ + return rc; + } + WALTRACE(("WAL%p: recovery begin...\n", pWal)); + + memset(&pWal->hdr, 0, sizeof(WalIndexHdr)); + + rc = sqlite3OsFileSize(pWal->pWalFd, &nSize); + if( rc!=SQLITE_OK ){ + goto recovery_error; + } + + if( nSize>WAL_HDRSIZE ){ + u8 aBuf[WAL_HDRSIZE]; /* Buffer to load WAL header into */ + u8 *aFrame = 0; /* Malloc'd buffer to load entire frame */ + int szFrame; /* Number of bytes in buffer aFrame[] */ + u8 *aData; /* Pointer to data part of aFrame buffer */ + int iFrame; /* Index of last frame read */ + i64 iOffset; /* Next offset to read from log file */ + int szPage; /* Page size according to the log */ + u32 magic; /* Magic value read from WAL header */ + u32 version; /* Magic value read from WAL header */ + + /* Read in the WAL header. */ + rc = sqlite3OsRead(pWal->pWalFd, aBuf, WAL_HDRSIZE, 0); + if( rc!=SQLITE_OK ){ + goto recovery_error; + } + + /* If the database page size is not a power of two, or is greater than + ** SQLITE_MAX_PAGE_SIZE, conclude that the WAL file contains no valid + ** data. Similarly, if the 'magic' value is invalid, ignore the whole + ** WAL file. + */ + magic = sqlite3Get4byte(&aBuf[0]); + szPage = sqlite3Get4byte(&aBuf[8]); + if( (magic&0xFFFFFFFE)!=WAL_MAGIC + || szPage&(szPage-1) + || szPage>SQLITE_MAX_PAGE_SIZE + || szPage<512 + ){ + goto finished; + } + pWal->hdr.bigEndCksum = (u8)(magic&0x00000001); + pWal->szPage = (u16)szPage; + pWal->nCkpt = sqlite3Get4byte(&aBuf[12]); + memcpy(&pWal->hdr.aSalt, &aBuf[16], 8); + + /* Verify that the WAL header checksum is correct */ + walChecksumBytes(pWal->hdr.bigEndCksum==SQLITE_BIGENDIAN, + aBuf, WAL_HDRSIZE-2*4, 0, pWal->hdr.aFrameCksum + ); + if( pWal->hdr.aFrameCksum[0]!=sqlite3Get4byte(&aBuf[24]) + || pWal->hdr.aFrameCksum[1]!=sqlite3Get4byte(&aBuf[28]) + ){ + goto finished; + } + + /* Verify that the version number on the WAL format is one that + ** are able to understand */ + version = sqlite3Get4byte(&aBuf[4]); + if( version!=WAL_MAX_VERSION ){ + rc = SQLITE_CANTOPEN_BKPT; + goto finished; + } + + /* Malloc a buffer to read frames into. */ + szFrame = szPage + WAL_FRAME_HDRSIZE; + aFrame = (u8 *)sqlite3_malloc(szFrame); + if( !aFrame ){ + rc = SQLITE_NOMEM; + goto recovery_error; + } + aData = &aFrame[WAL_FRAME_HDRSIZE]; + + /* Read all frames from the log file. */ + iFrame = 0; + for(iOffset=WAL_HDRSIZE; (iOffset+szFrame)<=nSize; iOffset+=szFrame){ + u32 pgno; /* Database page number for frame */ + u32 nTruncate; /* dbsize field from frame header */ + int isValid; /* True if this frame is valid */ + + /* Read and decode the next log frame. */ + rc = sqlite3OsRead(pWal->pWalFd, aFrame, szFrame, iOffset); + if( rc!=SQLITE_OK ) break; + isValid = walDecodeFrame(pWal, &pgno, &nTruncate, aData, aFrame); + if( !isValid ) break; + rc = walIndexAppend(pWal, ++iFrame, pgno); + if( rc!=SQLITE_OK ) break; + + /* If nTruncate is non-zero, this is a commit record. 
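For reference, the structural checks performed on the WAL header during recovery can be exercised in isolation. The sketch below applies the same tests to a 32-byte header image: a big-endian magic word at offset 0 (whose low bit selects the checksum byte order) and a page size at offset 8 that must be a power of two within range. The version and checksum tests are only mentioned in comments because they need WAL_MAX_VERSION and walChecksumBytes(); the magic value written in main() is spelled out by hand purely for this demo.

#include <stdio.h>

typedef unsigned int u32;

/* Read a big-endian 32-bit value, as sqlite3Get4byte() does. */
static u32 get4(const unsigned char *p){
  return ((u32)p[0]<<24) | ((u32)p[1]<<16) | ((u32)p[2]<<8) | (u32)p[3];
}

/* Return 1 if the 32-byte header image passes the structural tests used by
** recovery: recognizable magic and a page size that is a power of two in
** [512, SQLITE_MAX_PAGE_SIZE].  (The real code additionally verifies the
** format version at offset 4 and the header checksum at offsets 24..31.) */
static int headerLooksValid(const unsigned char *aBuf, u32 walMagic, u32 mxPageSize){
  u32 magic  = get4(&aBuf[0]);          /* bytes 0..3: magic, low bit = cksum order */
  u32 szPage = get4(&aBuf[8]);          /* bytes 8..11: database page size */
  if( (magic & 0xFFFFFFFE)!=walMagic ) return 0;
  if( szPage<512 || szPage>mxPageSize ) return 0;
  if( szPage & (szPage-1) ) return 0;   /* not a power of two */
  return 1;
}

int main(void){
  unsigned char aBuf[32] = {0};
  const u32 demoMagic = 0x377f0682;                 /* illustrative magic value */
  aBuf[0]=0x37; aBuf[1]=0x7f; aBuf[2]=0x06; aBuf[3]=0x83;   /* magic, big-endian cksum */
  aBuf[10]=0x04;                                    /* page size 1024 at offset 8 */
  printf("valid=%d\n", headerLooksValid(aBuf, demoMagic, 65536));
  return 0;
}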
*/ + if( nTruncate ){ + pWal->hdr.mxFrame = iFrame; + pWal->hdr.nPage = nTruncate; + pWal->hdr.szPage = (u16)szPage; + aFrameCksum[0] = pWal->hdr.aFrameCksum[0]; + aFrameCksum[1] = pWal->hdr.aFrameCksum[1]; + } + } + + sqlite3_free(aFrame); + } + +finished: + if( rc==SQLITE_OK ){ + volatile WalCkptInfo *pInfo; + int i; + pWal->hdr.aFrameCksum[0] = aFrameCksum[0]; + pWal->hdr.aFrameCksum[1] = aFrameCksum[1]; + walIndexWriteHdr(pWal); + + /* Reset the checkpoint-header. This is safe because this thread is + ** currently holding locks that exclude all other readers, writers and + ** checkpointers. + */ + pInfo = walCkptInfo(pWal); + pInfo->nBackfill = 0; + pInfo->aReadMark[0] = 0; + for(i=1; iaReadMark[i] = READMARK_NOT_USED; + } + +recovery_error: + WALTRACE(("WAL%p: recovery %s\n", pWal, rc ? "failed" : "ok")); + walUnlockExclusive(pWal, iLock, nLock); + return rc; +} + +/* +** Close an open wal-index. +*/ +static void walIndexClose(Wal *pWal, int isDelete){ + sqlite3OsShmUnmap(pWal->pDbFd, isDelete); +} + +/* +** Open a connection to the WAL file zWalName. The database file must +** already be opened on connection pDbFd. The buffer that zWalName points +** to must remain valid for the lifetime of the returned Wal* handle. +** +** A SHARED lock should be held on the database file when this function +** is called. The purpose of this SHARED lock is to prevent any other +** client from unlinking the WAL or wal-index file. If another process +** were to do this just after this client opened one of these files, the +** system would be badly broken. +** +** If the log file is successfully opened, SQLITE_OK is returned and +** *ppWal is set to point to a new WAL handle. If an error occurs, +** an SQLite error code is returned and *ppWal is left unmodified. +*/ +int sqlite3WalOpen( + sqlite3_vfs *pVfs, /* vfs module to open wal and wal-index */ + sqlite3_file *pDbFd, /* The open database file */ + const char *zWalName, /* Name of the WAL file */ + Wal **ppWal /* OUT: Allocated Wal handle */ +){ + int rc; /* Return Code */ + Wal *pRet; /* Object to allocate and return */ + int flags; /* Flags passed to OsOpen() */ + + assert( zWalName && zWalName[0] ); + assert( pDbFd ); + + /* In the amalgamation, the os_unix.c and os_win.c source files come before + ** this source file. Verify that the #defines of the locking byte offsets + ** in os_unix.c and os_win.c agree with the WALINDEX_LOCK_OFFSET value. + */ +#ifdef WIN_SHM_BASE + assert( WIN_SHM_BASE==WALINDEX_LOCK_OFFSET ); +#endif +#ifdef UNIX_SHM_BASE + assert( UNIX_SHM_BASE==WALINDEX_LOCK_OFFSET ); +#endif + + + /* Allocate an instance of struct Wal to return. */ + *ppWal = 0; + pRet = (Wal*)sqlite3MallocZero(sizeof(Wal) + pVfs->szOsFile); + if( !pRet ){ + return SQLITE_NOMEM; + } + + pRet->pVfs = pVfs; + pRet->pWalFd = (sqlite3_file *)&pRet[1]; + pRet->pDbFd = pDbFd; + pRet->readLock = -1; + pRet->zWalName = zWalName; + + /* Open file handle on the write-ahead log file. */ + flags = (SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_WAL); + rc = sqlite3OsOpen(pVfs, zWalName, pRet->pWalFd, flags, &flags); + if( rc==SQLITE_OK && flags&SQLITE_OPEN_READONLY ){ + pRet->readOnly = 1; + } + + if( rc!=SQLITE_OK ){ + walIndexClose(pRet, 0); + sqlite3OsClose(pRet->pWalFd); + sqlite3_free(pRet); + }else{ + *ppWal = pRet; + WALTRACE(("WAL%d: opened\n", pRet)); + } + return rc; +} + +/* +** Find the smallest page number out of all pages held in the WAL that +** has not been returned by any prior invocation of this method on the +** same WalIterator object. 
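As a usage sketch (not part of the patch), this is roughly how a caller inside the SQLite tree would drive the open/close pair described above. The pVfs, pDbFd, zWal, sync_flags and buffer arguments are assumed to be prepared by the pager, and error handling is reduced to early returns; a real caller passes a page-sized buffer so that the final checkpoint attempted by sqlite3WalClose() can run.

/* Sketch only: uses the internal wal.h interface, so it would live in the tree. */
#include "sqliteInt.h"
#include "wal.h"

static int demoOpenAndCloseWal(
  sqlite3_vfs *pVfs,       /* VFS already used to open the database file */
  sqlite3_file *pDbFd,     /* Open database file (SHARED lock held) */
  const char *zWal,        /* Name of the "-wal" file; must outlive the handle */
  int sync_flags,          /* OsSync() flags matching PRAGMA synchronous, or 0 */
  int nBuf,                /* Size of zBuf; at least the database page size */
  u8 *zBuf                 /* Scratch buffer for the closing checkpoint */
){
  Wal *pWal = 0;
  int rc = sqlite3WalOpen(pVfs, pDbFd, zWal, &pWal);
  if( rc!=SQLITE_OK ) return rc;

  /* ... read and write transactions against pWal would happen here ... */

  /* sqlite3WalClose() tries to take an EXCLUSIVE lock on the database file;
  ** if it succeeds it checkpoints into zBuf and deletes the WAL file. */
  return sqlite3WalClose(pWal, sync_flags, nBuf, zBuf);
}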
Write into *piFrame the frame index where
+** that page was last written into the WAL.  Write into *piPage the page
+** number.
+**
+** Return 0 on success.  If there are no pages in the WAL with a page
+** number larger than *piPage, then return 1.
+*/
+static int walIteratorNext(
+  WalIterator *p,               /* Iterator */
+  u32 *piPage,                  /* OUT: The page number of the next page */
+  u32 *piFrame                  /* OUT: Wal frame index of next page */
+){
+  u32 iMin;                     /* Result pgno must be greater than iMin */
+  u32 iRet = 0xFFFFFFFF;        /* 0xffffffff is never a valid page number */
+  int i;                        /* For looping through segments */
+
+  iMin = p->iPrior;
+  assert( iMin<0xffffffff );
+  for(i=p->nSegment-1; i>=0; i--){
+    struct WalSegment *pSegment = &p->aSegment[i];
+    while( pSegment->iNext<pSegment->nEntry ){
+      u32 iPg = pSegment->aPgno[pSegment->aIndex[pSegment->iNext]];
+      if( iPg>iMin ){
+        if( iPg<iRet ){
+          iRet = iPg;
+          *piFrame = pSegment->iZero + pSegment->aIndex[pSegment->iNext];
+        }
+        break;
+      }
+      pSegment->iNext++;
+    }
+  }
+
+  *piPage = p->iPrior = iRet;
+  return (iRet==0xFFFFFFFF);
+}
+
+/*
+** This function merges two sorted lists into a single sorted list.
+*/
+static void walMerge(
+  u32 *aContent,                /* Pages in wal */
+  ht_slot *aLeft,               /* IN: Left hand input list */
+  int nLeft,                    /* IN: Elements in array *paLeft */
+  ht_slot **paRight,            /* IN/OUT: Right hand input list */
+  int *pnRight,                 /* IN/OUT: Elements in *paRight */
+  ht_slot *aTmp                 /* Temporary buffer */
+){
+  int iLeft = 0;                /* Current index in aLeft */
+  int iRight = 0;               /* Current index in aRight */
+  int iOut = 0;                 /* Current index in output buffer */
+  int nRight = *pnRight;
+  ht_slot *aRight = *paRight;
+
+  assert( nLeft>0 && nRight>0 );
+  while( iRight<nRight || iLeft<nLeft ){
+    ht_slot logpage;
+    Pgno dbpage;
+
+    if( (iLeft<nLeft)
+     && (iRight>=nRight || aContent[aLeft[iLeft]]<aContent[aRight[iRight]])
+    ){
+      logpage = aLeft[iLeft++];
+    }else{
+      logpage = aRight[iRight++];
+    }
+    dbpage = aContent[logpage];
+
+    aTmp[iOut++] = logpage;
+    if( iLeft<nLeft && aContent[aLeft[iLeft]]==dbpage ) iLeft++;
+
+    assert( iLeft>=nLeft || aContent[aLeft[iLeft]]>dbpage );
+    assert( iRight>=nRight || aContent[aRight[iRight]]>dbpage );
+  }
+
+  *paRight = aLeft;
+  *pnRight = iOut;
+  memcpy(aLeft, aTmp, sizeof(aTmp[0])*iOut);
+}
+
+/*
+** Sort the elements in list aList, removing any duplicates.
+*/
+static void walMergesort(
+  u32 *aContent,                /* Pages in wal */
+  ht_slot *aBuffer,             /* Buffer of at least *pnList items to use */
+  ht_slot *aList,               /* IN/OUT: List to sort */
+  int *pnList                   /* IN/OUT: Number of elements in aList[] */
+){
+  struct Sublist {
+    int nList;                  /* Number of elements in aList */
+    ht_slot *aList;             /* Pointer to sub-list content */
+  };
+
+  const int nList = *pnList;    /* Size of input list */
+  int nMerge = 0;               /* Number of elements in list aMerge */
+  ht_slot *aMerge = 0;          /* List to be merged */
+  int iList;                    /* Index into input list */
+  int iSub = 0;                 /* Index into aSub array */
+  struct Sublist aSub[13];      /* Array of sub-lists */
+
+  memset(aSub, 0, sizeof(aSub));
+  assert( nList<=HASHTABLE_NPAGE && nList>0 );
+  assert( HASHTABLE_NPAGE==(1<<(ArraySize(aSub)-1)) );
+
+  for(iList=0; iList<nList; iList++){
+    nMerge = 1;
+    aMerge = &aList[iList];
+    for(iSub=0; iList & (1<<iSub); iSub++){
+      struct Sublist *p = &aSub[iSub];
+      assert( p->aList && p->nList<=(1<<iSub) );
+      assert( p->aList==&aList[iList&~((2<<iSub)-1)] );
+      walMerge(aContent, p->aList, p->nList, &aMerge, &nMerge, aBuffer);
+    }
+    aSub[iSub].aList = aMerge;
+    aSub[iSub].nList = nMerge;
+  }
+
+  for(iSub++; iSub<ArraySize(aSub); iSub++){
+    if( nList & (1<<iSub) ){
+      struct Sublist *p = &aSub[iSub];
+      assert( p->nList<=(1<<iSub) );
+      assert( p->aList==&aList[nList&~((2<<iSub)-1)] );
+      walMerge(aContent, p->aList, p->nList, &aMerge, &nMerge, aBuffer);
+    }
+  }
+  assert( aMerge==aList );
+  *pnList = nMerge;
+
+#ifdef SQLITE_DEBUG
+  {
+    int i;
+    for(i=1; i<*pnList; i++){
+      assert( aContent[aList[i]] > aContent[aList[i-1]] );
+    }
+  }
+#endif
+}
+
+/*
+** Free an iterator allocated by walIteratorInit().
+*/
+static void walIteratorFree(WalIterator *p){
+  sqlite3ScratchFree(p);
+}
+
+/*
+** Construct a WalIterator object that can be used to loop over all
+** pages in the WAL in ascending order.
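The merge routines above exist to produce one property: a list of frame indices ordered by page number, with duplicate pages collapsed to their most recent frame. The standalone sketch below demonstrates that post-condition with a simple insertion sort instead of the sublist merge, so only the resulting order is comparable, not the algorithm or its performance characteristics.

#include <stdio.h>

/* Sort frame indices 1..nFrame by the page they contain (aContent[i] is the
** page written by frame i), dropping older duplicates so that each page is
** left with its most recent frame.  A plain insertion sort stands in for
** the power-of-two sublist merge used by wal.c. */
static int demoSortFrames(const unsigned *aContent, int nFrame, int *aOut){
  int n = 0, i, j;
  for(i=1; i<=nFrame; i++){
    int dup = -1;
    for(j=0; j<n; j++) if( aContent[aOut[j]]==aContent[i] ) dup = j;
    if( dup>=0 ){ aOut[dup] = i; continue; }       /* later frame replaces earlier */
    for(j=n; j>0 && aContent[aOut[j-1]]>aContent[i]; j--) aOut[j] = aOut[j-1];
    aOut[j] = i;
    n++;
  }
  return n;
}

int main(void){
  /* aContent[1..6]: pages written by frames 1..6 (index 0 unused). */
  unsigned aContent[] = {0, 5, 3, 9, 3, 1, 5};
  int aOut[6], n, i;
  n = demoSortFrames(aContent, 6, aOut);
  for(i=0; i<n; i++) printf("page %u -> frame %d\n", aContent[aOut[i]], aOut[i]);
  /* Prints: page 1 -> frame 5, page 3 -> frame 4, page 5 -> frame 6, page 9 -> frame 3 */
  return 0;
}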
The caller must hold the checkpoint +** +** On success, make *pp point to the newly allocated WalInterator object +** return SQLITE_OK. Otherwise, return an error code. If this routine +** returns an error, the value of *pp is undefined. +** +** The calling routine should invoke walIteratorFree() to destroy the +** WalIterator object when it has finished with it. +*/ +static int walIteratorInit(Wal *pWal, WalIterator **pp){ + WalIterator *p; /* Return value */ + int nSegment; /* Number of segments to merge */ + u32 iLast; /* Last frame in log */ + int nByte; /* Number of bytes to allocate */ + int i; /* Iterator variable */ + ht_slot *aTmp; /* Temp space used by merge-sort */ + int rc = SQLITE_OK; /* Return Code */ + + /* This routine only runs while holding the checkpoint lock. And + ** it only runs if there is actually content in the log (mxFrame>0). + */ + assert( pWal->ckptLock && pWal->hdr.mxFrame>0 ); + iLast = pWal->hdr.mxFrame; + + /* Allocate space for the WalIterator object. */ + nSegment = walFramePage(iLast) + 1; + nByte = sizeof(WalIterator) + + (nSegment-1)*sizeof(struct WalSegment) + + iLast*sizeof(ht_slot); + p = (WalIterator *)sqlite3ScratchMalloc(nByte); + if( !p ){ + return SQLITE_NOMEM; + } + memset(p, 0, nByte); + p->nSegment = nSegment; + + /* Allocate temporary space used by the merge-sort routine. This block + ** of memory will be freed before this function returns. + */ + aTmp = (ht_slot *)sqlite3ScratchMalloc( + sizeof(ht_slot) * (iLast>HASHTABLE_NPAGE?HASHTABLE_NPAGE:iLast) + ); + if( !aTmp ){ + rc = SQLITE_NOMEM; + } + + for(i=0; rc==SQLITE_OK && iaSegment[p->nSegment])[iZero]; + iZero++; + + for(j=0; jaSegment[i].iZero = iZero; + p->aSegment[i].nEntry = nEntry; + p->aSegment[i].aIndex = aIndex; + p->aSegment[i].aPgno = (u32 *)aPgno; + } + } + sqlite3ScratchFree(aTmp); + + if( rc!=SQLITE_OK ){ + walIteratorFree(p); + } + *pp = p; + return rc; +} + +/* +** Copy as much content as we can from the WAL back into the database file +** in response to an sqlite3_wal_checkpoint() request or the equivalent. +** +** The amount of information copies from WAL to database might be limited +** by active readers. This routine will never overwrite a database page +** that a concurrent reader might be using. +** +** All I/O barrier operations (a.k.a fsyncs) occur in this routine when +** SQLite is in WAL-mode in synchronous=NORMAL. That means that if +** checkpoints are always run by a background thread or background +** process, foreground threads will never block on a lengthy fsync call. +** +** Fsync is called on the WAL before writing content out of the WAL and +** into the database. This ensures that if the new content is persistent +** in the WAL and can be recovered following a power-loss or hard reset. +** +** Fsync is also called on the database file if (and only if) the entire +** WAL content is copied into the database file. This second fsync makes +** it safe to delete the WAL since the new content will persist in the +** database file. +** +** This routine uses and updates the nBackfill field of the wal-index header. +** This is the only routine tha will increase the value of nBackfill. +** (A WAL reset or recovery will revert nBackfill to zero, but not increase +** its value.) +** +** The caller must be holding sufficient locks to ensure that no other +** checkpoint is running (in any other thread or process) at the same +** time. 
+*/
+static int walCheckpoint(
+  Wal *pWal,                      /* Wal connection */
+  int sync_flags,                 /* Flags for OsSync() (or 0) */
+  int nBuf,                       /* Size of zBuf in bytes */
+  u8 *zBuf                        /* Temporary buffer to use */
+){
+  int rc;                         /* Return code */
+  int szPage = pWal->hdr.szPage;  /* Database page-size */
+  WalIterator *pIter = 0;         /* Wal iterator context */
+  u32 iDbpage = 0;                /* Next database page to write */
+  u32 iFrame = 0;                 /* Wal frame containing data for iDbpage */
+  u32 mxSafeFrame;                /* Max frame that can be backfilled */
+  int i;                          /* Loop counter */
+  volatile WalCkptInfo *pInfo;    /* The checkpoint status information */
+
+  if( pWal->hdr.mxFrame==0 ) return SQLITE_OK;
+
+  /* Allocate the iterator */
+  rc = walIteratorInit(pWal, &pIter);
+  if( rc!=SQLITE_OK ){
+    return rc;
+  }
+  assert( pIter );
+
+  /*** TODO:  Move this test out to the caller.  Make it an assert() here ***/
+  if( pWal->hdr.szPage!=nBuf ){
+    rc = SQLITE_CORRUPT_BKPT;
+    goto walcheckpoint_out;
+  }
+
+  /* Compute in mxSafeFrame the index of the last frame of the WAL that is
+  ** safe to write into the database.  Frames beyond mxSafeFrame might
+  ** overwrite database pages that are in use by active readers and thus
+  ** cannot be backfilled from the WAL.
+  */
+  mxSafeFrame = pWal->hdr.mxFrame;
+  pInfo = walCkptInfo(pWal);
+  for(i=1; i<WAL_NREADER; i++){
+    u32 y = pInfo->aReadMark[i];
+    if( mxSafeFrame>=y ){
+      assert( y<=pWal->hdr.mxFrame );
+      rc = walLockExclusive(pWal, WAL_READ_LOCK(i), 1);
+      if( rc==SQLITE_OK ){
+        pInfo->aReadMark[i] = READMARK_NOT_USED;
+        walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1);
+      }else if( rc==SQLITE_BUSY ){
+        mxSafeFrame = y;
+      }else{
+        goto walcheckpoint_out;
+      }
+    }
+  }
+
+  if( pInfo->nBackfill<mxSafeFrame
+   && (rc = walLockExclusive(pWal, WAL_READ_LOCK(0), 1))==SQLITE_OK
+  ){
+    u32 nBackfill = pInfo->nBackfill;
+
+    /* Sync the WAL to disk */
+    if( sync_flags ){
+      rc = sqlite3OsSync(pWal->pWalFd, sync_flags);
+    }
+
+    /* Iterate through the contents of the WAL, copying data to the db file. */
+    while( rc==SQLITE_OK && 0==walIteratorNext(pIter, &iDbpage, &iFrame) ){
+      i64 iOffset;
+      assert( walFramePgno(pWal, iFrame)==iDbpage );
+      if( iFrame<=nBackfill || iFrame>mxSafeFrame ) continue;
+      iOffset = walFrameOffset(iFrame, szPage) + WAL_FRAME_HDRSIZE;
+      /* testcase( IS_BIG_INT(iOffset) ); // requires a 4GiB WAL file */
+      rc = sqlite3OsRead(pWal->pWalFd, zBuf, szPage, iOffset);
+      if( rc!=SQLITE_OK ) break;
+      iOffset = (iDbpage-1)*(i64)szPage;
+      testcase( IS_BIG_INT(iOffset) );
+      rc = sqlite3OsWrite(pWal->pDbFd, zBuf, szPage, iOffset);
+      if( rc!=SQLITE_OK ) break;
+    }
+
+    /* If work was actually accomplished... */
+    if( rc==SQLITE_OK ){
+      if( mxSafeFrame==walIndexHdr(pWal)->mxFrame ){
+        i64 szDb = pWal->hdr.nPage*(i64)szPage;
+        testcase( IS_BIG_INT(szDb) );
+        rc = sqlite3OsTruncate(pWal->pDbFd, szDb);
+        if( rc==SQLITE_OK && sync_flags ){
+          rc = sqlite3OsSync(pWal->pDbFd, sync_flags);
+        }
+      }
+      if( rc==SQLITE_OK ){
+        pInfo->nBackfill = mxSafeFrame;
+      }
+    }
+
+    /* Release the reader lock held while backfilling */
+    walUnlockExclusive(pWal, WAL_READ_LOCK(0), 1);
+  }else if( rc==SQLITE_BUSY ){
+    /* Reset the return code so as not to report a checkpoint failure
+    ** just because active readers prevent any backfill.
+    */
+    rc = SQLITE_OK;
+  }
+
+ walcheckpoint_out:
+  walIteratorFree(pIter);
+  return rc;
+}
+
+/*
+** Close a connection to a log file.
+*/ +int sqlite3WalClose( + Wal *pWal, /* Wal to close */ + int sync_flags, /* Flags to pass to OsSync() (or 0) */ + int nBuf, + u8 *zBuf /* Buffer of at least nBuf bytes */ +){ + int rc = SQLITE_OK; + if( pWal ){ + int isDelete = 0; /* True to unlink wal and wal-index files */ + + /* If an EXCLUSIVE lock can be obtained on the database file (using the + ** ordinary, rollback-mode locking methods, this guarantees that the + ** connection associated with this log file is the only connection to + ** the database. In this case checkpoint the database and unlink both + ** the wal and wal-index files. + ** + ** The EXCLUSIVE lock is not released before returning. + */ + rc = sqlite3OsLock(pWal->pDbFd, SQLITE_LOCK_EXCLUSIVE); + if( rc==SQLITE_OK ){ + pWal->exclusiveMode = 1; + rc = sqlite3WalCheckpoint(pWal, sync_flags, nBuf, zBuf); + if( rc==SQLITE_OK ){ + isDelete = 1; + } + } + + walIndexClose(pWal, isDelete); + sqlite3OsClose(pWal->pWalFd); + if( isDelete ){ + sqlite3OsDelete(pWal->pVfs, pWal->zWalName, 0); + } + WALTRACE(("WAL%p: closed\n", pWal)); + sqlite3_free((void *)pWal->apWiData); + sqlite3_free(pWal); + } + return rc; +} + +/* +** Try to read the wal-index header. Return 0 on success and 1 if +** there is a problem. +** +** The wal-index is in shared memory. Another thread or process might +** be writing the header at the same time this procedure is trying to +** read it, which might result in inconsistency. A dirty read is detected +** by verifying that both copies of the header are the same and also by +** a checksum on the header. +** +** If and only if the read is consistent and the header is different from +** pWal->hdr, then pWal->hdr is updated to the content of the new header +** and *pChanged is set to 1. +** +** If the checksum cannot be verified return non-zero. If the header +** is read successfully and the checksum verified, return zero. +*/ +static int walIndexTryHdr(Wal *pWal, int *pChanged){ + u32 aCksum[2]; /* Checksum on the header content */ + WalIndexHdr h1, h2; /* Two copies of the header content */ + WalIndexHdr volatile *aHdr; /* Header in shared memory */ + + /* The first page of the wal-index must be mapped at this point. */ + assert( pWal->nWiData>0 && pWal->apWiData[0] ); + + /* Read the header. This might happen currently with a write to the + ** same area of shared memory on a different CPU in a SMP, + ** meaning it is possible that an inconsistent snapshot is read + ** from the file. If this happens, return non-zero. + ** + ** There are two copies of the header at the beginning of the wal-index. + ** When reading, read [0] first then [1]. Writes are in the reverse order. + ** Memory barriers are used to prevent the compiler or the hardware from + ** reordering the reads and writes. + */ + aHdr = walIndexHdr(pWal); + memcpy(&h1, (void *)&aHdr[0], sizeof(h1)); + sqlite3OsShmBarrier(pWal->pDbFd); + memcpy(&h2, (void *)&aHdr[1], sizeof(h2)); + + if( memcmp(&h1, &h2, sizeof(h1))!=0 ){ + return 1; /* Dirty read */ + } + if( h1.isInit==0 ){ + return 1; /* Malformed header - probably all zeros */ + } + walChecksumBytes(1, (u8*)&h1, sizeof(h1)-sizeof(h1.aCksum), 0, aCksum); + if( aCksum[0]!=h1.aCksum[0] || aCksum[1]!=h1.aCksum[1] ){ + return 1; /* Checksum does not match */ + } + + if( memcmp(&pWal->hdr, &h1, sizeof(WalIndexHdr)) ){ + *pChanged = 1; + memcpy(&pWal->hdr, &h1, sizeof(WalIndexHdr)); + pWal->szPage = pWal->hdr.szPage; + } + + /* The header was successfully read. Return zero. 
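The double-copy read protocol described above is a general lock-free pattern. It is sketched standalone below with a plain struct in place of WalIndexHdr, a toy checksum in place of walChecksumBytes(), and a C11 atomic_thread_fence() in place of the VFS-supplied sqlite3OsShmBarrier(). The names are hypothetical; only the ordering mirrors the code: read copy [0], barrier, read copy [1], while the writer updates [1] first and [0] last.

#include <stdatomic.h>
#include <string.h>

typedef struct DemoHdr DemoHdr;
struct DemoHdr {
  unsigned iChange;       /* Bumped on each commit */
  unsigned mxFrame;       /* Last valid frame in the WAL */
  unsigned aCksum[2];     /* Checksum over the fields above */
};

/* Trivial stand-in for walChecksumBytes(). */
static void demoCksum(const DemoHdr *p, unsigned aOut[2]){
  aOut[0] = p->iChange*3 + p->mxFrame*7;
  aOut[1] = aOut[0] ^ 0x5a5a5a5a;
}

/* Read the doubly-written header from shared memory.  Return 0 and fill
** *pOut on a clean read; return 1 if a concurrent writer was detected
** (copies differ or checksum mismatch), in which case the caller retries. */
static int demoTryReadHdr(volatile DemoHdr *aShared, DemoHdr *pOut){
  DemoHdr h1, h2;
  unsigned aCksum[2];

  memcpy(&h1, (void *)&aShared[0], sizeof(h1));   /* copy [0] first...        */
  atomic_thread_fence(memory_order_seq_cst);      /* ...barrier...            */
  memcpy(&h2, (void *)&aShared[1], sizeof(h2));   /* ...then copy [1]         */

  if( memcmp(&h1, &h2, sizeof(h1)) ) return 1;    /* torn by a writer         */
  demoCksum(&h1, aCksum);
  if( aCksum[0]!=h1.aCksum[0] || aCksum[1]!=h1.aCksum[1] ) return 1;
  *pOut = h1;
  return 0;
}

/* The writer seals the header and stores copy [1] first, then copy [0],
** with a barrier between; a consistent pair therefore implies a
** consistent snapshot. */
static void demoWriteHdr(volatile DemoHdr *aShared, DemoHdr *pNew){
  demoCksum(pNew, pNew->aCksum);
  memcpy((void *)&aShared[1], pNew, sizeof(*pNew));
  atomic_thread_fence(memory_order_seq_cst);
  memcpy((void *)&aShared[0], pNew, sizeof(*pNew));
}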
*/ + return 0; +} + +/* +** Read the wal-index header from the wal-index and into pWal->hdr. +** If the wal-header appears to be corrupt, try to reconstruct the +** wal-index from the WAL before returning. +** +** Set *pChanged to 1 if the wal-index header value in pWal->hdr is +** changed by this opertion. If pWal->hdr is unchanged, set *pChanged +** to 0. +** +** If the wal-index header is successfully read, return SQLITE_OK. +** Otherwise an SQLite error code. +*/ +static int walIndexReadHdr(Wal *pWal, int *pChanged){ + int rc; /* Return code */ + int badHdr; /* True if a header read failed */ + volatile u32 *page0; /* Chunk of wal-index containing header */ + + /* Ensure that page 0 of the wal-index (the page that contains the + ** wal-index header) is mapped. Return early if an error occurs here. + */ + assert( pChanged ); + rc = walIndexPage(pWal, 0, &page0); + if( rc!=SQLITE_OK ){ + return rc; + }; + assert( page0 || pWal->writeLock==0 ); + + /* If the first page of the wal-index has been mapped, try to read the + ** wal-index header immediately, without holding any lock. This usually + ** works, but may fail if the wal-index header is corrupt or currently + ** being modified by another thread or process. + */ + badHdr = (page0 ? walIndexTryHdr(pWal, pChanged) : 1); + + /* If the first attempt failed, it might have been due to a race + ** with a writer. So get a WRITE lock and try again. + */ + assert( badHdr==0 || pWal->writeLock==0 ); + if( badHdr && SQLITE_OK==(rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1)) ){ + pWal->writeLock = 1; + if( SQLITE_OK==(rc = walIndexPage(pWal, 0, &page0)) ){ + badHdr = walIndexTryHdr(pWal, pChanged); + if( badHdr ){ + /* If the wal-index header is still malformed even while holding + ** a WRITE lock, it can only mean that the header is corrupted and + ** needs to be reconstructed. So run recovery to do exactly that. + */ + rc = walIndexRecover(pWal); + *pChanged = 1; + } + } + pWal->writeLock = 0; + walUnlockExclusive(pWal, WAL_WRITE_LOCK, 1); + } + + /* If the header is read successfully, check the version number to make + ** sure the wal-index was not constructed with some future format that + ** this version of SQLite cannot understand. + */ + if( badHdr==0 && pWal->hdr.iVersion!=WALINDEX_MAX_VERSION ){ + rc = SQLITE_CANTOPEN_BKPT; + } + + return rc; +} + +/* +** This is the value that walTryBeginRead returns when it needs to +** be retried. +*/ +#define WAL_RETRY (-1) + +/* +** Attempt to start a read transaction. This might fail due to a race or +** other transient condition. When that happens, it returns WAL_RETRY to +** indicate to the caller that it is safe to retry immediately. +** +** On success return SQLITE_OK. On a permanent failure (such an +** I/O error or an SQLITE_BUSY because another process is running +** recovery) return a positive error code. +** +** The useWal parameter is true to force the use of the WAL and disable +** the case where the WAL is bypassed because it has been completely +** checkpointed. If useWal==0 then this routine calls walIndexReadHdr() +** to make a copy of the wal-index header into pWal->hdr. If the +** wal-index header has changed, *pChanged is set to 1 (as an indication +** to the caller that the local paget cache is obsolete and needs to be +** flushed.) When useWal==1, the wal-index header is assumed to already +** be loaded and the pChanged parameter is unused. 
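Seen from the caller's side, WAL_RETRY simply means "loop again". A standalone sketch of that loop follows, with a stub standing in for walTryBeginRead(); the real routine handles the back-off itself (a brief sleep after a few failed attempts, SQLITE_PROTOCOL after an excessive number), so the caller's loop can stay this simple. All demo* names and the constants are illustrative.

#include <stdio.h>

#define DEMO_OK        0
#define DEMO_RETRY   (-1)    /* Mirrors WAL_RETRY: transient, try again */

/* Stub standing in for walTryBeginRead(): succeeds on the third attempt. */
static int demoTryBeginRead(int *pChanged, int useWal, int cnt){
  (void)useWal;
  if( cnt<3 ) return DEMO_RETRY;
  *pChanged = 0;
  return DEMO_OK;
}

/* Caller-side loop: retry while the transient code comes back; any other
** code (success or a hard error) ends the loop.  cnt counts prior attempts
** and is passed back in so the callee can escalate. */
static int demoBeginReadTransaction(int *pChanged){
  int rc;
  int cnt = 0;
  do{
    rc = demoTryBeginRead(pChanged, 0, ++cnt);
  }while( rc==DEMO_RETRY );
  return rc;
}

int main(void){
  int changed = 0;
  printf("rc=%d after retries\n", demoBeginReadTransaction(&changed));
  return 0;
}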
+** +** The caller must set the cnt parameter to the number of prior calls to +** this routine during the current read attempt that returned WAL_RETRY. +** This routine will start taking more aggressive measures to clear the +** race conditions after multiple WAL_RETRY returns, and after an excessive +** number of errors will ultimately return SQLITE_PROTOCOL. The +** SQLITE_PROTOCOL return indicates that some other process has gone rogue +** and is not honoring the locking protocol. There is a vanishingly small +** chance that SQLITE_PROTOCOL could be returned because of a run of really +** bad luck when there is lots of contention for the wal-index, but that +** possibility is so small that it can be safely neglected, we believe. +** +** On success, this routine obtains a read lock on +** WAL_READ_LOCK(pWal->readLock). The pWal->readLock integer is +** in the range 0 <= pWal->readLock < WAL_NREADER. If pWal->readLock==(-1) +** that means the Wal does not hold any read lock. The reader must not +** access any database page that is modified by a WAL frame up to and +** including frame number aReadMark[pWal->readLock]. The reader will +** use WAL frames up to and including pWal->hdr.mxFrame if pWal->readLock>0 +** Or if pWal->readLock==0, then the reader will ignore the WAL +** completely and get all content directly from the database file. +** If the useWal parameter is 1 then the WAL will never be ignored and +** this routine will always set pWal->readLock>0 on success. +** When the read transaction is completed, the caller must release the +** lock on WAL_READ_LOCK(pWal->readLock) and set pWal->readLock to -1. +** +** This routine uses the nBackfill and aReadMark[] fields of the header +** to select a particular WAL_READ_LOCK() that strives to let the +** checkpoint process do as much work as possible. This routine might +** update values of the aReadMark[] array in the header, but if it does +** so it takes care to hold an exclusive lock on the corresponding +** WAL_READ_LOCK() while changing values. +*/ +static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){ + volatile WalCkptInfo *pInfo; /* Checkpoint information in wal-index */ + u32 mxReadMark; /* Largest aReadMark[] value */ + int mxI; /* Index of largest aReadMark[] value */ + int i; /* Loop counter */ + int rc = SQLITE_OK; /* Return code */ + + assert( pWal->readLock<0 ); /* Not currently locked */ + + /* Take steps to avoid spinning forever if there is a protocol error. */ + if( cnt>5 ){ + if( cnt>100 ) return SQLITE_PROTOCOL; + sqlite3OsSleep(pWal->pVfs, 1); + } + + if( !useWal ){ + rc = walIndexReadHdr(pWal, pChanged); + if( rc==SQLITE_BUSY ){ + /* If there is not a recovery running in another thread or process + ** then convert BUSY errors to WAL_RETRY. If recovery is known to + ** be running, convert BUSY to BUSY_RECOVERY. There is a race here + ** which might cause WAL_RETRY to be returned even if BUSY_RECOVERY + ** would be technically correct. But the race is benign since with + ** WAL_RETRY this routine will be called again and will probably be + ** right on the second iteration. + */ + if( pWal->apWiData[0]==0 ){ + /* This branch is taken when the xShmMap() method returns SQLITE_BUSY. + ** We assume this is a transient condition, so return WAL_RETRY. 
The
+        ** xShmMap() implementation used by the default unix and win32 VFS
+        ** modules may return SQLITE_BUSY due to a race condition in the
+        ** code that determines whether or not the shared-memory region
+        ** must be zeroed before the requested page is returned.
+        */
+        rc = WAL_RETRY;
+      }else if( SQLITE_OK==(rc = walLockShared(pWal, WAL_RECOVER_LOCK)) ){
+        walUnlockShared(pWal, WAL_RECOVER_LOCK);
+        rc = WAL_RETRY;
+      }else if( rc==SQLITE_BUSY ){
+        rc = SQLITE_BUSY_RECOVERY;
+      }
+    }
+    if( rc!=SQLITE_OK ){
+      return rc;
+    }
+  }
+
+  pInfo = walCkptInfo(pWal);
+  if( !useWal && pInfo->nBackfill==pWal->hdr.mxFrame ){
+    /* The WAL has been completely backfilled (or it is empty)
+    ** and can be safely ignored.
+    */
+    rc = walLockShared(pWal, WAL_READ_LOCK(0));
+    sqlite3OsShmBarrier(pWal->pDbFd);
+    if( rc==SQLITE_OK ){
+      if( memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr)) ){
+        /* It is not safe to allow the reader to continue here if frames
+        ** may have been appended to the log before READ_LOCK(0) was obtained.
+        ** When holding READ_LOCK(0), the reader ignores the entire log file,
+        ** which implies that the database file contains a trustworthy
+        ** snapshot.  Since holding READ_LOCK(0) prevents a checkpoint from
+        ** happening, this is usually correct.
+        **
+        ** However, if frames have been appended to the log (or if the log
+        ** is wrapped and written for that matter) before the READ_LOCK(0)
+        ** is obtained, that is not necessarily true.  A checkpointer may
+        ** have started to backfill the appended frames but crashed before
+        ** it finished, leaving a corrupt image in the database file.
+        */
+        walUnlockShared(pWal, WAL_READ_LOCK(0));
+        return WAL_RETRY;
+      }
+      pWal->readLock = 0;
+      return SQLITE_OK;
+    }else if( rc!=SQLITE_BUSY ){
+      return rc;
+    }
+  }
+
+  /* If we get this far, it means that the reader will want to use
+  ** the WAL to get at content from recent commits.  The job now is
+  ** to select one of the aReadMark[] entries that is closest to
+  ** but not exceeding pWal->hdr.mxFrame and lock that entry.
+  */
+  mxReadMark = 0;
+  mxI = 0;
+  for(i=1; i<WAL_NREADER; i++){
+    u32 thisMark = pInfo->aReadMark[i];
+    if( mxReadMark<=thisMark && thisMark<=pWal->hdr.mxFrame ){
+      assert( thisMark!=READMARK_NOT_USED );
+      mxReadMark = thisMark;
+      mxI = i;
+    }
+  }
+  if( mxI==0 ){
+    /* If we get here, it means that all of the aReadMark[] entries between
+    ** 1 and WAL_NREADER-1 are zero.  Try to initialize aReadMark[1] to
+    ** be mxFrame, then retry.
+    */
+    rc = walLockExclusive(pWal, WAL_READ_LOCK(1), 1);
+    if( rc==SQLITE_OK ){
+      pInfo->aReadMark[1] = pWal->hdr.mxFrame;
+      walUnlockExclusive(pWal, WAL_READ_LOCK(1), 1);
+      rc = WAL_RETRY;
+    }else if( rc==SQLITE_BUSY ){
+      rc = WAL_RETRY;
+    }
+    return rc;
+  }else{
+    if( mxReadMark < pWal->hdr.mxFrame ){
+      for(i=1; i<WAL_NREADER; i++){
+        rc = walLockExclusive(pWal, WAL_READ_LOCK(i), 1);
+        if( rc==SQLITE_OK ){
+          mxReadMark = pInfo->aReadMark[i] = pWal->hdr.mxFrame;
+          mxI = i;
+          walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1);
+          break;
+        }else if( rc!=SQLITE_BUSY ){
+          return rc;
+        }
+      }
+    }
+
+    rc = walLockShared(pWal, WAL_READ_LOCK(mxI));
+    if( rc ){
+      return rc==SQLITE_BUSY ? WAL_RETRY : rc;
+    }
+    /* Now that the read-lock has been obtained, check that neither the
+    ** value in the aReadMark[] array nor the contents of the wal-index
+    ** header have changed.
+ ** + ** It is necessary to check that the wal-index header did not change + ** between the time it was read and when the shared-lock was obtained + ** on WAL_READ_LOCK(mxI) was obtained to account for the possibility + ** that the log file may have been wrapped by a writer, or that frames + ** that occur later in the log than pWal->hdr.mxFrame may have been + ** copied into the database by a checkpointer. If either of these things + ** happened, then reading the database with the current value of + ** pWal->hdr.mxFrame risks reading a corrupted snapshot. So, retry + ** instead. + ** + ** This does not guarantee that the copy of the wal-index header is up to + ** date before proceeding. That would not be possible without somehow + ** blocking writers. It only guarantees that a dangerous checkpoint or + ** log-wrap (either of which would require an exclusive lock on + ** WAL_READ_LOCK(mxI)) has not occurred since the snapshot was valid. + */ + sqlite3OsShmBarrier(pWal->pDbFd); + if( pInfo->aReadMark[mxI]!=mxReadMark + || memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr)) + ){ + walUnlockShared(pWal, WAL_READ_LOCK(mxI)); + return WAL_RETRY; + }else{ + assert( mxReadMark<=pWal->hdr.mxFrame ); + pWal->readLock = (i16)mxI; + } + } + return rc; +} + +/* +** Begin a read transaction on the database. +** +** This routine used to be called sqlite3OpenSnapshot() and with good reason: +** it takes a snapshot of the state of the WAL and wal-index for the current +** instant in time. The current thread will continue to use this snapshot. +** Other threads might append new content to the WAL and wal-index but +** that extra content is ignored by the current thread. +** +** If the database contents have changes since the previous read +** transaction, then *pChanged is set to 1 before returning. The +** Pager layer will use this to know that is cache is stale and +** needs to be flushed. +*/ +int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){ + int rc; /* Return code */ + int cnt = 0; /* Number of TryBeginRead attempts */ + + do{ + rc = walTryBeginRead(pWal, pChanged, 0, ++cnt); + }while( rc==WAL_RETRY ); + return rc; +} + +/* +** Finish with a read transaction. All this does is release the +** read-lock. +*/ +void sqlite3WalEndReadTransaction(Wal *pWal){ + if( pWal->readLock>=0 ){ + walUnlockShared(pWal, WAL_READ_LOCK(pWal->readLock)); + pWal->readLock = -1; + } +} + +/* +** Read a page from the WAL, if it is present in the WAL and if the +** current read transaction is configured to use the WAL. +** +** The *pInWal is set to 1 if the requested page is in the WAL and +** has been loaded. Or *pInWal is set to 0 if the page was not in +** the WAL and needs to be read out of the database. +*/ +int sqlite3WalRead( + Wal *pWal, /* WAL handle */ + Pgno pgno, /* Database page number to read data for */ + int *pInWal, /* OUT: True if data is read from WAL */ + int nOut, /* Size of buffer pOut in bytes */ + u8 *pOut /* Buffer to write page data to */ +){ + u32 iRead = 0; /* If !=0, WAL frame to return data from */ + u32 iLast = pWal->hdr.mxFrame; /* Last page in WAL for this reader */ + int iHash; /* Used to loop through N hash tables */ + + /* This routine is only be called from within a read transaction. */ + assert( pWal->readLock>=0 || pWal->lockError ); + + /* If the "last page" field of the wal-index header snapshot is 0, then + ** no data will be read from the wal under any circumstances. Return early + ** in this case as an optimization. 
Likewise, if pWal->readLock==0, + ** then the WAL is ignored by the reader so return early, as if the + ** WAL were empty. + */ + if( iLast==0 || pWal->readLock==0 ){ + *pInWal = 0; + return SQLITE_OK; + } + + /* Search the hash table or tables for an entry matching page number + ** pgno. Each iteration of the following for() loop searches one + ** hash table (each hash table indexes up to HASHTABLE_NPAGE frames). + ** + ** This code might run concurrently to the code in walIndexAppend() + ** that adds entries to the wal-index (and possibly to this hash + ** table). This means the value just read from the hash + ** slot (aHash[iKey]) may have been added before or after the + ** current read transaction was opened. Values added after the + ** read transaction was opened may have been written incorrectly - + ** i.e. these slots may contain garbage data. However, we assume + ** that any slots written before the current read transaction was + ** opened remain unmodified. + ** + ** For the reasons above, the if(...) condition featured in the inner + ** loop of the following block is more stringent that would be required + ** if we had exclusive access to the hash-table: + ** + ** (aPgno[iFrame]==pgno): + ** This condition filters out normal hash-table collisions. + ** + ** (iFrame<=iLast): + ** This condition filters out entries that were added to the hash + ** table after the current read-transaction had started. + */ + for(iHash=walFramePage(iLast); iHash>=0 && iRead==0; iHash--){ + volatile ht_slot *aHash; /* Pointer to hash table */ + volatile u32 *aPgno; /* Pointer to array of page numbers */ + u32 iZero; /* Frame number corresponding to aPgno[0] */ + int iKey; /* Hash slot index */ + int nCollide; /* Number of hash collisions remaining */ + int rc; /* Error code */ + + rc = walHashGet(pWal, iHash, &aHash, &aPgno, &iZero); + if( rc!=SQLITE_OK ){ + return rc; + } + nCollide = HASHTABLE_NSLOT; + for(iKey=walHash(pgno); aHash[iKey]; iKey=walNextHash(iKey)){ + u32 iFrame = aHash[iKey] + iZero; + if( iFrame<=iLast && aPgno[aHash[iKey]]==pgno ){ + assert( iFrame>iRead ); + iRead = iFrame; + } + if( (nCollide--)==0 ){ + return SQLITE_CORRUPT_BKPT; + } + } + } + +#ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT + /* If expensive assert() statements are available, do a linear search + ** of the wal-index file content. Make sure the results agree with the + ** result obtained using the hash indexes above. */ + { + u32 iRead2 = 0; + u32 iTest; + for(iTest=iLast; iTest>0; iTest--){ + if( walFramePgno(pWal, iTest)==pgno ){ + iRead2 = iTest; + break; + } + } + assert( iRead==iRead2 ); + } +#endif + + /* If iRead is non-zero, then it is the log frame number that contains the + ** required page. Read and return data from the log file. + */ + if( iRead ){ + i64 iOffset = walFrameOffset(iRead, pWal->hdr.szPage) + WAL_FRAME_HDRSIZE; + *pInWal = 1; + /* testcase( IS_BIG_INT(iOffset) ); // requires a 4GiB WAL */ + return sqlite3OsRead(pWal->pWalFd, pOut, nOut, iOffset); + } + + *pInWal = 0; + return SQLITE_OK; +} + + +/* +** Set *pPgno to the size of the database file (or zero, if unknown). +*/ +void sqlite3WalDbsize(Wal *pWal, Pgno *pPgno){ + assert( pWal->readLock>=0 || pWal->lockError ); + *pPgno = pWal->hdr.nPage; +} + + +/* +** This function starts a write transaction on the WAL. +** +** A read transaction must have already been started by a prior call +** to sqlite3WalBeginReadTransaction(). 
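On the consumer side, the pager asks the WAL first and falls back to the database file only when *pInWal comes back zero. A hedged sketch of that read path follows; demoReadFromDbFile() is a hypothetical stand-in for the pager's normal sqlite3OsRead() against the main file, and the whole fragment assumes the internal wal.h interface.

/* Sketch only: uses internal SQLite types and the wal.h interface. */
#include "sqliteInt.h"
#include "wal.h"

/* Hypothetical fallback that reads page pgno from the main database file. */
static int demoReadFromDbFile(sqlite3_file *pDbFd, Pgno pgno, int nPage, u8 *pBuf){
  i64 iOfst = (pgno-1)*(i64)nPage;
  return sqlite3OsRead(pDbFd, pBuf, nPage, iOfst);
}

/* Fetch one page inside an open read transaction: WAL first, then the db file. */
static int demoFetchPage(Wal *pWal, sqlite3_file *pDbFd, Pgno pgno, int nPage, u8 *pBuf){
  int inWal = 0;
  int rc = sqlite3WalRead(pWal, pgno, &inWal, nPage, pBuf);
  if( rc!=SQLITE_OK ) return rc;
  if( inWal ) return SQLITE_OK;          /* newest copy came from the WAL */
  return demoReadFromDbFile(pDbFd, pgno, nPage, pBuf);
}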
+** +** If another thread or process has written into the database since +** the read transaction was started, then it is not possible for this +** thread to write as doing so would cause a fork. So this routine +** returns SQLITE_BUSY in that case and no write transaction is started. +** +** There can only be a single writer active at a time. +*/ +int sqlite3WalBeginWriteTransaction(Wal *pWal){ + int rc; + + /* Cannot start a write transaction without first holding a read + ** transaction. */ + assert( pWal->readLock>=0 ); + + if( pWal->readOnly ){ + return SQLITE_READONLY; + } + + /* Only one writer allowed at a time. Get the write lock. Return + ** SQLITE_BUSY if unable. + */ + rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1); + if( rc ){ + return rc; + } + pWal->writeLock = 1; + + /* If another connection has written to the database file since the + ** time the read transaction on this connection was started, then + ** the write is disallowed. + */ + if( memcmp(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr))!=0 ){ + walUnlockExclusive(pWal, WAL_WRITE_LOCK, 1); + pWal->writeLock = 0; + rc = SQLITE_BUSY; + } + + return rc; +} + +/* +** End a write transaction. The commit has already been done. This +** routine merely releases the lock. +*/ +int sqlite3WalEndWriteTransaction(Wal *pWal){ + if( pWal->writeLock ){ + walUnlockExclusive(pWal, WAL_WRITE_LOCK, 1); + pWal->writeLock = 0; + } + return SQLITE_OK; +} + +/* +** If any data has been written (but not committed) to the log file, this +** function moves the write-pointer back to the start of the transaction. +** +** Additionally, the callback function is invoked for each frame written +** to the WAL since the start of the transaction. If the callback returns +** other than SQLITE_OK, it is not invoked again and the error code is +** returned to the caller. +** +** Otherwise, if the callback function does not return an error, this +** function returns SQLITE_OK. +*/ +int sqlite3WalUndo(Wal *pWal, int (*xUndo)(void *, Pgno), void *pUndoCtx){ + int rc = SQLITE_OK; + if( pWal->writeLock ){ + Pgno iMax = pWal->hdr.mxFrame; + Pgno iFrame; + + /* Restore the clients cache of the wal-index header to the state it + ** was in before the client began writing to the database. + */ + memcpy(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr)); + + for(iFrame=pWal->hdr.mxFrame+1; + ALWAYS(rc==SQLITE_OK) && iFrame<=iMax; + iFrame++ + ){ + /* This call cannot fail. Unless the page for which the page number + ** is passed as the second argument is (a) in the cache and + ** (b) has an outstanding reference, then xUndo is either a no-op + ** (if (a) is false) or simply expels the page from the cache (if (b) + ** is false). + ** + ** If the upper layer is doing a rollback, it is guaranteed that there + ** are no outstanding references to any page other than page 1. And + ** page 1 is never written to the log until the transaction is + ** committed. As a result, the call to xUndo may not fail. + */ + assert( walFramePgno(pWal, iFrame)!=1 ); + rc = xUndo(pUndoCtx, walFramePgno(pWal, iFrame)); + } + walCleanupHash(pWal); + } + assert( rc==SQLITE_OK ); + return rc; +} + +/* +** Argument aWalData must point to an array of WAL_SAVEPOINT_NDATA u32 +** values. This function populates the array with values required to +** "rollback" the write position of the WAL handle back to the current +** point in the event of a savepoint rollback (via WalSavepointUndo()). 
+*/
+void sqlite3WalSavepoint(Wal *pWal, u32 *aWalData){
+  assert( pWal->writeLock );
+  aWalData[0] = pWal->hdr.mxFrame;
+  aWalData[1] = pWal->hdr.aFrameCksum[0];
+  aWalData[2] = pWal->hdr.aFrameCksum[1];
+  aWalData[3] = pWal->nCkpt;
+}
+
+/*
+** Move the write position of the WAL back to the point identified by
+** the values in the aWalData[] array. aWalData must point to an array
+** of WAL_SAVEPOINT_NDATA u32 values that has been previously populated
+** by a call to WalSavepoint().
+*/
+int sqlite3WalSavepointUndo(Wal *pWal, u32 *aWalData){
+  int rc = SQLITE_OK;
+
+  assert( pWal->writeLock );
+  assert( aWalData[3]!=pWal->nCkpt || aWalData[0]<=pWal->hdr.mxFrame );
+
+  if( aWalData[3]!=pWal->nCkpt ){
+    /* This savepoint was opened immediately after the write-transaction
+    ** was started. Right after that, the writer decided to wrap around
+    ** to the start of the log. Update the savepoint values to match.
+    */
+    aWalData[0] = 0;
+    aWalData[3] = pWal->nCkpt;
+  }
+
+  if( aWalData[0]<pWal->hdr.mxFrame ){
+    pWal->hdr.mxFrame = aWalData[0];
+    pWal->hdr.aFrameCksum[0] = aWalData[1];
+    pWal->hdr.aFrameCksum[1] = aWalData[2];
+    walCleanupHash(pWal);
+  }
+
+  return rc;
+}
+
+/*
+** This function is called just before writing a set of frames to the log
+** file (see sqlite3WalFrames()). It checks to see if, instead of appending
+** to the current log file, it is possible to overwrite the start of the
+** existing log file with the new frames (i.e. "reset" the log). If so,
+** it sets pWal->hdr.mxFrame to 0. Otherwise, pWal->hdr.mxFrame is left
+** unchanged.
+**
+** SQLITE_OK is returned if no error is encountered (regardless of whether
+** or not pWal->hdr.mxFrame is modified). An SQLite error code is returned
+** if an error is encountered.
+*/
+static int walRestartLog(Wal *pWal){
+  int rc = SQLITE_OK;
+  int cnt;
+
+  if( pWal->readLock==0 ){
+    volatile WalCkptInfo *pInfo = walCkptInfo(pWal);
+    assert( pInfo->nBackfill==pWal->hdr.mxFrame );
+    if( pInfo->nBackfill>0 ){
+      rc = walLockExclusive(pWal, WAL_READ_LOCK(1), WAL_NREADER-1);
+      if( rc==SQLITE_OK ){
+        /* If all readers are using WAL_READ_LOCK(0) (in other words if no
+        ** readers are currently using the WAL), then the transaction's
+        ** frames will overwrite the start of the existing log. Update the
+        ** wal-index header to reflect this.
+        **
+        ** In theory it would be Ok to update the cache of the header only
+        ** at this point. But updating the actual wal-index header is also
+        ** safe and means there is no special case for sqlite3WalUndo()
+        ** to handle if this transaction is rolled back.
+        */
+        int i;                    /* Loop counter */
+        u32 *aSalt = pWal->hdr.aSalt;       /* Big-endian salt values */
+        pWal->nCkpt++;
+        pWal->hdr.mxFrame = 0;
+        sqlite3Put4byte((u8*)&aSalt[0], 1 + sqlite3Get4byte((u8*)&aSalt[0]));
+        sqlite3_randomness(4, &aSalt[1]);
+        walIndexWriteHdr(pWal);
+        pInfo->nBackfill = 0;
+        for(i=1; i<WAL_NREADER; i++) pInfo->aReadMark[i] = READMARK_NOT_USED;
+        assert( pInfo->aReadMark[0]==0 );
+        walUnlockExclusive(pWal, WAL_READ_LOCK(1), WAL_NREADER-1);
+      }
+    }
+    walUnlockShared(pWal, WAL_READ_LOCK(0));
+    pWal->readLock = -1;
+    cnt = 0;
+    do{
+      int notUsed;
+      rc = walTryBeginRead(pWal, &notUsed, 1, ++cnt);
+    }while( rc==WAL_RETRY );
+  }
+  return rc;
+}
+
+/*
+** Write a set of frames to the log. The caller must hold the write-lock
+** on the log file (obtained using sqlite3WalBeginWriteTransaction()).
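A short sketch of how the savepoint pair above is used from the writer's side: capture the four words before running an undoable step, then hand the same array back if that step needs to be rolled back. xStep and demoSavepointStep are hypothetical names; the array layout (mxFrame, the two running checksum words, nCkpt) is the one populated by sqlite3WalSavepoint().

/* Sketch only: uses the internal wal.h interface. */
#include "sqliteInt.h"
#include "wal.h"

/* Run one undoable step inside an open WAL write transaction.  xStep is a
** hypothetical callback that appends frames via sqlite3WalFrames(). */
static int demoSavepointStep(Wal *pWal, int (*xStep)(Wal*)){
  u32 aWalData[WAL_SAVEPOINT_NDATA];   /* mxFrame, frame checksum pair, nCkpt */
  int rc;

  sqlite3WalSavepoint(pWal, aWalData); /* remember the current write position */
  rc = xStep(pWal);
  if( rc!=SQLITE_OK ){
    /* Discard everything xStep() wrote: mxFrame and the running checksum
    ** are wound back and the hash tables are cleaned up to match. */
    int rc2 = sqlite3WalSavepointUndo(pWal, aWalData);
    if( rc2!=SQLITE_OK ) rc = rc2;
  }
  return rc;
}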
+*/ +int sqlite3WalFrames( + Wal *pWal, /* Wal handle to write to */ + int szPage, /* Database page-size in bytes */ + PgHdr *pList, /* List of dirty pages to write */ + Pgno nTruncate, /* Database size after this commit */ + int isCommit, /* True if this is a commit */ + int sync_flags /* Flags to pass to OsSync() (or 0) */ +){ + int rc; /* Used to catch return codes */ + u32 iFrame; /* Next frame address */ + u8 aFrame[WAL_FRAME_HDRSIZE]; /* Buffer to assemble frame-header in */ + PgHdr *p; /* Iterator to run through pList with. */ + PgHdr *pLast = 0; /* Last frame in list */ + int nLast = 0; /* Number of extra copies of last page */ + + assert( pList ); + assert( pWal->writeLock ); + +#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG) + { int cnt; for(cnt=0, p=pList; p; p=p->pDirty, cnt++){} + WALTRACE(("WAL%p: frame write begin. %d frames. mxFrame=%d. %s\n", + pWal, cnt, pWal->hdr.mxFrame, isCommit ? "Commit" : "Spill")); + } +#endif + + /* See if it is possible to write these frames into the start of the + ** log file, instead of appending to it at pWal->hdr.mxFrame. + */ + if( SQLITE_OK!=(rc = walRestartLog(pWal)) ){ + return rc; + } + + /* If this is the first frame written into the log, write the WAL + ** header to the start of the WAL file. See comments at the top of + ** this source file for a description of the WAL header format. + */ + iFrame = pWal->hdr.mxFrame; + if( iFrame==0 ){ + u8 aWalHdr[WAL_HDRSIZE]; /* Buffer to assemble wal-header in */ + u32 aCksum[2]; /* Checksum for wal-header */ + + sqlite3Put4byte(&aWalHdr[0], (WAL_MAGIC | SQLITE_BIGENDIAN)); + sqlite3Put4byte(&aWalHdr[4], WAL_MAX_VERSION); + sqlite3Put4byte(&aWalHdr[8], szPage); + sqlite3Put4byte(&aWalHdr[12], pWal->nCkpt); + sqlite3_randomness(8, pWal->hdr.aSalt); + memcpy(&aWalHdr[16], pWal->hdr.aSalt, 8); + walChecksumBytes(1, aWalHdr, WAL_HDRSIZE-2*4, 0, aCksum); + sqlite3Put4byte(&aWalHdr[24], aCksum[0]); + sqlite3Put4byte(&aWalHdr[28], aCksum[1]); + + pWal->szPage = (u16)szPage; + pWal->hdr.bigEndCksum = SQLITE_BIGENDIAN; + pWal->hdr.aFrameCksum[0] = aCksum[0]; + pWal->hdr.aFrameCksum[1] = aCksum[1]; + + rc = sqlite3OsWrite(pWal->pWalFd, aWalHdr, sizeof(aWalHdr), 0); + WALTRACE(("WAL%p: wal-header write %s\n", pWal, rc ? "failed" : "ok")); + if( rc!=SQLITE_OK ){ + return rc; + } + } + assert( pWal->szPage==szPage ); + + /* Write the log file. */ + for(p=pList; p; p=p->pDirty){ + u32 nDbsize; /* Db-size field for frame header */ + i64 iOffset; /* Write offset in log file */ + void *pData; + + iOffset = walFrameOffset(++iFrame, szPage); + /* testcase( IS_BIG_INT(iOffset) ); // requires a 4GiB WAL */ + + /* Populate and write the frame header */ + nDbsize = (isCommit && p->pDirty==0) ? nTruncate : 0; +#if defined(SQLITE_HAS_CODEC) + if( (pData = sqlite3PagerCodec(p))==0 ) return SQLITE_NOMEM; +#else + pData = p->pData; +#endif + walEncodeFrame(pWal, p->pgno, nDbsize, pData, aFrame); + rc = sqlite3OsWrite(pWal->pWalFd, aFrame, sizeof(aFrame), iOffset); + if( rc!=SQLITE_OK ){ + return rc; + } + + /* Write the page data */ + rc = sqlite3OsWrite(pWal->pWalFd, pData, szPage, iOffset+sizeof(aFrame)); + if( rc!=SQLITE_OK ){ + return rc; + } + pLast = p; + } + + /* Sync the log file if the 'isSync' flag was specified. 
*/
+  if( sync_flags ){
+    i64 iSegment = sqlite3OsSectorSize(pWal->pWalFd);
+    i64 iOffset = walFrameOffset(iFrame+1, szPage);
+
+    assert( isCommit );
+    assert( iSegment>0 );
+
+    iSegment = (((iOffset+iSegment-1)/iSegment) * iSegment);
+    while( iOffset<iSegment ){
+      void *pData;
+#if defined(SQLITE_HAS_CODEC)
+      if( (pData = sqlite3PagerCodec(pLast))==0 ) return SQLITE_NOMEM;
+#else
+      pData = pLast->pData;
+#endif
+      walEncodeFrame(pWal, pLast->pgno, nTruncate, pData, aFrame);
+      /* testcase( IS_BIG_INT(iOffset) ); // requires a 4GiB WAL */
+      rc = sqlite3OsWrite(pWal->pWalFd, aFrame, sizeof(aFrame), iOffset);
+      if( rc!=SQLITE_OK ){
+        return rc;
+      }
+      iOffset += WAL_FRAME_HDRSIZE;
+      rc = sqlite3OsWrite(pWal->pWalFd, pData, szPage, iOffset);
+      if( rc!=SQLITE_OK ){
+        return rc;
+      }
+      nLast++;
+      iOffset += szPage;
+    }
+
+    rc = sqlite3OsSync(pWal->pWalFd, sync_flags);
+  }
+
+  /* Append data to the wal-index. It is not necessary to lock the
+  ** wal-index to do this as the SQLITE_SHM_WRITE lock held on the wal-index
+  ** guarantees that there are no other writers, and no data that may
+  ** be in use by existing readers is being overwritten.
+  */
+  iFrame = pWal->hdr.mxFrame;
+  for(p=pList; p && rc==SQLITE_OK; p=p->pDirty){
+    iFrame++;
+    rc = walIndexAppend(pWal, iFrame, p->pgno);
+  }
+  while( nLast>0 && rc==SQLITE_OK ){
+    iFrame++;
+    nLast--;
+    rc = walIndexAppend(pWal, iFrame, pLast->pgno);
+  }
+
+  if( rc==SQLITE_OK ){
+    /* Update the private copy of the header. */
+    pWal->hdr.szPage = (u16)szPage;
+    pWal->hdr.mxFrame = iFrame;
+    if( isCommit ){
+      pWal->hdr.iChange++;
+      pWal->hdr.nPage = nTruncate;
+    }
+    /* If this is a commit, update the wal-index header too. */
+    if( isCommit ){
+      walIndexWriteHdr(pWal);
+      pWal->iCallback = iFrame;
+    }
+  }
+
+  WALTRACE(("WAL%p: frame write %s\n", pWal, rc ? "failed" : "ok"));
+  return rc;
+}
+
+/*
+** This routine is called to implement sqlite3_wal_checkpoint() and
+** related interfaces.
+**
+** Obtain a CHECKPOINT lock and then backfill as much information as
+** we can from WAL into the database.
+*/
+int sqlite3WalCheckpoint(
+  Wal *pWal,                      /* Wal connection */
+  int sync_flags,                 /* Flags to sync db file with (or 0) */
+  int nBuf,                       /* Size of temporary buffer */
+  u8 *zBuf                        /* Temporary buffer to use */
+){
+  int rc;                         /* Return code */
+  int isChanged = 0;              /* True if a new wal-index header is loaded */
+
+  assert( pWal->ckptLock==0 );
+
+  WALTRACE(("WAL%p: checkpoint begins\n", pWal));
+  rc = walLockExclusive(pWal, WAL_CKPT_LOCK, 1);
+  if( rc ){
+    /* Usually this is SQLITE_BUSY meaning that another thread or process
+    ** is already running a checkpoint, or maybe a recovery.  But it might
+    ** also be SQLITE_IOERR. */
+    return rc;
+  }
+  pWal->ckptLock = 1;
+
+  /* Copy data from the log to the database file. */
+  rc = walIndexReadHdr(pWal, &isChanged);
+  if( rc==SQLITE_OK ){
+    rc = walCheckpoint(pWal, sync_flags, nBuf, zBuf);
+  }
+  if( isChanged ){
+    /* If a new wal-index header was loaded before the checkpoint was
+    ** performed, then the pager-cache associated with pWal is now
+    ** out of date. So zero the cached wal-index header to ensure that
+    ** next time the pager opens a snapshot on this database it knows that
+    ** the cache needs to be reset.
+    */
+    memset(&pWal->hdr, 0, sizeof(WalIndexHdr));
+  }
+
+  /* Release the locks. */
+  walUnlockExclusive(pWal, WAL_CKPT_LOCK, 1);
+  pWal->ckptLock = 0;
+  WALTRACE(("WAL%p: checkpoint %s\n", pWal, rc ? "failed" : "ok"));
+  return rc;
+}
+
+/* Return the value to pass to a sqlite3_wal_hook callback, the
+** number of frames in the WAL at the point of the last commit since
+** sqlite3WalCallback() was called.
If no commits have occurred since +** the last call, then return 0. +*/ +int sqlite3WalCallback(Wal *pWal){ + u32 ret = 0; + if( pWal ){ + ret = pWal->iCallback; + pWal->iCallback = 0; + } + return (int)ret; +} + +/* +** This function is called to change the WAL subsystem into or out +** of locking_mode=EXCLUSIVE. +** +** If op is zero, then attempt to change from locking_mode=EXCLUSIVE +** into locking_mode=NORMAL. This means that we must acquire a lock +** on the pWal->readLock byte. If the WAL is already in locking_mode=NORMAL +** or if the acquisition of the lock fails, then return 0. If the +** transition out of exclusive-mode is successful, return 1. This +** operation must occur while the pager is still holding the exclusive +** lock on the main database file. +** +** If op is one, then change from locking_mode=NORMAL into +** locking_mode=EXCLUSIVE. This means that the pWal->readLock must +** be released. Return 1 if the transition is made and 0 if the +** WAL is already in exclusive-locking mode - meaning that this +** routine is a no-op. The pager must already hold the exclusive lock +** on the main database file before invoking this operation. +** +** If op is negative, then do a dry-run of the op==1 case but do +** not actually change anything. The pager uses this to see if it +** should acquire the database exclusive lock prior to invoking +** the op==1 case. +*/ +int sqlite3WalExclusiveMode(Wal *pWal, int op){ + int rc; + assert( pWal->writeLock==0 ); + + /* pWal->readLock is usually set, but might be -1 if there was a + ** prior error while attempting to acquire are read-lock. This cannot + ** happen if the connection is actually in exclusive mode (as no xShmLock + ** locks are taken in this case). Nor should the pager attempt to + ** upgrade to exclusive-mode following such an error. + */ + assert( pWal->readLock>=0 || pWal->lockError ); + assert( pWal->readLock>=0 || (op<=0 && pWal->exclusiveMode==0) ); + + if( op==0 ){ + if( pWal->exclusiveMode ){ + pWal->exclusiveMode = 0; + if( walLockShared(pWal, WAL_READ_LOCK(pWal->readLock))!=SQLITE_OK ){ + pWal->exclusiveMode = 1; + } + rc = pWal->exclusiveMode==0; + }else{ + /* Already in locking_mode=NORMAL */ + rc = 0; + } + }else if( op>0 ){ + assert( pWal->exclusiveMode==0 ); + assert( pWal->readLock>=0 ); + walUnlockShared(pWal, WAL_READ_LOCK(pWal->readLock)); + pWal->exclusiveMode = 1; + rc = 1; + }else{ + rc = pWal->exclusiveMode==0; + } + return rc; +} + +#endif /* #ifndef SQLITE_OMIT_WAL */ diff --git a/src/wal.h b/src/wal.h new file mode 100644 index 0000000..f29d583 --- /dev/null +++ b/src/wal.h @@ -0,0 +1,107 @@ +/* +** 2010 February 1 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This header file defines the interface to the write-ahead logging +** system. Refer to the comments below and the header comment attached to +** the implementation of each function in log.c for further details. 
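At the public API level, the frame count produced by sqlite3WalCallback() reaches applications through sqlite3_wal_hook(). A small application-level example follows: a hook that requests a checkpoint once a commit leaves the WAL with more than a threshold number of frames. The 1000-frame threshold is arbitrary and error handling is omitted.

#include <sqlite3.h>
#include <stdio.h>

/* Checkpoint the named database whenever a commit leaves more than 1000
** frames in its WAL.  nFrame is the value reported by the WAL layer. */
static int demoWalHook(void *pArg, sqlite3 *db, const char *zDb, int nFrame){
  (void)pArg;
  if( nFrame>1000 ){
    sqlite3_wal_checkpoint(db, zDb);   /* errors ignored in this sketch */
  }
  return SQLITE_OK;
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open("demo.db", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "PRAGMA journal_mode=WAL;", 0, 0, 0);   /* switch to WAL */
  sqlite3_wal_hook(db, demoWalHook, 0);
  sqlite3_exec(db, "CREATE TABLE IF NOT EXISTS t(x); INSERT INTO t VALUES(1);",
               0, 0, 0);
  sqlite3_close(db);
  return 0;
}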
+*/ + +#ifndef _WAL_H_ +#define _WAL_H_ + +#include "sqliteInt.h" + +#ifdef SQLITE_OMIT_WAL +# define sqlite3WalOpen(x,y,z) 0 +# define sqlite3WalClose(w,x,y,z) 0 +# define sqlite3WalBeginReadTransaction(y,z) 0 +# define sqlite3WalEndReadTransaction(z) +# define sqlite3WalRead(v,w,x,y,z) 0 +# define sqlite3WalDbsize(y,z) +# define sqlite3WalBeginWriteTransaction(y) 0 +# define sqlite3WalEndWriteTransaction(x) 0 +# define sqlite3WalUndo(x,y,z) 0 +# define sqlite3WalSavepoint(y,z) +# define sqlite3WalSavepointUndo(y,z) 0 +# define sqlite3WalFrames(u,v,w,x,y,z) 0 +# define sqlite3WalCheckpoint(u,v,w,x) 0 +# define sqlite3WalCallback(z) 0 +# define sqlite3WalExclusiveMode(y,z) 0 +#else + +#define WAL_SAVEPOINT_NDATA 4 + +/* Connection to a write-ahead log (WAL) file. +** There is one object of this type for each pager. +*/ +typedef struct Wal Wal; + +/* Open and close a connection to a write-ahead log. */ +int sqlite3WalOpen(sqlite3_vfs*, sqlite3_file*, const char *zName, Wal**); +int sqlite3WalClose(Wal *pWal, int sync_flags, int, u8 *); + +/* Used by readers to open (lock) and close (unlock) a snapshot. A +** snapshot is like a read-transaction. It is the state of the database +** at an instant in time. sqlite3WalOpenSnapshot gets a read lock and +** preserves the current state even if the other threads or processes +** write to or checkpoint the WAL. sqlite3WalCloseSnapshot() closes the +** transaction and releases the lock. +*/ +int sqlite3WalBeginReadTransaction(Wal *pWal, int *); +void sqlite3WalEndReadTransaction(Wal *pWal); + +/* Read a page from the write-ahead log, if it is present. */ +int sqlite3WalRead(Wal *pWal, Pgno pgno, int *pInWal, int nOut, u8 *pOut); + +/* Return the size of the database as it existed at the beginning +** of the snapshot */ +void sqlite3WalDbsize(Wal *pWal, Pgno *pPgno); + +/* Obtain or release the WRITER lock. */ +int sqlite3WalBeginWriteTransaction(Wal *pWal); +int sqlite3WalEndWriteTransaction(Wal *pWal); + +/* Undo any frames written (but not committed) to the log */ +int sqlite3WalUndo(Wal *pWal, int (*xUndo)(void *, Pgno), void *pUndoCtx); + +/* Return an integer that records the current (uncommitted) write +** position in the WAL */ +void sqlite3WalSavepoint(Wal *pWal, u32 *aWalData); + +/* Move the write position of the WAL back to iFrame. Called in +** response to a ROLLBACK TO command. */ +int sqlite3WalSavepointUndo(Wal *pWal, u32 *aWalData); + +/* Write a frame or frames to the log. */ +int sqlite3WalFrames(Wal *pWal, int, PgHdr *, Pgno, int, int); + +/* Copy pages from the log to the database file */ +int sqlite3WalCheckpoint( + Wal *pWal, /* Write-ahead log connection */ + int sync_flags, /* Flags to sync db file with (or 0) */ + int nBuf, /* Size of buffer nBuf */ + u8 *zBuf /* Temporary buffer to use */ +); + +/* Return the value to pass to a sqlite3_wal_hook callback, the +** number of frames in the WAL at the point of the last commit since +** sqlite3WalCallback() was called. If no commits have occurred since +** the last call, then return 0. +*/ +int sqlite3WalCallback(Wal *pWal); + +/* Tell the wal layer that an EXCLUSIVE lock has been obtained (or released) +** by the pager layer on the database file. +*/ +int sqlite3WalExclusiveMode(Wal *pWal, int op); + +#endif /* ifndef SQLITE_OMIT_WAL */ +#endif /* _WAL_H_ */ diff --git a/src/where.c b/src/where.c index 5f81ce0..b2f75ea 100644 --- a/src/where.c +++ b/src/where.c @@ -235,6 +235,7 @@ struct WhereCost { #define WHERE_COLUMN_IN 0x00040000 /* x IN (...) 
*/ #define WHERE_COLUMN_NULL 0x00080000 /* x IS NULL */ #define WHERE_INDEXED 0x000f0000 /* Anything that uses an index */ +#define WHERE_NOT_FULLSCAN 0x000f3000 /* Does not do a full table scan */ #define WHERE_IN_ABLE 0x000f1000 /* Able to support an IN operator */ #define WHERE_TOP_LIMIT 0x00100000 /* x<EXPR or x<=EXPR constraint */ #define WHERE_BTM_LIMIT 0x00200000 /* x>EXPR or x>=EXPR constraint */ @@ -244,6 +245,7 @@ struct WhereCost { #define WHERE_UNIQUE 0x04000000 /* Selects no more than one row */ #define WHERE_VIRTUALTABLE 0x08000000 /* Use virtual-table processing */ #define WHERE_MULTI_OR 0x10000000 /* OR using multiple indices */ +#define WHERE_TEMP_INDEX 0x20000000 /* Uses an ephemeral index */ /* ** Initialize a preallocated WhereClause structure. @@ -1573,6 +1575,11 @@ static void bestOrClauseIndex( WhereTerm * const pWCEnd = &pWC->a[pWC->nTerm]; /* End of pWC->a[] */ WhereTerm *pTerm; /* A single term of the WHERE clause */ + /* No OR-clause optimization allowed if the NOT INDEXED clause is used */ + if( pSrc->notIndexed ){ + return; + } + /* Search the WHERE clause terms for a usable WO_OR term. */ for(pTerm=pWC->a; pTerm<pWCEnd; pTerm++){ if( pTerm->eOperator==WO_OR @@ -1615,8 +1622,9 @@ static void bestOrClauseIndex( /* If there is an ORDER BY clause, increase the scan cost to account ** for the cost of the sort. */ if( pOrderBy!=0 ){ + WHERETRACE(("... sorting increases OR cost %.9g to %.9g\n", + rTotal, rTotal+nRow*estLog(nRow))); rTotal += nRow*estLog(nRow); - WHERETRACE(("... sorting increases OR cost to %.9g\n", rTotal)); } /* If the cost of scanning using this OR term for optimization is @@ -1635,6 +1643,247 @@ static void bestOrClauseIndex( #endif /* SQLITE_OMIT_OR_OPTIMIZATION */ } +#ifndef SQLITE_OMIT_AUTOMATIC_INDEX +/* +** Return TRUE if the WHERE clause term pTerm is of a form where it +** could be used with an index to access pSrc, assuming an appropriate +** index existed. +*/ +static int termCanDriveIndex( + WhereTerm *pTerm, /* WHERE clause term to check */ + struct SrcList_item *pSrc, /* Table we are trying to access */ + Bitmask notReady /* Tables in outer loops of the join */ +){ + char aff; + if( pTerm->leftCursor!=pSrc->iCursor ) return 0; + if( pTerm->eOperator!=WO_EQ ) return 0; + if( (pTerm->prereqRight & notReady)!=0 ) return 0; + aff = pSrc->pTab->aCol[pTerm->u.leftColumn].affinity; + if( !sqlite3IndexAffinityOk(pTerm->pExpr, aff) ) return 0; + return 1; +} +#endif + +#ifndef SQLITE_OMIT_AUTOMATIC_INDEX +/* +** If the query plan for pSrc specified in pCost is a full table scan +** and indexing is allowed (there is no NOT INDEXED clause) and it is +** possible to construct a transient index that would perform better +** than a full table scan even when the cost of constructing the index +** is taken into account, then alter the query plan to use the +** transient index.
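+**
+** [Editor's illustration - not part of the original patch. The classic
+** beneficiary is an unindexed inner table of a join, e.g.
+** SELECT b, d FROM t1 JOIN t2 ON a=c with no index on t2.c, as exercised
+** by autoindex1.test later in this patch. Assuming the default guess of
+** 1000000 rows for an unindexed table and, say, pParse->nQueryLoop==100
+** outer-loop rows, logN = estLog(1000000) is about 6 and the test below
+** computes costTempIdx = 2*6*(1000000/100 + 1), roughly 120000; the
+** transient index is chosen only if that beats the best full-scan cost
+** found so far.]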
+*/ +static void bestAutomaticIndex( + Parse *pParse, /* The parsing context */ + WhereClause *pWC, /* The WHERE clause */ + struct SrcList_item *pSrc, /* The FROM clause term to search */ + Bitmask notReady, /* Mask of cursors that are not available */ + WhereCost *pCost /* Lowest cost query plan */ +){ + double nTableRow; /* Rows in the input table */ + double logN; /* log(nTableRow) */ + double costTempIdx; /* per-query cost of the transient index */ + WhereTerm *pTerm; /* A single term of the WHERE clause */ + WhereTerm *pWCEnd; /* End of pWC->a[] */ + Table *pTable; /* Table that might be indexed */ + + if( (pParse->db->flags & SQLITE_AutoIndex)==0 ){ + /* Automatic indices are disabled at run-time */ + return; + } + if( (pCost->plan.wsFlags & WHERE_NOT_FULLSCAN)!=0 ){ + /* We already have some kind of index in use for this query. */ + return; + } + if( pSrc->notIndexed ){ + /* The NOT INDEXED clause appears in the SQL. */ + return; + } + + assert( pParse->nQueryLoop >= (double)1 ); + pTable = pSrc->pTab; + nTableRow = pTable->pIndex ? pTable->pIndex->aiRowEst[0] : 1000000; + logN = estLog(nTableRow); + costTempIdx = 2*logN*(nTableRow/pParse->nQueryLoop + 1); + if( costTempIdx>=pCost->rCost ){ + /* The cost of creating the transient table would be greater than + ** doing the full table scan */ + return; + } + + /* Search for any equality comparison term */ + pWCEnd = &pWC->a[pWC->nTerm]; + for(pTerm=pWC->a; pTerm<pWCEnd; pTerm++){ + if( termCanDriveIndex(pTerm, pSrc, notReady) ){ + WHERETRACE(("auto-index reduces cost from %.2f to %.2f\n", + pCost->rCost, costTempIdx)); + pCost->rCost = costTempIdx; + pCost->nRow = logN + 1; + pCost->plan.wsFlags = WHERE_TEMP_INDEX; + pCost->used = pTerm->prereqRight; + break; + } + } +} +#else +# define bestAutomaticIndex(A,B,C,D,E) /* no-op */ +#endif /* SQLITE_OMIT_AUTOMATIC_INDEX */ + + +#ifndef SQLITE_OMIT_AUTOMATIC_INDEX +/* +** Generate code to construct the Index object for an automatic index +** and to set up the WhereLevel object pLevel so that the code generator +** makes use of the automatic index. +*/ +static void constructAutomaticIndex( + Parse *pParse, /* The parsing context */ + WhereClause *pWC, /* The WHERE clause */ + struct SrcList_item *pSrc, /* The FROM clause term to get the next index */ + Bitmask notReady, /* Mask of cursors that are not available */ + WhereLevel *pLevel /* Write new index here */ +){ + int nColumn; /* Number of columns in the constructed index */ + WhereTerm *pTerm; /* A single term of the WHERE clause */ + WhereTerm *pWCEnd; /* End of pWC->a[] */ + int nByte; /* Bytes of memory needed for pIdx */ + Index *pIdx; /* Object describing the transient index */ + Vdbe *v; /* Prepared statement under construction */ + int regIsInit; /* Register set by initialization */ + int addrInit; /* Address of the initialization bypass jump */ + Table *pTable; /* The table being indexed */ + KeyInfo *pKeyinfo; /* Key information for the index */ + int addrTop; /* Top of the index fill loop */ + int regRecord; /* Register holding an index record */ + int n; /* Column counter */ + int i; /* Loop counter */ + int mxBitCol; /* Maximum column in pSrc->colUsed */ + CollSeq *pColl; /* Collating sequence to use on a column */ + Bitmask idxCols; /* Bitmap of columns used for indexing */ + Bitmask extraCols; /* Bitmap of additional columns */ + + /* Generate code to skip over the creation and initialization of the + ** transient index on 2nd and subsequent iterations of the loop.
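+ **
+ ** [Editor's note - not part of the original patch: regIsInit is a fresh
+ ** register and therefore starts out NULL, so the OP_If coded below falls
+ ** through on the first pass, the index is built, and regIsInit is set to
+ ** 1; if the same code is re-entered, as with a correlated subquery such
+ ** as SELECT b, (SELECT d FROM t2 WHERE c=a) FROM t1 in autoindex1.test,
+ ** the OP_If jumps straight to the sqlite3VdbeJumpHere(v, addrInit) target
+ ** at the end of this routine and the rebuild is skipped.]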
*/ + v = pParse->pVdbe; + assert( v!=0 ); + regIsInit = ++pParse->nMem; + addrInit = sqlite3VdbeAddOp1(v, OP_If, regIsInit); + sqlite3VdbeAddOp2(v, OP_Integer, 1, regIsInit); + + /* Count the number of columns that will be added to the index + ** and used to match WHERE clause constraints */ + nColumn = 0; + pTable = pSrc->pTab; + pWCEnd = &pWC->a[pWC->nTerm]; + idxCols = 0; + for(pTerm=pWC->a; pTerm<pWCEnd; pTerm++){ + if( termCanDriveIndex(pTerm, pSrc, notReady) ){ + int iCol = pTerm->u.leftColumn; + Bitmask cMask = iCol>=BMS ? ((Bitmask)1)<<(BMS-1) : ((Bitmask)1)<<iCol; + testcase( iCol==BMS ); + testcase( iCol==BMS-1 ); + if( (idxCols & cMask)==0 ){ + nColumn++; + idxCols |= cMask; + } + } + } + assert( nColumn>0 ); + pLevel->plan.nEq = nColumn; + + /* Count the number of additional columns needed to create a + ** covering index. A "covering index" is an index that contains all + ** columns that are needed by the query. With a covering index, the + ** original table never needs to be accessed. Automatic indices must + ** be a covering index because the index will not be updated if the + ** original table changes and the index and table cannot both be used + ** if they go out of sync. + */ + extraCols = pSrc->colUsed & (~idxCols | (((Bitmask)1)<<(BMS-1))); + mxBitCol = (pTable->nCol >= BMS-1) ? BMS-1 : pTable->nCol; + testcase( pTable->nCol==BMS-1 ); + testcase( pTable->nCol==BMS-2 ); + for(i=0; i<mxBitCol; i++){ + if( extraCols & (((Bitmask)1)<<i) ) nColumn++; + } + if( pSrc->colUsed & (((Bitmask)1)<<(BMS-1)) ){ + nColumn += pTable->nCol - BMS + 1; + } + pLevel->plan.wsFlags |= WHERE_COLUMN_EQ | WHERE_IDX_ONLY | WO_EQ; + + /* Construct the Index object to describe this index */ + nByte = sizeof(Index); + nByte += nColumn*sizeof(int); /* Index.aiColumn */ + nByte += nColumn*sizeof(char*); /* Index.azColl */ + nByte += nColumn; /* Index.aSortOrder */ + pIdx = sqlite3DbMallocZero(pParse->db, nByte); + if( pIdx==0 ) return; + pLevel->plan.u.pIdx = pIdx; + pIdx->azColl = (char**)&pIdx[1]; + pIdx->aiColumn = (int*)&pIdx->azColl[nColumn]; + pIdx->aSortOrder = (u8*)&pIdx->aiColumn[nColumn]; + pIdx->zName = "auto-index"; + pIdx->nColumn = nColumn; + pIdx->pTable = pTable; + n = 0; + idxCols = 0; + for(pTerm=pWC->a; pTerm<pWCEnd; pTerm++){ + if( termCanDriveIndex(pTerm, pSrc, notReady) ){ + int iCol = pTerm->u.leftColumn; + Bitmask cMask = iCol>=BMS ?
((Bitmask)1)<<(BMS-1) : ((Bitmask)1)<<iCol; + testcase( iCol==BMS ); + testcase( iCol==BMS-1 ); + if( (idxCols & cMask)==0 ){ + Expr *pX = pTerm->pExpr; + idxCols |= cMask; + pIdx->aiColumn[n] = pTerm->u.leftColumn; + pColl = sqlite3BinaryCompareCollSeq(pParse, pX->pLeft, pX->pRight); + pIdx->azColl[n] = pColl->zName; + n++; + } + } + } + assert( (u32)n==pLevel->plan.nEq ); + + /* Add additional columns needed to make the automatic index into + ** a covering index */ + for(i=0; i<mxBitCol; i++){ + if( extraCols & (((Bitmask)1)<<i) ){ + pIdx->aiColumn[n] = i; + pIdx->azColl[n] = "BINARY"; + n++; + } + } + if( pSrc->colUsed & (((Bitmask)1)<<(BMS-1)) ){ + for(i=BMS-1; i<pTable->nCol; i++){ + pIdx->aiColumn[n] = i; + pIdx->azColl[n] = "BINARY"; + n++; + } + } + assert( n==nColumn ); + + /* Create the automatic index */ + pKeyinfo = sqlite3IndexKeyinfo(pParse, pIdx); + assert( pLevel->iIdxCur>=0 ); + sqlite3VdbeAddOp4(v, OP_OpenAutoindex, pLevel->iIdxCur, nColumn+1, 0, + (char*)pKeyinfo, P4_KEYINFO_HANDOFF); + VdbeComment((v, "for %s", pTable->zName)); + + /* Fill the automatic index with content */ + addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, pLevel->iTabCur); + regRecord = sqlite3GetTempReg(pParse); + sqlite3GenerateIndexKey(pParse, pIdx, pLevel->iTabCur, regRecord, 1); + sqlite3VdbeAddOp2(v, OP_IdxInsert, pLevel->iIdxCur, regRecord); + sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); + sqlite3VdbeAddOp2(v, OP_Next, pLevel->iTabCur, addrTop+1); + sqlite3VdbeChangeP5(v, SQLITE_STMTSTATUS_AUTOINDEX); + sqlite3VdbeJumpHere(v, addrTop); + sqlite3ReleaseTempReg(pParse, regRecord); + + /* Jump here when skipping the initialization */ + sqlite3VdbeJumpHere(v, addrInit); +} +#endif /* SQLITE_OMIT_AUTOMATIC_INDEX */ + #ifndef SQLITE_OMIT_VIRTUALTABLE /* ** Allocate and populate an sqlite3_index_info structure. It is the @@ -1819,6 +2068,7 @@ static void bestVirtualIndex( WhereTerm *pTerm; int i, j; int nOrderBy; + double rCost; /* Make sure wsFlags is initialized to some sane value. Otherwise, if the ** malloc in allocateIndexInfo() fails and this function returns leaving @@ -1905,6 +2155,15 @@ static void bestVirtualIndex( } } + /* If there is an ORDER BY clause, and the selected virtual table index + ** does not satisfy it, increase the cost of the scan accordingly. This + ** matches the processing for non-virtual tables in bestBtreeIndex(). + */ + rCost = pIdxInfo->estimatedCost; + if( pOrderBy && pIdxInfo->orderByConsumed==0 ){ + rCost += estLog(rCost)*rCost; + } + /* The cost is not allowed to be larger than SQLITE_BIG_DBL (the ** initial value of lowestCost in this loop. If it is, then the ** (cost<lowestCost) test below will never be true. ** ** Use "(double)2" instead of "2.0" in case OMIT_FLOATING_POINT ** is defined. */ - if( (SQLITE_BIG_DBL/((double)2))<pIdxInfo->estimatedCost ){ + if( (SQLITE_BIG_DBL/((double)2))<rCost ){ pCost->rCost = (SQLITE_BIG_DBL/((double)2)); }else{ - pCost->rCost = pIdxInfo->estimatedCost; + pCost->rCost = rCost; } pCost->plan.u.pVtabIdx = pIdxInfo; if( pIdxInfo->orderByConsumed ){ @@ -2316,14 +2575,14 @@ static void bestBtreeIndex( ** Set to true if there was at least one "x IN (SELECT ...)" term used ** in determining the value of nInMul. ** - ** nBound: + ** estBound: ** An estimate on the amount of the table that must be searched. A ** value of 100 means the entire table is searched. Range constraints ** might reduce this to a value less than 100 to indicate that only ** a fraction of the table needs searching. In the absence of ** sqlite_stat2 ANALYZE data, a single inequality reduces the search ** space to 1/3rd its original size. So an x>? constraint reduces - ** nBound to 33. Two constraints (x>? AND x?
AND xnColumn; nEq++){ - WhereTerm *pTerm; /* A single term of the WHERE clause */ int j = pProbe->aiColumn[nEq]; pTerm = findTerm(pWC, iCur, j, notReady, eqTermMask, pIdx); if( pTerm==0 ) break; @@ -2362,7 +2622,7 @@ static void bestBtreeIndex( if( ExprHasProperty(pExpr, EP_xIsSelect) ){ nInMul *= 25; bInEst = 1; - }else if( pExpr->x.pList ){ + }else if( ALWAYS(pExpr->x.pList) ){ nInMul *= pExpr->x.pList->nExpr + 1; } }else if( pTerm->eOperator & WO_ISNULL ){ @@ -2371,18 +2631,20 @@ static void bestBtreeIndex( used |= pTerm->prereqRight; } - /* Determine the value of nBound. */ + /* Determine the value of estBound. */ if( nEqnColumn ){ int j = pProbe->aiColumn[nEq]; if( findTerm(pWC, iCur, j, notReady, WO_LT|WO_LE|WO_GT|WO_GE, pIdx) ){ WhereTerm *pTop = findTerm(pWC, iCur, j, notReady, WO_LT|WO_LE, pIdx); WhereTerm *pBtm = findTerm(pWC, iCur, j, notReady, WO_GT|WO_GE, pIdx); - whereRangeScanEst(pParse, pProbe, nEq, pBtm, pTop, &nBound); + whereRangeScanEst(pParse, pProbe, nEq, pBtm, pTop, &estBound); if( pTop ){ + nBound = 1; wsFlags |= WHERE_TOP_LIMIT; used |= pTop->prereqRight; } if( pBtm ){ + nBound++; wsFlags |= WHERE_BTM_LIMIT; used |= pBtm->prereqRight; } @@ -2413,7 +2675,7 @@ static void bestBtreeIndex( /* If currently calculating the cost of using an index (not the IPK ** index), determine if all required column data may be obtained without - ** seeking to entries in the main table (i.e. if the index is a covering + ** using the main table (i.e. if the index is a covering ** index for this query). If it is, set the WHERE_IDX_ONLY flag in ** wsFlags. Otherwise, set the bLookup variable to true. */ if( pIdx && wsFlags ){ @@ -2432,8 +2694,7 @@ static void bestBtreeIndex( } } - /**** Begin adding up the cost of using this index (Needs improvements) - ** + /* ** Estimate the number of rows of output. For an IN operator, ** do not let the estimate exceed half the rows in the table. */ @@ -2452,8 +2713,8 @@ static void bestBtreeIndex( /* Adjust the number of rows and the cost downward to reflect rows ** that are excluded by range constraints. */ - nRow = (nRow * (double)nBound) / (double)100; - cost = (cost * (double)nBound) / (double)100; + nRow = (nRow * (double)estBound) / (double)100; + cost = (cost * (double)estBound) / (double)100; /* Add in the estimated cost of sorting the result */ @@ -2470,17 +2731,75 @@ static void bestBtreeIndex( } /**** Cost of using this index has now been computed ****/ + /* If there are additional constraints on this table that cannot + ** be used with the current index, but which might lower the number + ** of output rows, adjust the nRow value accordingly. This only + ** matters if the current index is the least costly, so do not bother + ** with this step if we already know this index will not be chosen. + ** Also, never reduce the output row count below 2 using this step. + ** + ** Do not reduce the output row count if pSrc is the only table that + ** is notReady; if notReady is a power of two. This will be the case + ** when the main sqlite3WhereBegin() loop is scanning for a table with + ** and "optimal" index, and on such a scan the output row count + ** reduction is not valid because it does not update the "pCost->used" + ** bitmap. The notReady bitmap will also be a power of two when we + ** are scanning for the last table in a 64-way join. We are willing + ** to bypass this optimization in that corner case. 
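+ **
+ ** [Editor's worked example - not part of the original patch: with
+ ** nRow==1000, an index already covering one equality (nEq==1) and two
+ ** further terms on this table - one extra equality and one generic
+ ** expression - the loop below skips the first == term, divides by 10
+ ** for the unindexed == term and by 2 for the other term, leaving an
+ ** estimate of 1000/10/2 == 50 output rows.]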
+ */ + if( nRow>2 && cost<=pCost->rCost && (notReady & (notReady-1))!=0 ){ + int k; /* Loop counter */ + int nSkipEq = nEq; /* Number of == constraints to skip */ + int nSkipRange = nBound; /* Number of < constraints to skip */ + Bitmask thisTab; /* Bitmap for pSrc */ + + thisTab = getMask(pWC->pMaskSet, iCur); + for(pTerm=pWC->a, k=pWC->nTerm; nRow>2 && k; k--, pTerm++){ + if( pTerm->wtFlags & TERM_VIRTUAL ) continue; + if( (pTerm->prereqAll & notReady)!=thisTab ) continue; + if( pTerm->eOperator & (WO_EQ|WO_IN|WO_ISNULL) ){ + if( nSkipEq ){ + /* Ignore the first nEq equality matches since the index + ** has already accounted for these */ + nSkipEq--; + }else{ + /* Assume each additional equality match reduces the result + ** set size by a factor of 10 */ + nRow /= 10; + } + }else if( pTerm->eOperator & (WO_LT|WO_LE|WO_GT|WO_GE) ){ + if( nSkipRange ){ + /* Ignore the first nBound range constraints since the index + ** has already accounted for these */ + nSkipRange--; + }else{ + /* Assume each additional range constraint reduces the result + ** set size by a factor of 3 */ + nRow /= 3; + } + }else{ + /* Any other expression lowers the output row count by half */ + nRow /= 2; + } + } + if( nRow<2 ) nRow = 2; + } + + WHERETRACE(( - "tbl=%s idx=%s nEq=%d nInMul=%d nBound=%d bSort=%d bLookup=%d" - " wsFlags=%d (nRow=%.2f cost=%.2f)\n", + "%s(%s): nEq=%d nInMul=%d estBound=%d bSort=%d bLookup=%d wsFlags=0x%x\n" + " notReady=0x%llx nRow=%.2f cost=%.2f used=0x%llx\n", pSrc->pTab->zName, (pIdx ? pIdx->zName : "ipk"), - nEq, nInMul, nBound, bSort, bLookup, wsFlags, nRow, cost + nEq, nInMul, estBound, bSort, bLookup, wsFlags, + notReady, nRow, cost, used )); /* If this index is the best we have seen so far, then record this ** index and its cost in the pCost structure. */ - if( (!pIdx || wsFlags) && costrCost ){ + if( (!pIdx || wsFlags) + && (costrCost || (cost<=pCost->rCost && nRownRow)) + ){ pCost->rCost = cost; pCost->nRow = nRow; pCost->used = used; @@ -2515,10 +2834,12 @@ static void bestBtreeIndex( ); WHERETRACE(("best index is: %s\n", - (pCost->plan.u.pIdx ? pCost->plan.u.pIdx->zName : "ipk") + ((pCost->plan.wsFlags & WHERE_NOT_FULLSCAN)==0 ? "none" : + pCost->plan.u.pIdx ? pCost->plan.u.pIdx->zName : "ipk") )); bestOrClauseIndex(pParse, pWC, pSrc, notReady, pOrderBy, pCost); + bestAutomaticIndex(pParse, pWC, pSrc, notReady, pCost); pCost->plan.wsFlags |= eqTermMask; } @@ -2576,7 +2897,7 @@ static void bestIndex( */ static void disableTerm(WhereLevel *pLevel, WhereTerm *pTerm){ if( pTerm - && ALWAYS((pTerm->wtFlags & TERM_CODED)==0) + && (pTerm->wtFlags & TERM_CODED)==0 && (pLevel->iLeftJoin==0 || ExprHasProperty(pTerm->pExpr, EP_FromJoin)) ){ pTerm->wtFlags |= TERM_CODED; @@ -2774,7 +3095,9 @@ static int codeAllEqualityTerms( int k = pIdx->aiColumn[j]; pTerm = findTerm(pWC, iCur, k, notReady, pLevel->plan.wsFlags, pIdx); if( NEVER(pTerm==0) ) break; - assert( (pTerm->wtFlags & TERM_CODED)==0 ); + /* The following true for indices with redundant columns. + ** Ex: CREATE INDEX i1 ON t1(a,b,a); SELECT * FROM t1 WHERE a=0 AND b=0; */ + testcase( (pTerm->wtFlags & TERM_CODED)!=0 ); r1 = codeEqualityTerm(pParse, pTerm, pLevel, regBase+j); if( r1!=regBase+j ){ if( nReg==1 ){ @@ -2988,7 +3311,11 @@ static Bitmask codeOneLoopStart( pLevel->op = bRev ? 
OP_Prev : OP_Next; pLevel->p1 = iCur; pLevel->p2 = start; - pLevel->p5 = (pStart==0 && pEnd==0) ?1:0; + if( pStart==0 && pEnd==0 ){ + pLevel->p5 = SQLITE_STMTSTATUS_FULLSCAN_STEP; + }else{ + assert( pLevel->p5==0 ); + } if( testOp!=OP_Noop ){ iRowidReg = iReleaseReg = sqlite3GetTempReg(pParse); sqlite3VdbeAddOp2(v, OP_Rowid, iCur, iRowidReg); @@ -3057,7 +3384,8 @@ static Bitmask codeOneLoopStart( int iIdxCur; /* The VDBE cursor for the index */ int nExtraReg = 0; /* Number of extra registers needed */ int op; /* Instruction opcode */ - char *zAff; + char *zStartAff; /* Affinity for start of range constraint */ + char *zEndAff; /* Affinity for end of range constraint */ pIdx = pLevel->plan.u.pIdx; iIdxCur = pLevel->iIdxCur; @@ -3098,15 +3426,16 @@ static Bitmask codeOneLoopStart( ** starting at regBase. */ regBase = codeAllEqualityTerms( - pParse, pLevel, pWC, notReady, nExtraReg, &zAff + pParse, pLevel, pWC, notReady, nExtraReg, &zStartAff ); + zEndAff = sqlite3DbStrDup(pParse->db, zStartAff); addrNxt = pLevel->addrNxt; /* If we are doing a reverse order scan on an ascending index, or ** a forward order scan on a descending index, interchange the ** start and end terms (pRangeStart and pRangeEnd). */ - if( bRev==(pIdx->aSortOrder[nEq]==SQLITE_SO_ASC) ){ + if( nEqnColumn && bRev==(pIdx->aSortOrder[nEq]==SQLITE_SO_ASC) ){ SWAP(WhereTerm *, pRangeEnd, pRangeStart); } @@ -3124,15 +3453,15 @@ static Bitmask codeOneLoopStart( Expr *pRight = pRangeStart->pExpr->pRight; sqlite3ExprCode(pParse, pRight, regBase+nEq); sqlite3ExprCodeIsNullJump(v, pRight, regBase+nEq, addrNxt); - if( zAff ){ - if( sqlite3CompareAffinity(pRight, zAff[nConstraint])==SQLITE_AFF_NONE){ + if( zStartAff ){ + if( sqlite3CompareAffinity(pRight, zStartAff[nEq])==SQLITE_AFF_NONE){ /* Since the comparison is to be performed with no conversions ** applied to the operands, set the affinity to apply to pRight to ** SQLITE_AFF_NONE. */ - zAff[nConstraint] = SQLITE_AFF_NONE; + zStartAff[nEq] = SQLITE_AFF_NONE; } - if( sqlite3ExprNeedsNoAffinityChange(pRight, zAff[nConstraint]) ){ - zAff[nConstraint] = SQLITE_AFF_NONE; + if( sqlite3ExprNeedsNoAffinityChange(pRight, zStartAff[nEq]) ){ + zStartAff[nEq] = SQLITE_AFF_NONE; } } nConstraint++; @@ -3142,7 +3471,7 @@ static Bitmask codeOneLoopStart( startEq = 0; start_constraints = 1; } - codeApplyAffinity(pParse, regBase, nConstraint, zAff); + codeApplyAffinity(pParse, regBase, nConstraint, zStartAff); op = aStartOp[(start_constraints<<2) + (startEq<<1) + bRev]; assert( op!=0 ); testcase( op==OP_Rewind ); @@ -3162,21 +3491,22 @@ static Bitmask codeOneLoopStart( sqlite3ExprCacheRemove(pParse, regBase+nEq, 1); sqlite3ExprCode(pParse, pRight, regBase+nEq); sqlite3ExprCodeIsNullJump(v, pRight, regBase+nEq, addrNxt); - if( zAff ){ - if( sqlite3CompareAffinity(pRight, zAff[nConstraint])==SQLITE_AFF_NONE){ + if( zEndAff ){ + if( sqlite3CompareAffinity(pRight, zEndAff[nEq])==SQLITE_AFF_NONE){ /* Since the comparison is to be performed with no conversions ** applied to the operands, set the affinity to apply to pRight to ** SQLITE_AFF_NONE. 
*/ - zAff[nConstraint] = SQLITE_AFF_NONE; + zEndAff[nEq] = SQLITE_AFF_NONE; } - if( sqlite3ExprNeedsNoAffinityChange(pRight, zAff[nConstraint]) ){ - zAff[nConstraint] = SQLITE_AFF_NONE; + if( sqlite3ExprNeedsNoAffinityChange(pRight, zEndAff[nEq]) ){ + zEndAff[nEq] = SQLITE_AFF_NONE; } } - codeApplyAffinity(pParse, regBase, nEq+1, zAff); + codeApplyAffinity(pParse, regBase, nEq+1, zEndAff); nConstraint++; } - sqlite3DbFree(pParse->db, zAff); + sqlite3DbFree(pParse->db, zStartAff); + sqlite3DbFree(pParse->db, zEndAff); /* Top of the loop body */ pLevel->p2 = sqlite3VdbeCurrentAddr(v); @@ -3446,7 +3776,7 @@ static int nQPlan = 0; /* Next free slow in _query_plan[] */ ** Free a WhereInfo structure */ static void whereInfoFree(sqlite3 *db, WhereInfo *pWInfo){ - if( pWInfo ){ + if( ALWAYS(pWInfo) ){ int i; for(i=0; inLevel; i++){ sqlite3_index_info *pInfo = pWInfo->a[i].pIdxInfo; @@ -3457,6 +3787,13 @@ static void whereInfoFree(sqlite3 *db, WhereInfo *pWInfo){ } sqlite3DbFree(db, pInfo); } + if( pWInfo->a[i].plan.wsFlags & WHERE_TEMP_INDEX ){ + Index *pIdx = pWInfo->a[i].plan.u.pIdx; + if( pIdx ){ + sqlite3DbFree(db, pIdx->zColAff); + sqlite3DbFree(db, pIdx); + } + } } whereClauseClear(pWInfo->pWC); sqlite3DbFree(db, pWInfo); @@ -3576,6 +3913,7 @@ WhereInfo *sqlite3WhereBegin( /* The number of tables in the FROM clause is limited by the number of ** bits in a Bitmask */ + testcase( pTabList->nSrc==BMS ); if( pTabList->nSrc>BMS ){ sqlite3ErrorMsg(pParse, "at most %d tables in a join", BMS); return 0; @@ -3603,6 +3941,8 @@ WhereInfo *sqlite3WhereBegin( sizeof(WhereMaskSet) ); if( db->mallocFailed ){ + sqlite3DbFree(db, pWInfo); + pWInfo = 0; goto whereBeginError; } pWInfo->nLevel = nTabList; @@ -3611,6 +3951,7 @@ WhereInfo *sqlite3WhereBegin( pWInfo->iBreak = sqlite3VdbeMakeLabel(v); pWInfo->pWC = pWC = (WhereClause *)&((u8 *)pWInfo)[nByteWInfo]; pWInfo->wctrlFlags = wctrlFlags; + pWInfo->savedNQueryLoop = pParse->nQueryLoop; pMaskSet = (WhereMaskSet*)&pWC[1]; /* Split the WHERE clause into separate subexpressions where each @@ -3712,20 +4053,25 @@ WhereInfo *sqlite3WhereBegin( bestPlan.rCost = SQLITE_BIG_DBL; /* Loop through the remaining entries in the FROM clause to find the - ** next nested loop. The FROM clause entries may be iterated through + ** next nested loop. The loop tests all FROM clause entries ** either once or twice. ** - ** The first iteration, which is always performed, searches for the - ** FROM clause entry that permits the lowest-cost, "optimal" scan. In + ** The first test is always performed if there are two or more entries + ** remaining and never performed if there is only one FROM clause entry + ** to choose from. The first test looks for an "optimal" scan. In ** this context an optimal scan is one that uses the same strategy ** for the given FROM clause entry as would be selected if the entry ** were used as the innermost nested loop. In other words, a table ** is chosen such that the cost of running that table cannot be reduced - ** by waiting for other tables to run first. + ** by waiting for other tables to run first. This "optimal" test works + ** by first assuming that the FROM clause is on the inner loop and finding + ** its query plan, then checking to see if that query plan uses any + ** other FROM clause terms that are notReady. If no notReady terms are + ** used then the "optimal" query plan works. ** - ** The second iteration is only performed if no optimal scan strategies - ** were found by the first. 
This iteration is used to search for the - ** lowest cost scan overall. + ** The second loop iteration is only performed if no optimal scan + ** strategies were found by the first loop. This 2nd iteration is used to + ** search for the lowest cost scan overall. ** ** Previous versions of SQLite performed only the second iteration - ** the next outermost loop was always that with the lowest overall @@ -3743,9 +4089,8 @@ WhereInfo *sqlite3WhereBegin( ** algorithm may choose to use t2 for the outer loop, which is a much ** costlier approach. */ - for(isOptimal=1; isOptimal>=0 && bestJ<0; isOptimal--){ - Bitmask mask = (isOptimal ? 0 : notReady); - assert( (nTabList-iFrom)>1 || isOptimal ); + for(isOptimal=(iFrom=0; isOptimal--){ + Bitmask mask; /* Mask of tables not yet ready */ for(j=iFrom, pTabItem=&pTabList->a[j]; jpTab ); @@ -3773,8 +4119,11 @@ WhereInfo *sqlite3WhereBegin( assert( isOptimal || (sCost.used¬Ready)==0 ); if( (sCost.used¬Ready)==0 - && (j==iFrom || sCost.rCostplan = bestPlan.plan; - if( bestPlan.plan.wsFlags & WHERE_INDEXED ){ + testcase( bestPlan.plan.wsFlags & WHERE_INDEXED ); + testcase( bestPlan.plan.wsFlags & WHERE_TEMP_INDEX ); + if( bestPlan.plan.wsFlags & (WHERE_INDEXED|WHERE_TEMP_INDEX) ){ pLevel->iIdxCur = pParse->nTab++; }else{ pLevel->iIdxCur = -1; } notReady &= ~getMask(pMaskSet, pTabList->a[bestJ].iCursor); pLevel->iFrom = (u8)bestJ; + if( bestPlan.nRow>=(double)1 ) pParse->nQueryLoop *= bestPlan.nRow; /* Check that if the table scanned by this loop iteration had an ** INDEXED BY clause attached to it, that the named index is being @@ -3843,6 +4195,7 @@ WhereInfo *sqlite3WhereBegin( ** searching those tables. */ sqlite3CodeVerifySchema(pParse, -1); /* Insert the cookie verifier Goto */ + notReady = ~(Bitmask)0; for(i=0, pLevel=pWInfo->a; izAlias ){ zMsg = sqlite3MAppendf(db, zMsg, "%s AS %s", zMsg, pItem->zAlias); } - if( (pLevel->plan.wsFlags & WHERE_INDEXED)!=0 ){ + if( (pLevel->plan.wsFlags & WHERE_TEMP_INDEX)!=0 ){ + zMsg = sqlite3MAppendf(db, zMsg, "%s WITH AUTOMATIC INDEX", zMsg); + }else if( (pLevel->plan.wsFlags & WHERE_INDEXED)!=0 ){ zMsg = sqlite3MAppendf(db, zMsg, "%s WITH INDEX %s", zMsg, pLevel->plan.u.pIdx->zName); }else if( pLevel->plan.wsFlags & WHERE_MULTI_OR ){ @@ -3878,8 +4233,11 @@ WhereInfo *sqlite3WhereBegin( #endif /* SQLITE_OMIT_EXPLAIN */ pTabItem = &pTabList->a[pLevel->iFrom]; pTab = pTabItem->pTab; + pLevel->iTabCur = pTabItem->iCursor; iDb = sqlite3SchemaToIndex(db, pTab->pSchema); - if( (pTab->tabFlags & TF_Ephemeral)!=0 || pTab->pSelect ) continue; + if( (pTab->tabFlags & TF_Ephemeral)!=0 || pTab->pSelect ){ + /* Do nothing */ + }else #ifndef SQLITE_OMIT_VIRTUALTABLE if( (pLevel->plan.wsFlags & WHERE_VIRTUALTABLE)!=0 ){ const char *pVTab = (const char *)sqlite3GetVTable(db, pTab); @@ -3891,6 +4249,8 @@ WhereInfo *sqlite3WhereBegin( && (wctrlFlags & WHERE_OMIT_OPEN)==0 ){ int op = pWInfo->okOnePass ? 
OP_OpenWrite : OP_OpenRead; sqlite3OpenTable(pParse, pTabItem->iCursor, iDb, pTab, op); + testcase( pTab->nCol==BMS-1 ); + testcase( pTab->nCol==BMS ); if( !pWInfo->okOnePass && pTab->nColcolUsed; int n = 0; @@ -3902,7 +4262,11 @@ WhereInfo *sqlite3WhereBegin( }else{ sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); } - pLevel->iTabCur = pTabItem->iCursor; +#ifndef SQLITE_OMIT_AUTOMATIC_INDEX + if( (pLevel->plan.wsFlags & WHERE_TEMP_INDEX)!=0 ){ + constructAutomaticIndex(pParse, pWC, pTabItem, notReady, pLevel); + }else +#endif if( (pLevel->plan.wsFlags & WHERE_INDEXED)!=0 ){ Index *pIx = pLevel->plan.u.pIdx; KeyInfo *pKey = sqlite3IndexKeyinfo(pParse, pIx); @@ -3914,8 +4278,10 @@ WhereInfo *sqlite3WhereBegin( VdbeComment((v, "%s", pIx->zName)); } sqlite3CodeVerifySchema(pParse, iDb); + notReady &= ~getMask(pWC->pMaskSet, pTabItem->iCursor); } pWInfo->iTop = sqlite3VdbeCurrentAddr(v); + if( db->mallocFailed ) goto whereBeginError; /* Generate the code to do the search. Each iteration of the for ** loop below generates code for a single nested loop of the VM @@ -3983,7 +4349,10 @@ WhereInfo *sqlite3WhereBegin( /* Jump here if malloc fails */ whereBeginError: - whereInfoFree(db, pWInfo); + if( pWInfo ){ + pParse->nQueryLoop = pWInfo->savedNQueryLoop; + whereInfoFree(db, pWInfo); + } return 0; } @@ -4053,12 +4422,15 @@ void sqlite3WhereEnd(WhereInfo *pWInfo){ struct SrcList_item *pTabItem = &pTabList->a[pLevel->iFrom]; Table *pTab = pTabItem->pTab; assert( pTab!=0 ); - if( (pTab->tabFlags & TF_Ephemeral)!=0 || pTab->pSelect ) continue; - if( (pWInfo->wctrlFlags & WHERE_OMIT_CLOSE)==0 ){ - if( !pWInfo->okOnePass && (pLevel->plan.wsFlags & WHERE_IDX_ONLY)==0 ){ + if( (pTab->tabFlags & TF_Ephemeral)==0 + && pTab->pSelect==0 + && (pWInfo->wctrlFlags & WHERE_OMIT_CLOSE)==0 + ){ + int ws = pLevel->plan.wsFlags; + if( !pWInfo->okOnePass && (ws & WHERE_IDX_ONLY)==0 ){ sqlite3VdbeAddOp1(v, OP_Close, pTabItem->iCursor); } - if( (pLevel->plan.wsFlags & WHERE_INDEXED)!=0 ){ + if( (ws & WHERE_INDEXED)!=0 && (ws & WHERE_TEMP_INDEX)==0 ){ sqlite3VdbeAddOp1(v, OP_Close, pLevel->iIdxCur); } } @@ -4106,6 +4478,7 @@ void sqlite3WhereEnd(WhereInfo *pWInfo){ /* Final cleanup */ + pParse->nQueryLoop = pWInfo->savedNQueryLoop; whereInfoFree(db, pWInfo); return; } diff --git a/test/all.test b/test/all.test index 980d252..8ccc0a6 100644 --- a/test/all.test +++ b/test/all.test @@ -10,135 +10,40 @@ #*********************************************************************** # This file runs all tests. # -# $Id: all.test,v 1.62 2009/01/06 18:43:51 drh Exp $ set testdir [file dirname $argv0] -source $testdir/tester.tcl -rename finish_test really_finish_test -proc finish_test {} { - catch {db close} - show_memstats -} - -if {[file exists ./sqlite_test_count]} { - set COUNT [exec cat ./sqlite_test_count] -} else { - set COUNT 1 -} - -if {[llength $argv]>0} { - foreach {name value} $argv { - switch -- $name { - -count { - set COUNT $value - } - -quick { - set ISQUICK $value - } - -soak { - set SOAKTEST $value - } - default { - puts stderr "Unknown option: $name" - exit - } - } - } -} -set argv {} - -# LeakList will hold a list of the number of unfreed mallocs after -# each round of the test. This number should be constant. If it -# grows, it may mean there is a memory leak in the library. -# -set LeakList {} - -set EXCLUDE {} -lappend EXCLUDE all.test ;# This file -lappend EXCLUDE async.test -lappend EXCLUDE crash.test ;# Run seperately later. -lappend EXCLUDE crash2.test ;# Run seperately later. 
-lappend EXCLUDE quick.test ;# Alternate test driver script -lappend EXCLUDE veryquick.test ;# Alternate test driver script -lappend EXCLUDE malloc.test ;# Run seperately later. -lappend EXCLUDE misuse.test ;# Run seperately later. -lappend EXCLUDE memleak.test ;# Alternate test driver script -lappend EXCLUDE permutations.test ;# Run seperately later. -lappend EXCLUDE soak.test ;# Takes a very long time (default 1 hr) -lappend EXCLUDE fts3.test ;# Wrapper for muliple fts3*.tests -lappend EXCLUDE mallocAll.test ;# Wrapper for running all malloc tests - -# Files to include in the test. If this list is empty then everything -# that is not in the EXCLUDE list is run. -# -set INCLUDE { -} - -for {set Counter 0} {$Counter<$COUNT && $nErr==0} {incr Counter} { - foreach testfile [lsort -dictionary [glob $testdir/*.test]] { - set tail [file tail $testfile] - if {[lsearch -exact $EXCLUDE $tail]>=0} continue - if {[llength $INCLUDE]>0 && [lsearch -exact $INCLUDE $tail]<0} continue - reset_prng_state - source $testfile - catch {db close} - if {$sqlite_open_file_count>0} { - puts "$tail did not close all files: $sqlite_open_file_count" - incr nErr - lappend ::failList $tail - set sqlite_open_file_count 0 - } - } - if {[info exists Leak]} { - lappend LeakList $Leak - } -} -set argv all source $testdir/permutations.test -set argv "" -# Do one last test to look for a memory leak in the library. This will -# only work if SQLite is compiled with the -DSQLITE_DEBUG=1 flag. -# -if {$LeakList!=""} { - puts -nonewline memory-leak-test... - incr ::nTest - foreach x $LeakList { - if {$x!=[lindex $LeakList 0]} { - puts " failed!" - puts "Expected: all values to be the same" - puts " Got: $LeakList" - incr ::nErr - lappend ::failList memory-leak-test - break - } - } - puts " Ok" -} +run_test_suite full + +run_test_suite memsubsys1 +run_test_suite memsubsys2 +run_test_suite singlethread +run_test_suite multithread +run_test_suite onefile +run_test_suite utf16 +run_test_suite exclusive +run_test_suite persistent_journal +run_test_suite persistent_journal_error +run_test_suite no_journal +run_test_suite no_journal_error +run_test_suite autovacuum_ioerr +run_test_suite no_mutex_try +run_test_suite fullmutex +run_test_suite journaltest +run_test_suite inmemory_journal +run_test_suite pcache0 +run_test_suite pcache10 +run_test_suite pcache50 +run_test_suite pcache90 +run_test_suite pcache100 -# Run the crashtest only on unix and only once. If the library does not -# always create auto-vacuum databases, also run autovacuum_crash.test. -# if {$::tcl_platform(platform)=="unix"} { - source $testdir/crash.test - source $testdir/crash2.test ifcapable !default_autovacuum { - set argv autovacuum_crash - source $testdir/permutations.test - set argv "" + run_test_suite autovacuum_crash } } -# Run the malloc tests and the misuse test after memory leak detection. -# Both tests leak memory. Currently, misuse.test also leaks a handful of -# file descriptors. This is not considered a problem, but can cause tests -# in malloc.test to fail. So set the open-file count to zero before running -# malloc.test to get around this. 
-# -catch {source $testdir/misuse.test} -set sqlite_open_file_count 0 -catch {source $testdir/malloc.test} +finish_test + -catch {db close} -set sqlite_open_file_count 0 -really_finish_test diff --git a/test/alter.test b/test/alter.test index 2db82dd..bf7cf00 100644 --- a/test/alter.test +++ b/test/alter.test @@ -173,6 +173,20 @@ ifcapable tempdb { } } +# Create bogus application-defined functions for functions used +# internally by ALTER TABLE, to ensure that ALTER TABLE falls back +# to the built-in functions. +# +proc failing_app_func {args} {error "bad function"} +do_test alter-1.7-prep { + db func substr failing_app_func + db func like failing_app_func + db func sqlite_rename_table failing_app_func + db func sqlite_rename_trigger failing_app_func + db func sqlite_rename_parent failing_app_func + catchsql {SELECT substr(name,1,3) FROM sqlite_master} +} {1 {bad function}} + # Make sure the ALTER TABLE statements work with the # non-callback API # @@ -567,7 +581,8 @@ do_test alter-5.3 { } {} foreach tblname [execsql { - SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite%' + SELECT name FROM sqlite_master + WHERE type='table' AND name NOT GLOB 'sqlite*' }] { execsql "DROP TABLE \"$tblname\"" } @@ -688,17 +703,17 @@ do_test alter-9.2 { do_test alter-10.1 { execsql "CREATE TABLE xyz(x UNIQUE)" execsql "ALTER TABLE xyz RENAME TO xyz\u1234abc" - execsql {SELECT name FROM sqlite_master WHERE name LIKE 'xyz%'} + execsql {SELECT name FROM sqlite_master WHERE name GLOB 'xyz*'} } [list xyz\u1234abc] do_test alter-10.2 { - execsql {SELECT name FROM sqlite_master WHERE name LIKE 'sqlite_autoindex%'} + execsql {SELECT name FROM sqlite_master WHERE name GLOB 'sqlite_autoindex*'} } [list sqlite_autoindex_xyz\u1234abc_1] do_test alter-10.3 { execsql "ALTER TABLE xyz\u1234abc RENAME TO xyzabc" - execsql {SELECT name FROM sqlite_master WHERE name LIKE 'xyz%'} + execsql {SELECT name FROM sqlite_master WHERE name GLOB 'xyz*'} } [list xyzabc] do_test alter-10.4 { - execsql {SELECT name FROM sqlite_master WHERE name LIKE 'sqlite_autoindex%'} + execsql {SELECT name FROM sqlite_master WHERE name GLOB 'sqlite_autoindex*'} } [list sqlite_autoindex_xyzabc_1] do_test alter-11.1 { @@ -795,19 +810,19 @@ do_test alter-13.1 { CREATE TABLE t3102b -- comment (y); CREATE INDEX t3102c ON t3102a(x); - SELECT name FROM sqlite_master WHERE name LIKE 't3102%' ORDER BY 1; + SELECT name FROM sqlite_master WHERE name GLOB 't3102*' ORDER BY 1; } } {t3102a t3102b t3102c} do_test alter-13.2 { execsql { ALTER TABLE t3102a RENAME TO t3102a_rename; - SELECT name FROM sqlite_master WHERE name LIKE 't3102%' ORDER BY 1; + SELECT name FROM sqlite_master WHERE name GLOB 't3102*' ORDER BY 1; } } {t3102a_rename t3102b t3102c} do_test alter-13.3 { execsql { ALTER TABLE t3102b RENAME TO t3102b_rename; - SELECT name FROM sqlite_master WHERE name LIKE 't3102%' ORDER BY 1; + SELECT name FROM sqlite_master WHERE name GLOB 't3102*' ORDER BY 1; } } {t3102a_rename t3102b_rename t3102c} diff --git a/test/alter2.test b/test/alter2.test index 0ce023b..b5d700d 100644 --- a/test/alter2.test +++ b/test/alter2.test @@ -22,9 +22,15 @@ source $testdir/tester.tcl # We have to have pragmas in order to do this test ifcapable {!pragma} return +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts. See proc [set_file_format]. +# +do_not_use_codec + # These tests do not work if there is a codec. 
# #if {[catch {sqlite3 -has_codec} r] || $r} return +# # The file format change affects the way row-records stored in tables (but # not indices) are interpreted. Before version 3.1.3, a row-record for a @@ -72,6 +78,21 @@ proc alter_table {tbl sql {file_format 2}} { set_file_format 2 } +# Create bogus application-defined functions for functions used +# internally by ALTER TABLE, to ensure that ALTER TABLE falls back +# to the built-in functions. +# +proc failing_app_func {args} {error "bad function"} +do_test alter2-1.0 { + db func substr failing_app_func + db func like failing_app_func + db func sqlite_rename_table failing_app_func + db func sqlite_rename_trigger failing_app_func + db func sqlite_rename_parent failing_app_func + catchsql {SELECT substr('abcdefg',1,3)} +} {1 {bad function}} + + #----------------------------------------------------------------------- # Some basic tests to make sure short rows are handled. # @@ -233,7 +254,8 @@ ifcapable trigger { do_test alter2-4.1 { db close set_file_format 5 - sqlite3 db test.db + catch { sqlite3 db test.db } + set {} {} } {} do_test alter2-4.2 { # We have to run two queries here because the Tcl interface uses diff --git a/test/analyze.test b/test/analyze.test index 867cf0f..5bd653a 100644 --- a/test/analyze.test +++ b/test/analyze.test @@ -296,7 +296,7 @@ do_test analyze-99.1 { UPDATE sqlite_master SET sql='nonsense' WHERE name='sqlite_stat1'; } db close - sqlite3 db test.db + catch { sqlite3 db test.db } catchsql { ANALYZE } diff --git a/test/analyze2.test b/test/analyze2.test index b149fe0..7a606bb 100644 --- a/test/analyze2.test +++ b/test/analyze2.test @@ -22,6 +22,11 @@ ifcapable !stat2 { return } +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). +# +do_not_use_codec + #-------------------------------------------------------------------- # Test organization: # diff --git a/test/async.test b/test/async.test index 835d39c..f1d641d 100644 --- a/test/async.test +++ b/test/async.test @@ -23,8 +23,8 @@ proc finish_test {} { catch {db2 close} catch {db3 close} } -if {[info exists ISQUICK]} { set ASYNC_SAVE_ISQUICK $ISQUICK } -set ISQUICK 1 +if {[info exists G(isquick)]} { set ASYNC_SAVE_ISQUICK $G(isquick) } +set G(isquick) 1 set ASYNC_INCLUDE { insert.test @@ -43,13 +43,22 @@ set ASYNC_INCLUDE { # Enable asynchronous IO. sqlite3async_initialize "" 1 +# This proc flushes the contents of the async-IO queue through to the +# underlying VFS. A couple of the test scripts identified in $ASYNC_INCLUDE +# above contain lines like "catch flush_async_queue" in places where +# this is required for the tests to work in async mode. +# +proc flush_async_queue {} { + sqlite3async_control halt idle + sqlite3async_start + sqlite3async_wait + sqlite3async_control halt never +} + rename do_test async_really_do_test proc do_test {name args} { uplevel async_really_do_test async_io-$name $args - sqlite3async_start - sqlite3async_control halt idle - sqlite3async_wait - sqlite3async_control halt never + flush_async_queue } foreach testfile [lsort -dictionary [glob $testdir/*.test]] { @@ -62,19 +71,13 @@ foreach testfile [lsort -dictionary [glob $testdir/*.test]] { # [file delete]). If the asynchronous backend still has the file # open, it will become confused. # - sqlite3async_control halt idle - sqlite3async_start - sqlite3async_wait - sqlite3async_control halt never + flush_async_queue } # Flush the write-queue and disable asynchronous IO. 
This should ensure # all allocated memory is cleaned up. set sqlite3async_trace 1 -sqlite3async_control halt idle -sqlite3async_start -sqlite3async_wait -sqlite3async_control halt never +flush_async_queue sqlite3async_shutdown set sqlite3async_trace 0 @@ -83,5 +86,5 @@ rename async_really_do_test do_test rename finish_test {} rename async_really_finish_test finish_test -if {[info exists ASYNC_SAVE_ISQUICK]} { set ISQUICK $ASYNC_SAVE_ISQUICK } +if {[info exists ASYNC_SAVE_ISQUICK]} { set G(isquick) $ASYNC_SAVE_ISQUICK } finish_test diff --git a/test/async4.test b/test/async4.test index 584a4af..92a8201 100644 --- a/test/async4.test +++ b/test/async4.test @@ -14,6 +14,11 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). +# +do_not_use_codec + # These tests only work for Tcl version 8.5 and later on Windows (for now) # if {$tcl_platform(platform)=="windows"} { @@ -94,6 +99,7 @@ do_test async4.1.14 { do_test async4.1.15 { sqlite3async_start sqlite3async_wait + hexio_write test.db 28 00000000 execsql { pragma integrity_check } db2 } {{*** in database main *** Page 5 is never used}} diff --git a/test/autoindex1.test b/test/autoindex1.test new file mode 100644 index 0000000..96b4f1a --- /dev/null +++ b/test/autoindex1.test @@ -0,0 +1,139 @@ +# 2010 April 07 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing automatic index creation logic. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If the library is not compiled with automatic index support then +# skip all tests in this file. +# +ifcapable {!autoindex} { + finish_test + return +} + +# With automatic index turned off, we do a full scan of the T2 table +do_test autoindex1-100 { + db eval { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,11); + INSERT INTO t1 VALUES(2,22); + INSERT INTO t1 SELECT a+2, b+22 FROM t1; + INSERT INTO t1 SELECT a+4, b+44 FROM t1; + CREATE TABLE t2(c,d); + INSERT INTO t2 SELECT a, 900+b FROM t1; + } + db eval { + PRAGMA automatic_index=OFF; + SELECT b, d FROM t1 JOIN t2 ON a=c ORDER BY b; + } +} {11 911 22 922 33 933 44 944 55 955 66 966 77 977 88 988} +do_test autoindex1-101 { + db status step +} {63} +do_test autoindex1-102 { + db status autoindex +} {0} + +# With autoindex turned on, we build an index once and then use that index +# to find T2 values. +do_test autoindex1-110 { + db eval { + PRAGMA automatic_index=ON; + SELECT b, d FROM t1 JOIN t2 ON a=c ORDER BY b; + } +} {11 911 22 922 33 933 44 944 55 955 66 966 77 977 88 988} +do_test autoindex1-111 { + db status step +} {7} +do_test autoindex1-112 { + db status autoindex +} {7} + +# The same test as above, but this time the T2 query is a subquery rather +# than a join. 
+do_test autoindex1-200 { + db eval { + PRAGMA automatic_index=OFF; + SELECT b, (SELECT d FROM t2 WHERE c=a) FROM t1; + } +} {11 911 22 922 33 933 44 944 55 955 66 966 77 977 88 988} +do_test autoindex1-201 { + db status step +} {35} +do_test autoindex1-202 { + db status autoindex +} {0} +do_test autoindex1-210 { + db eval { + PRAGMA automatic_index=ON; + SELECT b, (SELECT d FROM t2 WHERE c=a) FROM t1; + } +} {11 911 22 922 33 933 44 944 55 955 66 966 77 977 88 988} +do_test autoindex1-211 { + db status step +} {7} +do_test autoindex1-212 { + db status autoindex +} {7} + + +# Modify the second table of the join while the join is in progress +# +do_test autoindex1-300 { + set r {} + db eval {SELECT b, d FROM t1 JOIN t2 ON (c=a)} { + lappend r $b $d + db eval {UPDATE t2 SET d=d+1} + } + set r +} {11 911 22 922 33 933 44 944 55 955 66 966 77 977 88 988} +do_test autoindex1-310 { + db eval {SELECT d FROM t2 ORDER BY d} +} {919 930 941 952 963 974 985 996} + +# The next test does a 10-way join on unindexed tables. Without +# automatic indices, the join will take a long time to complete. +# With automatic indices, it should only take about a second. +# +do_test autoindex1-400 { + db eval { + CREATE TABLE t4(a, b); + INSERT INTO t4 VALUES(1,2); + INSERT INTO t4 VALUES(2,3); + } + for {set n 2} {$n<4096} {set n [expr {$n+$n}]} { + db eval {INSERT INTO t4 SELECT a+$n, b+$n FROM t4} + } + db eval { + SELECT count(*) FROM t4; + } +} {4096} +do_test autoindex1-401 { + db eval { + SELECT count(*) + FROM t4 AS x1 + JOIN t4 AS x2 ON x2.a=x1.b + JOIN t4 AS x3 ON x3.a=x2.b + JOIN t4 AS x4 ON x4.a=x3.b + JOIN t4 AS x5 ON x5.a=x4.b + JOIN t4 AS x6 ON x6.a=x5.b + JOIN t4 AS x7 ON x7.a=x6.b + JOIN t4 AS x8 ON x8.a=x7.b + JOIN t4 AS x9 ON x9.a=x8.b + JOIN t4 AS x10 ON x10.a=x9.b; + } +} {4087} + +finish_test diff --git a/test/autovacuum.test b/test/autovacuum.test index b843707..57dfd52 100644 --- a/test/autovacuum.test +++ b/test/autovacuum.test @@ -654,12 +654,14 @@ do_test autovacuum-8.1 { sqlite3 db2 test.db db eval {PRAGMA auto_vacuum} } {1} -do_test autovacuum-8.2 { - db eval {BEGIN EXCLUSIVE} - catchsql {PRAGMA auto_vacuum} db2 -} {1 {database is locked}} -catch {db2 close} -catch {db eval {COMMIT}} +if {[permutation] == ""} { + do_test autovacuum-8.2 { + db eval {BEGIN EXCLUSIVE} + catchsql {PRAGMA auto_vacuum} db2 + } {1 {database is locked}} + catch {db2 close} + catch {db eval {COMMIT}} +} do_test autovacuum-9.1 { execsql { diff --git a/test/avtrans.test b/test/avtrans.test index 328e028..17a2860 100644 --- a/test/avtrans.test +++ b/test/avtrans.test @@ -22,8 +22,9 @@ source $testdir/tester.tcl # Create several tables to work with. 
# do_test avtrans-1.0 { - execsql { - PRAGMA auto_vacuum=ON; + execsql { PRAGMA auto_vacuum=ON } + wal_set_journal_mode + execsql { CREATE TABLE one(a int PRIMARY KEY, b text); INSERT INTO one VALUES(1,'one'); INSERT INTO one VALUES(2,'two'); @@ -48,6 +49,7 @@ do_test avtrans-1.10 { execsql {SELECT b FROM two ORDER BY a} altdb } {I V X} integrity_check avtrans-1.11 +wal_check_journal_mode avtrans-1.12 # Basic transactions # @@ -84,6 +86,7 @@ do_test avtrans-2.10 { } } {1 2 3 1 5 10} integrity_check avtrans-2.11 +wal_check_journal_mode avtrans-2.12 # Check the locking behavior # @@ -162,7 +165,7 @@ do_test avtrans-3.14 { } db} msg] lappend v $msg } {0 {1 2 3 4}} -sqlite3_soft_heap_limit $soft_limit +sqlite3_soft_heap_limit $cmdlinearg(soft-heap-limit) integrity_check avtrans-3.15 do_test avtrans-4.1 { @@ -854,7 +857,7 @@ proc signature {} { # t3 a little larger, and thus takes a little longer, so doing 40 tests # is more than 2.0 times slower than doing 20 tests. Considerably more. # -if {[info exists ISQUICK]} { +if {[info exists G(isquick)]} { set limit 20 } else { set limit 40 @@ -913,9 +916,11 @@ for {set i 2} {$i<=$limit} {incr i} { } {1} } } + wal_check_journal_mode avtrans-9.$i-6.$cnt } set ::pager_old_format 0 } integrity_check avtrans-10.1 +wal_check_journal_mode avtrans-10.2 finish_test diff --git a/test/backup.test b/test/backup.test index ec30adb..f31af1d 100644 --- a/test/backup.test +++ b/test/backup.test @@ -16,6 +16,8 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +do_not_use_codec + #--------------------------------------------------------------------- # Test organization: # @@ -38,6 +40,8 @@ source $testdir/tester.tcl # # backup-9.*: Test that passing a negative argument to backup_step() is # interpreted as "copy the whole file". +# +# backup-10.*: Test writing the source database mid backup. # proc data_checksum {db file} { $db one "SELECT md5sum(a, b) FROM ${file}.t1" } @@ -487,6 +491,7 @@ db2 close # 3) Backing up memory-to-file. # set iTest 0 +file delete -force bak.db-wal foreach {writer file} {db test.db db3 test.db db :memory:} { incr iTest catch { file delete bak.db } @@ -905,6 +910,7 @@ ifcapable memorymanage { } +#----------------------------------------------------------------------- # Test that if the database is written to via the same database handle being # used as the source by a backup operation: # diff --git a/test/backup2.test b/test/backup2.test index 17b5f6d..749c460 100644 --- a/test/backup2.test +++ b/test/backup2.test @@ -18,6 +18,8 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +do_not_use_codec + ifcapable !trigger||!view { finish_test ; return } # Fill a database with test data. diff --git a/test/backup_malloc.test b/test/backup_malloc.test index 17718b2..f556861 100644 --- a/test/backup_malloc.test +++ b/test/backup_malloc.test @@ -68,9 +68,10 @@ do_malloc_test backup_malloc-1 -tclprep { B finish } -cleanup { catch { B finish } + catch { db2 close } } -do_malloc_test backup_malloc-1 -tclprep { +do_malloc_test backup_malloc-2 -tclprep { sqlite3 db2 test2.db } -tclbody { set rc [catch {sqlite3_backup B db2 temp db main}] diff --git a/test/bigfile.test b/test/bigfile.test index 24a92c5..52d74ed 100644 --- a/test/bigfile.test +++ b/test/bigfile.test @@ -18,6 +18,11 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +# Do not use a codec for this file, as the database is manipulated using +# external methods (the [fake_big_file] and [hexio_write] commands). 
+# +do_not_use_codec + # If SQLITE_DISABLE_LFS is defined, omit this file. ifcapable !lfs { finish_test @@ -69,6 +74,7 @@ if {[catch {fake_big_file 4096 [pwd]/test.db} msg]} { finish_test return } +hexio_write test.db 28 00000000 do_test bigfile-1.2 { sqlite3 db test.db @@ -108,6 +114,7 @@ if {[catch {fake_big_file 8192 [pwd]/test.db}]} { finish_test return } +hexio_write test.db 28 00000000 do_test bigfile-1.5 { sqlite3 db test.db @@ -146,6 +153,7 @@ if {[catch {fake_big_file 16384 [pwd]/test.db}]} { finish_test return } +hexio_write test.db 28 00000000 do_test bigfile-1.10 { sqlite3 db test.db diff --git a/test/cache.test b/test/cache.test index dd51c7c..3f42a47 100644 --- a/test/cache.test +++ b/test/cache.test @@ -28,9 +28,9 @@ proc pager_cache_size {db} { return $stats(page) } -do_test cache-1.1 { - pager_cache_size db -} {0} +if {[permutation] == ""} { + do_test cache-1.1 { pager_cache_size db } {0} +} do_test cache-1.2 { execsql { @@ -58,6 +58,6 @@ for {set ii 0} {$ii < 10} {incr ii} { } $::cache_size } -sqlite3_soft_heap_limit $soft_limit +sqlite3_soft_heap_limit $cmdlinearg(soft-heap-limit) finish_test diff --git a/test/capi2.test b/test/capi2.test index 8bcb25a..b4aa970 100644 --- a/test/capi2.test +++ b/test/capi2.test @@ -71,15 +71,19 @@ do_test capi2-1.6 { do_test capi2-1.7 { list [sqlite3_column_count $VM] [get_row_values $VM] [get_column_names $VM] } {2 {} {name rowid text INTEGER}} -do_test capi2-1.8-misuse { + +# This used to be SQLITE_MISUSE. But now we automatically reset prepared +# statements. +do_test capi2-1.8 { sqlite3_step $VM -} {SQLITE_MISUSE} +} {SQLITE_ROW} # Update: In v2, once SQLITE_MISUSE is returned the statement handle cannot # be interrogated for more information. However in v3, since the column # count, names and types are determined at compile time, these are still # accessible after an SQLITE_MISUSE error. do_test capi2-1.9 { + sqlite3_reset $VM list [sqlite3_column_count $VM] [get_row_values $VM] [get_column_names $VM] } {2 {} {name rowid text INTEGER}} do_test capi2-1.10 { diff --git a/test/capi3.test b/test/capi3.test index 0111dba..4f5e02c 100644 --- a/test/capi3.test +++ b/test/capi3.test @@ -17,6 +17,11 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). +# +do_not_use_codec + # Return the UTF-16 representation of the supplied UTF-8 string $str. # If $nt is true, append two 0x00 bytes as a nul terminator. 
proc utf16 {str {nt 1}} { @@ -635,7 +640,7 @@ db close do_test capi3-6.0 { sqlite3 db test.db set DB [sqlite3_connection_pointer db] - sqlite3_key $DB xyzzy + if {[sqlite3 -has-codec]==0} { sqlite3_key $DB xyzzy } set sql {SELECT a FROM t1 order by rowid} set STMT [sqlite3_prepare $DB $sql -1 TAIL] expr 0 @@ -680,7 +685,7 @@ if {![sqlite3 -has-codec]} { set_file_format 5 } {} do_test capi3-7.2 { - sqlite3 db test.db + catch { sqlite3 db test.db } catchsql { SELECT * FROM sqlite_master; } @@ -708,7 +713,7 @@ if {![sqlite3 -has-codec]} { db close } {} do_test capi3-8.3 { - sqlite3 db test.db + catch { sqlite3 db test.db } catchsql { SELECT * FROM sqlite_master; } @@ -727,7 +732,7 @@ if {![sqlite3 -has-codec]} { db close } {}; do_test capi3-8.5 { - sqlite3 db test.db + catch { sqlite3 db test.db } catchsql { SELECT * FROM sqlite_master; } diff --git a/test/capi3b.test b/test/capi3b.test index 44790c7..72bbbaf 100644 --- a/test/capi3b.test +++ b/test/capi3b.test @@ -141,5 +141,5 @@ do_test capi3b-2.12 { catch {db2 close} -sqlite3_soft_heap_limit $soft_limit +sqlite3_soft_heap_limit $cmdlinearg(soft-heap-limit) finish_test diff --git a/test/capi3c.test b/test/capi3c.test index 55f4667..af97943 100644 --- a/test/capi3c.test +++ b/test/capi3c.test @@ -19,6 +19,11 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). +# +do_not_use_codec + # Return the UTF-16 representation of the supplied UTF-8 string $str. # If $nt is true, append two 0x00 bytes as a nul terminator. proc utf16 {str {nt 1}} { @@ -606,7 +611,7 @@ db close do_test capi3c-6.0 { sqlite3 db test.db set DB [sqlite3_connection_pointer db] - sqlite3_key $DB xyzzy + if {[sqlite3 -has-codec]==0} { sqlite3_key $DB xyzzy } set sql {SELECT a FROM t1 order by rowid} set STMT [sqlite3_prepare_v2 $DB $sql -1 TAIL] expr 0 @@ -653,7 +658,7 @@ if {![sqlite3 -has-codec]} { set_file_format 5 } {} do_test capi3c-7.2 { - sqlite3 db test.db + catch { sqlite3 db test.db } catchsql { SELECT * FROM sqlite_master; } @@ -681,7 +686,7 @@ if {![sqlite3 -has-codec]} { db close } {} do_test capi3c-8.3 { - sqlite3 db test.db + catch { sqlite3 db test.db } catchsql { SELECT * FROM sqlite_master; } @@ -700,7 +705,7 @@ if {![sqlite3 -has-codec]} { db close } {}; do_test capi3c-8.5 { - sqlite3 db test.db + catch { sqlite3 db test.db } catchsql { SELECT * FROM sqlite_master; } diff --git a/test/collate4.test b/test/collate4.test index 4db4c31..12bc16e 100644 --- a/test/collate4.test +++ b/test/collate4.test @@ -321,6 +321,7 @@ do_test collate4-1.2.25 { # do_test collate4-2.1.0 { execsql { + PRAGMA automatic_index=OFF; CREATE TABLE collate4t1(a COLLATE NOCASE); CREATE TABLE collate4t2(b COLLATE TEXT); @@ -426,7 +427,7 @@ do_test collate4-2.2.0 { } {} do_test collate4-2.2.1 { count { - SELECT * FROM collate4t2 NATURAL JOIN collate4t1; + SELECT * FROM collate4t2 NOT INDEXED NATURAL JOIN collate4t1 NOT INDEXED; } } {0 0 0 0 0 1 0 1 0 0 1 1 1 0 0 1 0 1 1 1 0 1 1 1 63} do_test collate4-2.2.1b { diff --git a/test/corrupt.test b/test/corrupt.test index fc84033..719c19c 100644 --- a/test/corrupt.test +++ b/test/corrupt.test @@ -20,6 +20,11 @@ catch {file delete -force test.db test.db-journal test.bu} set testdir [file dirname $argv0] source $testdir/tester.tcl +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). 
+# +do_not_use_codec + # Construct a large database for testing. # do_test corrupt-1.1 { diff --git a/test/corrupt2.test b/test/corrupt2.test index acda254..bdbc016 100644 --- a/test/corrupt2.test +++ b/test/corrupt2.test @@ -18,6 +18,15 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). +# +do_not_use_codec + +set presql "" +catch { set presql "$::G(perm:presql);" } +unset -nocomplain ::G(perm:presql) + # The following tests - corrupt2-1.* - create some databases corrupted in # specific ways and ensure that SQLite detects them as corrupt. # @@ -41,9 +50,10 @@ do_test corrupt2-1.2 { close $f sqlite3 db2 corrupt.db - catchsql { + catchsql " + $::presql SELECT * FROM sqlite_master; - } db2 + " db2 } {1 {file is encrypted or is not a database}} do_test corrupt2-1.3 { @@ -60,9 +70,10 @@ do_test corrupt2-1.3 { close $f sqlite3 db2 corrupt.db - catchsql { + catchsql " + $::presql SELECT * FROM sqlite_master; - } db2 + " db2 } {1 {file is encrypted or is not a database}} do_test corrupt2-1.4 { @@ -79,9 +90,10 @@ do_test corrupt2-1.4 { close $f sqlite3 db2 corrupt.db - catchsql { + catchsql " + $::presql SELECT * FROM sqlite_master; - } db2 + " db2 } {1 {database disk image is malformed}} do_test corrupt2-1.5 { @@ -101,9 +113,10 @@ do_test corrupt2-1.5 { close $f sqlite3 db2 corrupt.db - catchsql { + catchsql " + $::presql SELECT * FROM sqlite_master; - } db2 + " db2 } {1 {database disk image is malformed}} db2 close @@ -115,7 +128,8 @@ do_test corrupt2-2.1 { copy_file test.db corrupt.db sqlite3 db2 corrupt.db - execsql { + execsql " + $::presql CREATE INDEX a1 ON abc(a); CREATE INDEX a2 ON abc(b); PRAGMA writable_schema = 1; @@ -123,13 +137,14 @@ do_test corrupt2-2.1 { SET name = 'a3', sql = 'CREATE INDEX a3' || substr(sql, 16, 10000) WHERE type = 'index'; PRAGMA writable_schema = 0; - } db2 + " db2 db2 close sqlite3 db2 corrupt.db - catchsql { + catchsql " + $::presql SELECT * FROM sqlite_master; - } db2 + " db2 } {1 {malformed database schema (a3) - index a3 already exists}} db2 close @@ -139,7 +154,8 @@ do_test corrupt2-3.1 { file delete -force corrupt.db-journal sqlite3 db2 corrupt.db - execsql { + execsql " + $::presql PRAGMA auto_vacuum = 1; PRAGMA page_size = 1024; CREATE TABLE t1(a, b, c); @@ -149,7 +165,7 @@ do_test corrupt2-3.1 { INSERT INTO t2 SELECT * FROM t2; INSERT INTO t2 SELECT * FROM t2; INSERT INTO t2 SELECT * FROM t2; - } db2 + " db2 db2 close @@ -168,9 +184,10 @@ do_test corrupt2-3.1 { close $fd sqlite3 db2 corrupt.db - catchsql { + catchsql " + $::presql DROP TABLE t1; - } db2 + " db2 } {1 {database disk image is malformed}} do_test corrupt2-4.1 { @@ -187,7 +204,8 @@ do_test corrupt2-5.1 { file delete -force corrupt.db-journal sqlite3 db2 corrupt.db - execsql { + execsql " + $::presql PRAGMA auto_vacuum = 0; PRAGMA page_size = 1024; CREATE TABLE t1(a, b, c); @@ -198,7 +216,7 @@ do_test corrupt2-5.1 { INSERT INTO t2 SELECT * FROM t2; INSERT INTO t2 SELECT * FROM t2; INSERT INTO t1 SELECT * FROM t2; - } db2 + " db2 db2 close @@ -219,6 +237,7 @@ do_test corrupt2-5.1 { close $fd sqlite3 db2 corrupt.db + db2 eval $::presql db2 eval {SELECT rowid FROM t1} { set result [db2 eval {pragma integrity_check}] break @@ -242,6 +261,7 @@ proc corruption_test {args} { file delete -force corrupt.db-journal sqlite3 db corrupt.db + db eval $::presql eval $A(-tclprep) db eval $A(-sqlprep) db close @@ -276,7 +296,7 @@ ifcapable autovacuum 
{ hexio_write corrupt.db [expr 1024*5] 00000008 } -test { do_test corrupt2-6.1 { - catchsql { pragma incremental_vacuum = 1 } + catchsql " $::presql pragma incremental_vacuum = 1 " } {1 {database disk image is malformed}} } @@ -297,7 +317,7 @@ ifcapable autovacuum { hexio_write corrupt.db [expr 1024*2 + 8] 00000009 } -test { do_test corrupt2-6.2 { - catchsql { pragma incremental_vacuum = 1 } + catchsql " $::presql pragma incremental_vacuum = 1 " } {1 {database disk image is malformed}} } @@ -318,7 +338,7 @@ ifcapable autovacuum { hexio_write corrupt.db [expr 1024 + ($nPage-3)*5] 010000000 } -test { do_test corrupt2-6.3 { - catchsql { pragma incremental_vacuum = 1 } + catchsql " $::presql pragma incremental_vacuum = 1 " } {1 {database disk image is malformed}} } @@ -334,12 +354,14 @@ ifcapable autovacuum { seek $fd 0 end puts -nonewline $fd [string repeat x $nAppend] close $fd + hexio_write corrupt.db 28 00000000 } -test { do_test corrupt2-6.4 { - catchsql { + catchsql " + $::presql BEGIN EXCLUSIVE; COMMIT; - } + " } {1 {database disk image is malformed}} } } @@ -373,7 +395,7 @@ corruption_test -sqlprep $sqlprep -corrupt { close $fd } -test { do_test corrupt2-7.1 { - catchsql { SELECT b FROM t1 ORDER BY b ASC } + catchsql " $::presql SELECT b FROM t1 ORDER BY b ASC " } {1 {database disk image is malformed}} } @@ -394,7 +416,7 @@ corruption_test -sqlprep $sqlprep -corrupt { close $fd } -test { do_test corrupt2-7.1 { - catchsql { SELECT b FROM t1 ORDER BY b DESC } + catchsql " $::presql SELECT b FROM t1 ORDER BY b DESC " } {1 {database disk image is malformed}} } @@ -412,7 +434,7 @@ corruption_test -sqlprep $sqlprep -corrupt { close $fd } -test { do_test corrupt2-8.1 { - catchsql { SELECT * FROM t1 WHERE rowid=1000 } + catchsql " $::presql SELECT * FROM t1 WHERE rowid=1000 " } {1 {database disk image is malformed}} } @@ -440,7 +462,7 @@ corruption_test -sqlprep { close $fd } -test { do_test corrupt2-9.1 { - catchsql { SELECT sql FROM sqlite_master } + catchsql " $::presql SELECT sql FROM sqlite_master " } {1 {database disk image is malformed}} } @@ -451,7 +473,7 @@ corruption_test -sqlprep { UPDATE sqlite_master SET rootpage = NULL WHERE name = 't2'; } -test { do_test corrupt2-10.1 { - catchsql { SELECT * FROM t2 } + catchsql " $::presql SELECT * FROM t2 " } {1 {malformed database schema (t2)}} do_test corrupt2-10.2 { sqlite3_errcode db @@ -476,7 +498,7 @@ corruption_test -sqlprep { hexio_write corrupt.db 24 12345678 } -test { do_test corrupt2-11.1 { - catchsql { PRAGMA incremental_vacuum } + catchsql " $::presql PRAGMA incremental_vacuum " } {1 {database disk image is malformed}} } corruption_test -sqlprep { @@ -497,7 +519,7 @@ corruption_test -sqlprep { hexio_write corrupt.db 24 12345678 } -test { do_test corrupt2-12.1 { - catchsql { PRAGMA incremental_vacuum } + catchsql " $::presql PRAGMA incremental_vacuum " } {1 {database disk image is malformed}} } @@ -515,13 +537,14 @@ ifcapable autovacuum { INSERT INTO t1 VALUES(NULL, randstr(50,50)); } for {set ii 0} {$ii < 10} {incr ii} { - db eval { INSERT INTO t1 SELECT NULL, randstr(50,50) FROM t1 } + db eval " $::presql INSERT INTO t1 SELECT NULL, randstr(50,50) FROM t1 " } } -corrupt { do_test corrupt2-13.1 { file size corrupt.db } $::sqlite_pending_byte hexio_write corrupt.db [expr $::sqlite_pending_byte+1023] 00 + hexio_write corrupt.db 28 00000000 } -test { do_test corrupt2-13.2 { file size corrupt.db diff --git a/test/corrupt3.test b/test/corrupt3.test index af69f2e..790f521 100644 --- a/test/corrupt3.test +++ b/test/corrupt3.test @@ -18,6 
+18,8 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +if {[sqlite3 -has-codec]} { finish_test ; return } + # We must have the page_size pragma for these tests to work. # ifcapable !pager_pragmas { diff --git a/test/corrupt4.test b/test/corrupt4.test index 952df70..1906113 100644 --- a/test/corrupt4.test +++ b/test/corrupt4.test @@ -18,6 +18,11 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). +# +do_not_use_codec + # We must have the page_size pragma for these tests to work. # ifcapable !pager_pragmas { diff --git a/test/corrupt6.test b/test/corrupt6.test index 2fed806..c0dcedf 100644 --- a/test/corrupt6.test +++ b/test/corrupt6.test @@ -19,6 +19,11 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). +# +do_not_use_codec + # We must have the page_size pragma for these tests to work. # ifcapable !pager_pragmas { diff --git a/test/corrupt7.test b/test/corrupt7.test index 39aa620..ad56656 100644 --- a/test/corrupt7.test +++ b/test/corrupt7.test @@ -19,6 +19,11 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). +# +do_not_use_codec + # We must have the page_size pragma for these tests to work. # ifcapable !pager_pragmas { diff --git a/test/corrupt8.test b/test/corrupt8.test index 994f4aa..012beb5 100644 --- a/test/corrupt8.test +++ b/test/corrupt8.test @@ -19,6 +19,11 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). +# +do_not_use_codec + # We must have the page_size pragma for these tests to work. # ifcapable !pager_pragmas||!autovacuum { diff --git a/test/corrupt9.test b/test/corrupt9.test index d5f59e7..f083cf2 100644 --- a/test/corrupt9.test +++ b/test/corrupt9.test @@ -19,6 +19,11 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). +# +do_not_use_codec + # We must have the page_size pragma for these tests to work. # ifcapable !pager_pragmas { diff --git a/test/corruptA.test b/test/corruptA.test index fa3257f..58e14da 100644 --- a/test/corruptA.test +++ b/test/corruptA.test @@ -37,9 +37,11 @@ integrity_check corruptA-1.2 db close file copy -force test.db test.db-template +set unreadable_version 02 +ifcapable wal { set unreadable_version 03 } do_test corruptA-2.1 { file copy -force test.db-template test.db - hexio_write test.db 19 02 ;# the read format number + hexio_write test.db 19 $unreadable_version ;# the read format number sqlite3 db test.db catchsql {SELECT * FROM t1} } {1 {file is encrypted or is not a database}} diff --git a/test/corruptB.test b/test/corruptB.test index 7d3740b..4aa1d01 100644 --- a/test/corruptB.test +++ b/test/corruptB.test @@ -25,6 +25,11 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). 
+# +do_not_use_codec + do_test corruptB-1.1 { execsql { @@ -154,7 +159,7 @@ do_test corruptB-2.1.1 { do_test corruptB-2.1.2 { sqlite3 db test.db catchsql { SELECT * FROM t1 } -} {1 {database or disk is full}} +} {1 {database disk image is malformed}} #--------------------------------------------------------------------------- diff --git a/test/corruptC.test b/test/corruptC.test index aff7b4a..0580426 100644 --- a/test/corruptC.test +++ b/test/corruptC.test @@ -22,6 +22,11 @@ catch {file delete -force test.db test.db-journal test.bu} set testdir [file dirname $argv0] source $testdir/tester.tcl +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). +# +do_not_use_codec + # Construct a compact, dense database for testing. # do_test corruptC-1.1 { @@ -67,7 +72,7 @@ sqlite3 db test.db set fsize [file size test.db] # Set a quasi-random random seed. -if {[info exists SOAKTEST]} { +if {[info exists ::G(issoak)]} { # If we are doing SOAK tests, we want a different # random seed for each run. Ideally we would like # to use [clock clicks] or something like that here. @@ -276,6 +281,17 @@ do_test corruptC-2.14 { catchsql {DELETE FROM t1 WHERE rowid = (SELECT max(rowid) FROM t1)} } {1 {database disk image is malformed}} +# At one point this particular corrupt database was causing a buffer +# overread. Which caused a crash in a run of all.test once. +# +do_test corruptC-2.15 { + db close + copy_file test.bu test.db + hexio_write test.db 986 b9 + sqlite3 db test.db + catchsql {SELECT count(*) FROM sqlite_master;} +} {1 {malformed database schema (t1i1) - no such table: main.t1}} + # # Now test for a series of quasi-random seeds. # We loop over the entire file size and touch diff --git a/test/corruptE.test b/test/corruptE.test index 35fa545..a47adba 100644 --- a/test/corruptE.test +++ b/test/corruptE.test @@ -21,6 +21,18 @@ catch {file delete -force test.db test.db-journal test.bu} set testdir [file dirname $argv0] source $testdir/tester.tcl +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). +# +do_not_use_codec + +# Do not run the tests in this file if ENABLE_OVERSIZE_CELL_CHECK is on. +# +ifcapable oversize_cell_check { + finish_test + return +} + # Construct a compact, dense database for testing. 
# do_test corruptE-1.1 { diff --git a/test/crash8.test b/test/crash8.test index b4e89f4..6424a1a 100644 --- a/test/crash8.test +++ b/test/crash8.test @@ -258,6 +258,7 @@ ifcapable pragma { INSERT INTO ab VALUES(6, NULL); UPDATE ab SET b = randstr(1000,1000); ATTACH 'test2.db' AS aux; + PRAGMA aux.journal_mode = persist; CREATE TABLE aux.ab(a, b); INSERT INTO aux.ab SELECT * FROM main.ab; @@ -382,7 +383,7 @@ for {set i 1} {$i < 10} {incr i} { BEGIN; UPDATE t1 SET x = randomblob(900); } - file delete -force testX.db testX.db-journal + file delete -force testX.db testX.db-journal testX.db-wal copy_file test.db testX.db copy_file test.db-journal testX.db-journal db close diff --git a/test/ctime.test b/test/ctime.test index bb184b8..e4cb156 100644 --- a/test/ctime.test +++ b/test/ctime.test @@ -86,8 +86,11 @@ do_test ctime-1.5 { set ans2 [ catchsql { SELECT sqlite_compileoption_used('THREADSAFE=1'); } ] - lsort [ list $ans1 $ans2 ] -} {{0 0} {0 1}} + set ans3 [ catchsql { + SELECT sqlite_compileoption_used('THREADSAFE=2'); + } ] + lsort [ list $ans1 $ans2 $ans3 ] +} {{0 0} {0 0} {0 1}} do_test ctime-1.6 { execsql { diff --git a/test/date.test b/test/date.test index df522c2..29b5ed4 100644 --- a/test/date.test +++ b/test/date.test @@ -16,6 +16,11 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). +# +do_not_use_codec + # Skip this whole file if date and time functions are omitted # at compile-time # @@ -495,30 +500,32 @@ datetest 13.34 {date('2001-01-01','-1.5 years')} {1999-07-02} # Put a floating point number in the database so that we can manipulate # raw bits using the hexio interface. # -do_test date-14.1 { - execsql { - PRAGMA auto_vacuum=OFF; - PRAGMA page_size = 1024; - CREATE TABLE t1(x); - INSERT INTO t1 VALUES(1.1); +if {0==[sqlite3 -has-codec]} { + do_test date-14.1 { + execsql { + PRAGMA auto_vacuum=OFF; + PRAGMA page_size = 1024; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(1.1); + } + db close + hexio_write test.db 2040 4142ba32bffffff9 + sqlite3 db test.db + db eval {SELECT * FROM t1} + } {2454629.5} + + # Changing the least significant byte of the floating point value between + # 00 and FF should always generate a time of either 23:59:59 or 00:00:00, + # never 24:00:00 + # + for {set i 0} {$i<=255} {incr i} { + db close + hexio_write test.db 2047 [format %02x $i] + sqlite3 db test.db + do_test date-14.2.$i { + set date [db one {SELECT datetime(x) FROM t1}] + expr {$date eq "2008-06-12 00:00:00" || $date eq "2008-06-11 23:59:59"} + } {1} } - db close - hexio_write test.db 2040 4142ba32bffffff9 - sqlite3 db test.db - db eval {SELECT * FROM t1} -} {2454629.5} - -# Changing the least significant byte of the floating point value between -# 00 and FF should always generate a time of either 23:59:59 or 00:00:00, -# never 24:00:00 -# -for {set i 0} {$i<=255} {incr i} { - db close - hexio_write test.db 2047 [format %02x $i] - sqlite3 db test.db - do_test date-14.2.$i { - set date [db one {SELECT datetime(x) FROM t1}] - expr {$date eq "2008-06-12 00:00:00" || $date eq "2008-06-11 23:59:59"} - } {1} } finish_test diff --git a/test/dbstatus.test b/test/dbstatus.test new file mode 100644 index 0000000..c7a6b58 --- /dev/null +++ b/test/dbstatus.test @@ -0,0 +1,45 @@ +# 2010 March 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. 
+# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Tests for the sqlite3_db_status() function +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + + +# Make sure sqlite3_db_config() and sqlite3_db_status are working. +# +unset -nocomplain PAGESZ +unset -nocomplain BASESZ +do_test dbstatus-1.1 { + db close + sqlite3 db :memory: + db eval { + CREATE TABLE t1(x); + } + set sz1 [lindex [sqlite3_db_status db SQLITE_DBSTATUS_CACHE_USED 0] 1] + db eval { + CREATE TABLE t2(y); + } + set sz2 [lindex [sqlite3_db_status db SQLITE_DBSTATUS_CACHE_USED 0] 1] + set ::PAGESZ [expr {$sz2-$sz1}] + set ::BASESZ [expr {$sz1-$::PAGESZ}] + expr {$::PAGESZ>1024 && $::PAGESZ<1200} +} {1} +do_test dbstatus-1.2 { + db eval { + INSERT INTO t1 VALUES(zeroblob(9000)); + } + lindex [sqlite3_db_status db SQLITE_DBSTATUS_CACHE_USED 0] 1 +} [expr {$BASESZ + 10*$PAGESZ}] + +finish_test diff --git a/test/descidx1.test b/test/descidx1.test index 60cf969..1154dc5 100644 --- a/test/descidx1.test +++ b/test/descidx1.test @@ -17,6 +17,11 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). +# +do_not_use_codec + db eval {PRAGMA legacy_file_format=OFF} # This procedure sets the value of the file-format in file 'test.db' diff --git a/test/descidx2.test b/test/descidx2.test index 69a88f1..fdc3eb0 100644 --- a/test/descidx2.test +++ b/test/descidx2.test @@ -17,6 +17,12 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). +# +do_not_use_codec + + db eval {PRAGMA legacy_file_format=OFF} # This procedure sets the value of the file-format in file 'test.db' diff --git a/test/descidx3.test b/test/descidx3.test index 5cfd1f6..3cc87af 100644 --- a/test/descidx3.test +++ b/test/descidx3.test @@ -17,6 +17,11 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). +# +do_not_use_codec + ifcapable !bloblit { finish_test return diff --git a/test/e_expr.test b/test/e_expr.test new file mode 100644 index 0000000..2ca5ac7 --- /dev/null +++ b/test/e_expr.test @@ -0,0 +1,332 @@ +# 2010 July 16 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file implements tests to verify that the "testable statements" in +# the lang_expr.html document are correct. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/malloc_common.tcl + +# Set up three global variables: +# +# ::opname An array mapping from SQL operator to an easy to parse +# name. The names are used as part of test case names. +# +# ::opprec An array mapping from SQL operator to a numeric +# precedence value. Operators that group more tightly +# have lower numeric precedences. +# +# ::oplist A list of all SQL operators supported by SQLite. 
+# +foreach {op opn} { + || cat * mul / div % mod + add + - sub << lshift >> rshift & bitand | bitor + < less <= lesseq > more >= moreeq = eq1 + == eq2 <> ne1 != ne2 IS is LIKE like + GLOB glob AND and OR or MATCH match REGEXP regexp + {IS NOT} isnt +} { + set ::opname($op) $opn +} +set oplist [list] +foreach {prec opl} { + 1 || + 2 {* / %} + 3 {+ -} + 4 {<< >> & |} + 5 {< <= > >=} + 6 {= == != <> IS {IS NOT} LIKE GLOB MATCH REGEXP} + 7 AND + 8 OR +} { + foreach op $opl { + set ::opprec($op) $prec + lappend oplist $op + } +} + + +# Hook in definitions of MATCH and REGEX. The following implementations +# cause MATCH and REGEX to behave similarly to the == operator. +# +proc matchfunc {a b} { return [expr {$a==$b}] } +proc regexfunc {a b} { return [expr {$a==$b}] } +db func match -argcount 2 matchfunc +db func regexp -argcount 2 regexfunc + +#------------------------------------------------------------------------- +# Test cases e_expr-1.* attempt to verify that all binary operators listed +# in the documentation exist and that the relative precedences of the +# operators are also as the documentation suggests. +# +# EVIDENCE-OF: R-15514-65163 SQLite understands the following binary +# operators, in order from highest to lowest precedence: || * / % + - +# << >> & | < <= > >= = == != <> IS IS +# NOT IN LIKE GLOB MATCH REGEXP AND OR +# +# EVIDENCE-OF: R-38759-38789 Operators IS and IS NOT have the same +# precedence as =. +# + +unset -nocomplain untested +foreach op1 $oplist { + foreach op2 $oplist { + set untested($op1,$op2) 1 + foreach {tn A B C} { + 1 22 45 66 + 2 0 0 0 + 3 0 0 1 + 4 0 1 0 + 5 0 1 1 + 6 1 0 0 + 7 1 0 1 + 8 1 1 0 + 9 1 1 1 + 10 5 6 1 + 11 1 5 6 + 12 1 5 5 + 13 5 5 1 + + 14 5 2 1 + 15 1 4 1 + 16 -1 0 1 + 17 0 1 -1 + + } { + set testname "e_expr-1.$opname($op1).$opname($op2).$tn" + + # If $op2 groups more tightly than $op1, then the result + # of executing $sql1 whould be the same as executing $sql3. + # If $op1 groups more tightly, or if $op1 and $op2 have + # the same precedence, then executing $sql1 should return + # the same value as $sql2. + # + set sql1 "SELECT $A $op1 $B $op2 $C" + set sql2 "SELECT ($A $op1 $B) $op2 $C" + set sql3 "SELECT $A $op1 ($B $op2 $C)" + + set a2 [db one $sql2] + set a3 [db one $sql3] + + do_execsql_test $testname $sql1 [list [ + if {$opprec($op2) < $opprec($op1)} {set a3} {set a2} + ]] + if {$a2 != $a3} { unset -nocomplain untested($op1,$op2) } + } + } +} + +foreach op {* AND OR + || & |} { unset untested($op,$op) } +unset untested(+,-) ;# Since (a+b)-c == a+(b-c) +unset untested(*,<<) ;# Since (a*b)< work as the not-equals operator. +# +# EVIDENCE-OF: R-03679-60639 Equals can be either = or ==. +# +# EVIDENCE-OF: R-30082-38996 The non-equals operator can be either != or +# <>. +# +foreach {tn literal different} { + 1 'helloworld' '12345' + 2 22 23 + 3 'xyz' X'78797A' + 4 X'78797A00' 'xyz' +} { + do_execsql_test e_expr-4.$tn " + SELECT $literal = $literal, $literal == $literal, + $literal = $different, $literal == $different, + $literal = NULL, $literal == NULL, + $literal != $literal, $literal <> $literal, + $literal != $different, $literal <> $different, + $literal != NULL, $literal != NULL + + " {1 1 0 0 {} {} 0 0 1 1 {} {}} +} + +#------------------------------------------------------------------------- +# Test the || operator. +# +# EVIDENCE-OF: R-44409-62641 The || operator is "concatenate" - it joins +# together the two strings of its operands. 
+# +foreach {tn a b} { + 1 'helloworld' '12345' + 2 22 23 +} { + set as [db one "SELECT $a"] + set bs [db one "SELECT $b"] + + do_execsql_test e_expr-5.$tn "SELECT $a || $b" [list "${as}${bs}"] +} + +#------------------------------------------------------------------------- +# Test the % operator. +# +# EVIDENCE-OF: R-08914-63790 The operator % outputs the value of its +# left operand modulo its right operand. +# +do_execsql_test e_expr-6.1 {SELECT 72%5} {2} +do_execsql_test e_expr-6.2 {SELECT 72%-5} {2} +do_execsql_test e_expr-6.3 {SELECT -72%-5} {-2} +do_execsql_test e_expr-6.4 {SELECT -72%5} {-2} + +#------------------------------------------------------------------------- +# Test that the results of all binary operators are either numeric or +# NULL, except for the || operator, which may evaluate to either a text +# value or NULL. +# +# EVIDENCE-OF: R-20665-17792 The result of any binary operator is either +# a numeric value or NULL, except for the || concatenation operator +# which always evaluates to either NULL or a text value. +# +set literals { + 1 'abc' 2 'hexadecimal' 3 '' + 4 123 5 -123 6 0 + 7 123.4 8 0.0 9 -123.4 + 10 X'ABCDEF' 11 X'' 12 X'0000' + 13 NULL +} +foreach op $oplist { + foreach {n1 rhs} $literals { + foreach {n2 lhs} $literals { + + set t [db one " SELECT typeof($lhs $op $rhs) "] + do_test e_expr-7.$opname($op).$n1.$n2 { + expr { + ($op=="||" && ($t == "text" || $t == "null")) + || ($op!="||" && ($t == "integer" || $t == "real" || $t == "null")) + } + } 1 + + }} +} + +#------------------------------------------------------------------------- +# Test the IS and IS NOT operators. +# +# EVIDENCE-OF: R-24731-45773 The IS and IS NOT operators work like = and +# != except when one or both of the operands are NULL. +# +# EVIDENCE-OF: R-06325-15315 In this case, if both operands are NULL, +# then the IS operator evaluates to 1 (true) and the IS NOT operator +# evaluates to 0 (false). +# +# EVIDENCE-OF: R-19812-36779 If one operand is NULL and the other is +# not, then the IS operator evaluates to 0 (false) and the IS NOT +# operator is 1 (true). +# +# EVIDENCE-OF: R-61975-13410 It is not possible for an IS or IS NOT +# expression to evaluate to NULL. 
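(Editorial aside, not part of the patch: the EVIDENCE-OF statements above describe the one case where IS / IS NOT diverge from = / != , namely NULL operands. The e_expr-8.* Tcl tests that follow exercise exactly this through the test harness; as a supplementary illustration the same behaviour can be observed through the public C API. This is a minimal sketch using only documented sqlite3_* calls.)

/*
** Illustrative sketch only: "NULL = NULL" evaluates to NULL, while
** "NULL IS NULL" and "'ab' IS NOT NULL" evaluate to integer 1, so an
** IS / IS NOT expression can never itself be NULL.
*/
#include <assert.h>
#include "sqlite3.h"

static void check_is_operator(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  sqlite3_prepare_v2(db,
      "SELECT NULL = NULL, NULL IS NULL, 'ab' IS NOT NULL", -1, &pStmt, 0);
  sqlite3_step(pStmt);
  assert( sqlite3_column_type(pStmt, 0)==SQLITE_NULL );  /* "=" yields NULL  */
  assert( sqlite3_column_int(pStmt, 1)==1 );             /* IS yields true   */
  assert( sqlite3_column_int(pStmt, 2)==1 );             /* IS NOT is true   */
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
}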
+# +do_execsql_test e_expr-8.1.1 { SELECT NULL IS NULL } {1} +do_execsql_test e_expr-8.1.2 { SELECT 'ab' IS NULL } {0} +do_execsql_test e_expr-8.1.3 { SELECT NULL IS 'ab' } {0} +do_execsql_test e_expr-8.1.4 { SELECT 'ab' IS 'ab' } {1} +do_execsql_test e_expr-8.1.5 { SELECT NULL == NULL } {{}} +do_execsql_test e_expr-8.1.6 { SELECT 'ab' == NULL } {{}} +do_execsql_test e_expr-8.1.7 { SELECT NULL == 'ab' } {{}} +do_execsql_test e_expr-8.1.8 { SELECT 'ab' == 'ab' } {1} +do_execsql_test e_expr-8.1.9 { SELECT NULL IS NOT NULL } {0} +do_execsql_test e_expr-8.1.10 { SELECT 'ab' IS NOT NULL } {1} +do_execsql_test e_expr-8.1.11 { SELECT NULL IS NOT 'ab' } {1} +do_execsql_test e_expr-8.1.12 { SELECT 'ab' IS NOT 'ab' } {0} +do_execsql_test e_expr-8.1.13 { SELECT NULL != NULL } {{}} +do_execsql_test e_expr-8.1.14 { SELECT 'ab' != NULL } {{}} +do_execsql_test e_expr-8.1.15 { SELECT NULL != 'ab' } {{}} +do_execsql_test e_expr-8.1.16 { SELECT 'ab' != 'ab' } {0} + +foreach {n1 rhs} $literals { + foreach {n2 lhs} $literals { + if {$rhs!="NULL" && $lhs!="NULL"} { + set eq [execsql "SELECT $lhs = $rhs, $lhs != $rhs"] + } else { + set eq [list [expr {$lhs=="NULL" && $rhs=="NULL"}] \ + [expr {$lhs!="NULL" || $rhs!="NULL"}] + ] + } + set test e_expr-8.2.$n1.$n2 + do_execsql_test $test.1 "SELECT $lhs IS $rhs, $lhs IS NOT $rhs" $eq + do_execsql_test $test.2 " + SELECT ($lhs IS $rhs) IS NULL, ($lhs IS NOT $rhs) IS NULL + " {0 0} + } +} + +finish_test diff --git a/test/e_fts3.test b/test/e_fts3.test index 6a14d50..3f7ed0e 100644 --- a/test/e_fts3.test +++ b/test/e_fts3.test @@ -252,6 +252,7 @@ ddl_test 1.3.2.8 { DROP TABLE docs } # Test the examples in section 3 (full-text index queries). # ddl_test 1.4.1.1 { CREATE VIRTUAL TABLE docs USING fts3(title, body) } +unset -nocomplain R foreach {tn title body} { 2 "linux driver" "a device" 3 "driver" "linguistic trick" diff --git a/test/exclusive.test b/test/exclusive.test index 2c1b350..4f20a6b 100644 --- a/test/exclusive.test +++ b/test/exclusive.test @@ -212,7 +212,7 @@ do_test exclusive-2.8 { ROLLBACK; } db2 } {} -sqlite3_soft_heap_limit $soft_limit +sqlite3_soft_heap_limit $cmdlinearg(soft-heap-limit) do_test exclusive-2.9 { # Write the database to establish the exclusive lock with connection 'db. diff --git a/test/exclusive2.test b/test/exclusive2.test index f38ddcf..00e2c16 100644 --- a/test/exclusive2.test +++ b/test/exclusive2.test @@ -299,6 +299,6 @@ do_test exclusive2-3.6 { } readPagerChangeCounter test.db } {5} -sqlite3_soft_heap_limit $soft_limit +sqlite3_soft_heap_limit $cmdlinearg(soft-heap-limit) finish_test diff --git a/test/filectrl.test b/test/filectrl.test index fe89a62..67e8183 100644 --- a/test/filectrl.test +++ b/test/filectrl.test @@ -34,7 +34,7 @@ do_test filectrl-1.4 { do_test filectrl-1.5 { db close sqlite3 db test_control_lockproxy.db - file_control_lockproxy_test db + file_control_lockproxy_test db [pwd] } {} db close file delete -force .test_control_lockproxy.db-conch test.proxy diff --git a/test/filefmt.test b/test/filefmt.test index 07cc5ca..28bc5fc 100644 --- a/test/filefmt.test +++ b/test/filefmt.test @@ -16,6 +16,12 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl + +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). 
+# +do_not_use_codec + db close file delete -force test.db test.db-journal diff --git a/test/fkey2.test b/test/fkey2.test index 10a624e..c530e9f 100644 --- a/test/fkey2.test +++ b/test/fkey2.test @@ -74,9 +74,15 @@ ifcapable {!foreignkey||!trigger} { # fkey2-18.*: Test that the authorization callback is invoked when processing # FK constraints. # +# fkey2-20.*: Test that ON CONFLICT clauses specified as part of statements +# do not affect the operation of FK constraints. +# # fkey2-genfkey.*: Tests that were used with the shell tool .genfkey # command. Recycled to test the built-in implementation. # +# fkey2-dd08e5.*: Tests to verify that ticket dd08e5a988d00decc4a543daa8d +# has been fixed. +# execsql { PRAGMA foreign_keys = on } @@ -1410,7 +1416,7 @@ do_test fkey2-17.1.2 { } {SQLITE_CONSTRAINT} do_test fkey2-17.1.3 { sqlite3_step $STMT -} {SQLITE_MISUSE} +} {SQLITE_CONSTRAINT} do_test fkey2-17.1.4 { sqlite3_finalize $STMT } {SQLITE_CONSTRAINT} @@ -1598,6 +1604,121 @@ ifcapable auth { unset authargs } + +do_test fkey2-19.1 { + execsql { + CREATE TABLE main(id INTEGER PRIMARY KEY); + CREATE TABLE sub(id INT REFERENCES main(id)); + INSERT INTO main VALUES(1); + INSERT INTO main VALUES(2); + INSERT INTO sub VALUES(2); + } +} {} +do_test fkey2-19.2 { + set S [sqlite3_prepare_v2 db "DELETE FROM main WHERE id = ?" -1 dummy] + sqlite3_bind_int $S 1 2 + sqlite3_step $S +} {SQLITE_CONSTRAINT} +do_test fkey2-19.3 { + sqlite3_reset $S +} {SQLITE_CONSTRAINT} +do_test fkey2-19.4 { + sqlite3_bind_int $S 1 1 + sqlite3_step $S +} {SQLITE_DONE} +do_test fkey2-19.4 { + sqlite3_finalize $S +} {SQLITE_OK} + +drop_all_tables +do_test fkey2-20.1 { + execsql { + CREATE TABLE pp(a PRIMARY KEY, b); + CREATE TABLE cc(c PRIMARY KEY, d REFERENCES pp); + } +} {} + +foreach {tn insert} { + 1 "INSERT" + 2 "INSERT OR IGNORE" + 3 "INSERT OR ABORT" + 4 "INSERT OR ROLLBACK" + 5 "INSERT OR REPLACE" + 6 "INSERT OR FAIL" +} { + do_test fkey2-20.2.$tn.1 { + catchsql "$insert INTO cc VALUES(1, 2)" + } {1 {foreign key constraint failed}} + do_test fkey2-20.2.$tn.2 { + execsql { SELECT * FROM cc } + } {} + do_test fkey2-20.2.$tn.3 { + execsql { + BEGIN; + INSERT INTO pp VALUES(2, 'two'); + INSERT INTO cc VALUES(1, 2); + } + catchsql "$insert INTO cc VALUES(3, 4)" + } {1 {foreign key constraint failed}} + do_test fkey2-20.2.$tn.4 { + execsql { COMMIT ; SELECT * FROM cc } + } {1 2} + do_test fkey2-20.2.$tn.5 { + execsql { DELETE FROM cc ; DELETE FROM pp } + } {} +} + +foreach {tn update} { + 1 "UPDATE" + 2 "UPDATE OR IGNORE" + 3 "UPDATE OR ABORT" + 4 "UPDATE OR ROLLBACK" + 5 "UPDATE OR REPLACE" + 6 "UPDATE OR FAIL" +} { + do_test fkey2-20.3.$tn.1 { + execsql { + INSERT INTO pp VALUES(2, 'two'); + INSERT INTO cc VALUES(1, 2); + } + } {} + do_test fkey2-20.3.$tn.2 { + catchsql "$update pp SET a = 1" + } {1 {foreign key constraint failed}} + do_test fkey2-20.3.$tn.3 { + execsql { SELECT * FROM pp } + } {2 two} + do_test fkey2-20.3.$tn.4 { + catchsql "$update cc SET d = 1" + } {1 {foreign key constraint failed}} + do_test fkey2-20.3.$tn.5 { + execsql { SELECT * FROM cc } + } {1 2} + do_test fkey2-20.3.$tn.6 { + execsql { + BEGIN; + INSERT INTO pp VALUES(3, 'three'); + } + catchsql "$update pp SET a = 1 WHERE a = 2" + } {1 {foreign key constraint failed}} + do_test fkey2-20.3.$tn.7 { + execsql { COMMIT ; SELECT * FROM pp } + } {2 two 3 three} + do_test fkey2-20.3.$tn.8 { + execsql { + BEGIN; + INSERT INTO cc VALUES(2, 2); + } + catchsql "$update cc SET d = 1 WHERE c = 1" + } {1 {foreign key constraint failed}} + do_test fkey2-20.3.$tn.9 
{ + execsql { COMMIT ; SELECT * FROM cc } + } {1 2 2 2} + do_test fkey2-20.3.$tn.10 { + execsql { DELETE FROM cc ; DELETE FROM pp } + } {} +} + #------------------------------------------------------------------------- # The following block of tests, those prefixed with "fkey2-genfkey.", are # the same tests that were used to test the ".genfkey" command provided @@ -1774,4 +1895,46 @@ do_test fkey2-genfkey.3.6 { } } {hello {} {}} +#------------------------------------------------------------------------- +# Verify that ticket dd08e5a988d00decc4a543daa8dbbfab9c577ad8 has been +# fixed. +# +do_test fkey2-dd08e5.1.1 { + execsql { + PRAGMA foreign_keys=ON; + CREATE TABLE tdd08(a INTEGER PRIMARY KEY, b); + CREATE UNIQUE INDEX idd08 ON tdd08(a,b); + INSERT INTO tdd08 VALUES(200,300); + + CREATE TABLE tdd08_b(w,x,y, FOREIGN KEY(x,y) REFERENCES tdd08(a,b)); + INSERT INTO tdd08_b VALUES(100,200,300); + } +} {} +do_test fkey2-dd08e5.1.2 { + catchsql { + DELETE FROM tdd08; + } +} {1 {foreign key constraint failed}} +do_test fkey2-dd08e5.1.3 { + execsql { + SELECT * FROM tdd08; + } +} {200 300} +do_test fkey2-dd08e5.1.4 { + catchsql { + INSERT INTO tdd08_b VALUES(400,500,300); + } +} {1 {foreign key constraint failed}} +do_test fkey2-dd08e5.1.5 { + catchsql { + UPDATE tdd08_b SET x=x+1; + } +} {1 {foreign key constraint failed}} +do_test fkey2-dd08e5.1.6 { + catchsql { + UPDATE tdd08 SET a=a+1; + } +} {1 {foreign key constraint failed}} + + finish_test diff --git a/test/fts2.test b/test/fts2.test index fa49b06..b1e2959 100644 --- a/test/fts2.test +++ b/test/fts2.test @@ -21,7 +21,7 @@ while {[set arg [lshift argv]] != ""} { sqlite3_enable_shared_cache 1 } -soak { - set SOAKTEST 1 + set G(issoak) 1 } default { set argv [linsert $argv 0 $arg] @@ -38,7 +38,7 @@ ifcapable !fts2 { } rename finish_test really_finish_test proc finish_test {} {} -set ISQUICK 1 +set G(isquick) 1 set EXCLUDE { fts2.test @@ -58,8 +58,7 @@ foreach testfile [lsort -dictionary [glob $testdir/fts2*.test]] { catch {db close} if {$sqlite_open_file_count>0} { puts "$tail did not close all files: $sqlite_open_file_count" - incr nErr - lappend ::failList $tail + fail_test $tail set sqlite_open_file_count 0 } } diff --git a/test/fts3.test b/test/fts3.test index b22ec88..f42220b 100644 --- a/test/fts3.test +++ b/test/fts3.test @@ -9,62 +9,11 @@ # # $Id: fts3.test,v 1.2 2008/07/23 18:17:32 drh Exp $ -proc lshift {lvar} { - upvar $lvar l - set ret [lindex $l 0] - set l [lrange $l 1 end] - return $ret -} -while {[set arg [lshift argv]] != ""} { - switch -- $arg { - -sharedpagercache { - sqlite3_enable_shared_cache 1 - } - -soak { - set SOAKTEST 1 - } - default { - set argv [linsert $argv 0 $arg] - break - } - } -} - set testdir [file dirname $argv0] -source $testdir/tester.tcl -# If SQLITE_ENABLE_FTS3 is defined, omit this file. -ifcapable !fts3 { - return -} -rename finish_test really_finish_test -proc finish_test {} {} -set ISQUICK 1 +source $testdir/permutations.test -set EXCLUDE { - fts3.test - fts3malloc.test - fts3rnd.test +ifcapable fts3 { + run_test_suite fts3 } -# Files to include in the test. If this list is empty then everything -# that is not in the EXCLUDE list is run. 
-# -set INCLUDE { -} - -foreach testfile [lsort -dictionary [glob $testdir/fts3*.test]] { - set tail [file tail $testfile] - if {[lsearch -exact $EXCLUDE $tail]>=0} continue - if {[llength $INCLUDE]>0 && [lsearch -exact $INCLUDE $tail]<0} continue - source $testfile - catch {db close} - if {$sqlite_open_file_count>0} { - puts "$tail did not close all files: $sqlite_open_file_count" - incr nErr - lappend ::failList $tail - set sqlite_open_file_count 0 - } -} - -set sqlite_open_file_count 0 -really_finish_test +finish_test diff --git a/test/fts3_common.tcl b/test/fts3_common.tcl index ce6678d..4d4ae38 100644 --- a/test/fts3_common.tcl +++ b/test/fts3_common.tcl @@ -288,155 +288,3 @@ proc fts3_read {tbl where varname} { ########################################################################## -#------------------------------------------------------------------------- -# This proc is used to test a single SELECT statement. Parameter $name is -# passed a name for the test case (i.e. "fts3_malloc-1.4.1") and parameter -# $sql is passed the text of the SELECT statement. Parameter $result is -# set to the expected output if the SELECT statement is successfully -# executed using [db eval]. -# -# Example: -# -# do_select_test testcase-1.1 "SELECT 1+1, 1+2" {1 2} -# -# If global variable DO_MALLOC_TEST is set to a non-zero value, or if -# it is not defined at all, then OOM testing is performed on the SELECT -# statement. Each OOM test case is said to pass if either (a) executing -# the SELECT statement succeeds and the results match those specified -# by parameter $result, or (b) TCL throws an "out of memory" error. -# -# If DO_MALLOC_TEST is defined and set to zero, then the SELECT statement -# is executed just once. In this case the test case passes if the results -# match the expected results passed via parameter $result. -# -proc do_select_test {name sql result} { - uplevel [list doPassiveTest 0 $name $sql [list 0 $result]] -} - -proc do_restart_select_test {name sql result} { - uplevel [list doPassiveTest 1 $name $sql [list 0 $result]] -} - -proc do_error_test {name sql error} { - uplevel [list doPassiveTest 0 $name $sql [list 1 $error]] -} - -proc doPassiveTest {isRestart name sql catchres} { - if {![info exists ::DO_MALLOC_TEST]} { set ::DO_MALLOC_TEST 1 } - - switch $::DO_MALLOC_TEST { - 0 { # No malloc failures. - do_test $name [list set {} [uplevel [list catchsql $sql]]] $catchres - return - } - 1 { # Simulate transient failures. - set nRepeat 1 - set zName "transient" - set nStartLimit 100000 - set nBackup 1 - } - 2 { # Simulate persistent failures. - set nRepeat 1 - set zName "persistent" - set nStartLimit 100000 - set nBackup 1 - } - 3 { # Simulate transient failures with extra brute force. - set nRepeat 100000 - set zName "ridiculous" - set nStartLimit 1 - set nBackup 10 - } - } - - # The set of acceptable results from running [catchsql $sql]. 
- # - set answers [list {1 {out of memory}} $catchres] - set str [join $answers " OR "] - - set nFail 1 - for {set iLimit $nStartLimit} {$nFail} {incr iLimit} { - for {set iFail 1} {$nFail && $iFail<=$iLimit} {incr iFail} { - for {set iTest 0} {$iTest<$nBackup && ($iFail-$iTest)>0} {incr iTest} { - - if {$isRestart} { sqlite3 db test.db } - - sqlite3_memdebug_fail [expr $iFail-$iTest] -repeat $nRepeat - set res [uplevel [list catchsql $sql]] - if {[lsearch -exact $answers $res]>=0} { set res $str } - set testname "$name.$zName.$iFail" - do_test "$name.$zName.$iLimit.$iFail" [list set {} $res] $str - - set nFail [sqlite3_memdebug_fail -1 -benigncnt nBenign] - } - } - } -} - - -#------------------------------------------------------------------------- -# Test a single write to the database. In this case a "write" is a -# DELETE, UPDATE or INSERT statement. -# -# If OOM testing is performed, there are several acceptable outcomes: -# -# 1) The write succeeds. No error is returned. -# -# 2) An "out of memory" exception is thrown and: -# -# a) The statement has no effect, OR -# b) The current transaction is rolled back, OR -# c) The statement succeeds. This can only happen if the connection -# is in auto-commit mode (after the statement is executed, so this -# includes COMMIT statements). -# -# If the write operation eventually succeeds, zero is returned. If a -# transaction is rolled back, non-zero is returned. -# -# Parameter $name is the name to use for the test case (or test cases). -# The second parameter, $tbl, should be the name of the database table -# being modified. Parameter $sql contains the SQL statement to test. -# -proc do_write_test {name tbl sql} { - if {![info exists ::DO_MALLOC_TEST]} { set ::DO_MALLOC_TEST 1 } - - # Figure out an statement to get a checksum for table $tbl. - db eval "SELECT * FROM $tbl" V break - set cksumsql "SELECT md5sum([join [concat rowid $V(*)] ,]) FROM $tbl" - - # Calculate the initial table checksum. - set cksum1 [db one $cksumsql] - - if {$::DO_MALLOC_TEST } { - set answers [list {1 {out of memory}} {0 {}}] - if {$::DO_MALLOC_TEST==1} { - set modes {100000 transient} - } else { - set modes {1 persistent} - } - } else { - set answers [list {0 {}}] - set modes [list 0 nofail] - } - set str [join $answers " OR "] - - foreach {nRepeat zName} $modes { - for {set iFail 1} 1 {incr iFail} { - if {$::DO_MALLOC_TEST} {sqlite3_memdebug_fail $iFail -repeat $nRepeat} - - set res [uplevel [list catchsql $sql]] - set nFail [sqlite3_memdebug_fail -1 -benigncnt nBenign] - if {$nFail==0} { - do_test $name.$zName.$iFail [list set {} $res] {0 {}} - return - } else { - if {[lsearch $answers $res]>=0} { - set res $str - } - do_test $name.$zName.$iFail [list set {} $res] $str - set cksum2 [db one $cksumsql] - if {$cksum1 != $cksum2} return - } - } - } -} diff --git a/test/fts3an.test b/test/fts3an.test index 77ca9e5..5211027 100644 --- a/test/fts3an.test +++ b/test/fts3an.test @@ -185,12 +185,34 @@ do_test fts3an-3.1 { set t } $ret -# TODO(shess) It would be useful to test a couple edge cases, but I -# don't know if we have the precision to manage it from here at this -# time. Prefix hits can cross leaves, which the code above _should_ -# hit by virtue of size. There are two variations on this. If the -# tree is 2 levels high, the code will find the leaf-node extent -# directly, but if it is higher, the code will have to follow two -# separate interior branches down the tree. Both should be tested. 
+# Test a boundary condition: More than 2^16 terms that match a searched for +# prefix in a single segment. +# +puts "This next test can take a little while (~ 30 seconds)..." +do_test fts3an-4.1 { + execsql { CREATE VIRTUAL TABLE ft USING fts3(x) } + execsql BEGIN + execsql { INSERT INTO ft VALUES(NULL) } + execsql { INSERT INTO ft SELECT * FROM ft } ;# 2 + execsql { INSERT INTO ft SELECT * FROM ft } ;# 4 + execsql { INSERT INTO ft SELECT * FROM ft } ;# 8 + execsql { INSERT INTO ft SELECT * FROM ft } ;# 16 + execsql { INSERT INTO ft SELECT * FROM ft } ;# 32 + execsql { INSERT INTO ft SELECT * FROM ft } ;# 64 + execsql { INSERT INTO ft SELECT * FROM ft } ;# 128 + execsql { INSERT INTO ft SELECT * FROM ft } ;# 256 + execsql { INSERT INTO ft SELECT * FROM ft } ;# 512 + execsql { INSERT INTO ft SELECT * FROM ft } ;# 1024 + execsql { INSERT INTO ft SELECT * FROM ft } ;# 2048 + execsql { INSERT INTO ft SELECT * FROM ft } ;# 4096 + execsql { INSERT INTO ft SELECT * FROM ft } ;# 8192 + execsql { INSERT INTO ft SELECT * FROM ft } ;# 16384 + execsql { INSERT INTO ft SELECT * FROM ft } ;# 32768 + execsql { INSERT INTO ft SELECT * FROM ft } ;# 65536 + execsql { INSERT INTO ft SELECT * FROM ft } ;# 131072 + execsql COMMIT + execsql { UPDATE ft SET x = 'abc' || rowid } + execsql { SELECT count(*) FROM ft WHERE x MATCH 'abc*' } +} {131072} finish_test diff --git a/test/fts3query.test b/test/fts3query.test index b5af50a..68467df 100644 --- a/test/fts3query.test +++ b/test/fts3query.test @@ -20,6 +20,7 @@ source $testdir/tester.tcl # If this build does not include FTS3, skip the tests in this file. # ifcapable !fts3 { finish_test ; return } +source $testdir/malloc_common.tcl source $testdir/fts3_common.tcl set DO_MALLOC_TEST 0 @@ -101,5 +102,33 @@ do_test fts3query-3.3 { execsql { SELECT mit(matchinfo(foobar)) FROM foobar WHERE foobar MATCH 'the' } } {{1 1 3 3 1}} +# The following tests check that ticket 775b39dd3c has been fixed. +# +proc eqp {sql} { + uplevel [list execsql "EXPLAIN QUERY PLAN $sql"] +} +do_test fts3query-4.1 { + execsql { + DROP TABLE IF EXISTS t1; + CREATE TABLE t1(number INTEGER PRIMARY KEY, date); + CREATE INDEX i1 ON t1(date); + CREATE VIRTUAL TABLE ft USING fts3(title); + CREATE TABLE bt(title); + } +} {} +do_test fts3query-4.2 { + eqp "SELECT t1.number FROM t1, ft WHERE t1.number=ft.rowid ORDER BY t1.date" +} {0 0 {TABLE t1 WITH INDEX i1 ORDER BY} 1 1 {TABLE ft VIRTUAL TABLE INDEX 1:}} +do_test fts3query-4.3 { + eqp "SELECT t1.number FROM ft, t1 WHERE t1.number=ft.rowid ORDER BY t1.date" +} {0 1 {TABLE t1 WITH INDEX i1 ORDER BY} 1 0 {TABLE ft VIRTUAL TABLE INDEX 1:}} +do_test fts3query-4.4 { + eqp "SELECT t1.number FROM t1, bt WHERE t1.number=bt.rowid ORDER BY t1.date" +} {0 0 {TABLE t1 WITH INDEX i1 ORDER BY} 1 1 {TABLE bt USING PRIMARY KEY}} +do_test fts3query-4.5 { + eqp "SELECT t1.number FROM bt, t1 WHERE t1.number=bt.rowid ORDER BY t1.date" +} {0 1 {TABLE t1 WITH INDEX i1 ORDER BY} 1 0 {TABLE bt USING PRIMARY KEY}} + + finish_test diff --git a/test/fts3rnd.test b/test/fts3rnd.test index 2dcde0f..fe1ea5e 100644 --- a/test/fts3rnd.test +++ b/test/fts3rnd.test @@ -82,6 +82,7 @@ source $testdir/tester.tcl # ifcapable !fts3 { finish_test ; return } source $testdir/fts3_common.tcl +source $testdir/malloc_common.tcl set G(nVocab) 100 diff --git a/test/fuzz.test b/test/fuzz.test index 5111107..e1b22ae 100644 --- a/test/fuzz.test +++ b/test/fuzz.test @@ -27,8 +27,8 @@ source $testdir/tester.tcl set ::REPEATS 5000 # If running quick.test, don't do so many iterations. 
-if {[info exists ::ISQUICK]} { - if {$::ISQUICK} { set ::REPEATS 20 } +if {[info exists ::G(isquick)]} { + if {$::G(isquick)} { set ::REPEATS 20 } } source $testdir/fuzz_common.tcl diff --git a/test/fuzz_malloc.test b/test/fuzz_malloc.test index b9b7dd1..f2209aa 100644 --- a/test/fuzz_malloc.test +++ b/test/fuzz_malloc.test @@ -25,9 +25,9 @@ ifcapable !memdebug { source $testdir/malloc_common.tcl source $testdir/fuzz_common.tcl -if {[info exists ISQUICK]} { +if {[info exists G(isquick)]} { set ::REPEATS 20 -} elseif {[info exists SOAKTEST]} { +} elseif {[info exists G(issoak)]} { set ::REPEATS 100 } else { set ::REPEATS 40 diff --git a/test/hook.test b/test/hook.test index b526708..6496d41 100644 --- a/test/hook.test +++ b/test/hook.test @@ -334,4 +334,31 @@ do_test hook-5.2.2 { # End rollback-hook testing. #---------------------------------------------------------------------------- +#---------------------------------------------------------------------------- +# Test that if a commit-hook returns non-zero (causing a rollback), the +# rollback-hook is invoked. +# +proc commit_hook {} { + lappend ::hooks COMMIT + return 1 +} +proc rollback_hook {} { + lappend ::hooks ROLLBACK +} +do_test hook-6.1 { + set ::hooks [list] + db commit_hook commit_hook + db rollback_hook rollback_hook + catchsql { + BEGIN; + INSERT INTO t1 VALUES('two', 'II'); + COMMIT; + } + execsql { SELECT * FROM t1 } +} {one I} +do_test hook-6.2 { + set ::hooks +} {COMMIT ROLLBACK} +unset ::hooks + finish_test diff --git a/test/incrblob.test b/test/incrblob.test index f4e6aee..0865912 100644 --- a/test/incrblob.test +++ b/test/incrblob.test @@ -208,7 +208,7 @@ foreach AutoVacuumMode [list 0 1] { nRead db } [expr $AutoVacuumMode ? 4 : 30] } -sqlite3_soft_heap_limit $soft_limit +sqlite3_soft_heap_limit $cmdlinearg(soft-heap-limit) #------------------------------------------------------------------------ # incrblob-3.*: @@ -500,7 +500,7 @@ do_test incrblob-6.15 { } } {a different invocation} db2 close -sqlite3_soft_heap_limit $soft_limit +sqlite3_soft_heap_limit $cmdlinearg(soft-heap-limit) #----------------------------------------------------------------------- # The following tests verify the behaviour of the incremental IO diff --git a/test/incrvacuum.test b/test/incrvacuum.test index fe10acb..d037d8e 100644 --- a/test/incrvacuum.test +++ b/test/incrvacuum.test @@ -727,15 +727,18 @@ do_test incrvacuum-13.5 { # Verify that the incremental_vacuum pragma fails gracefully if it # is used against an invalid database file. 
# -do_test incrvacuum-14.1 { - set out [open invalid.db w] - puts $out "This is not an SQLite database file" - close $out - sqlite3 db3 invalid.db - catchsql { - PRAGMA incremental_vacuum(10); - } db3 -} {1 {file is encrypted or is not a database}} +if {[permutation] == ""} { + do_test incrvacuum-14.1 { + set out [open invalid.db w] + puts $out "This is not an SQLite database file" + close $out + sqlite3 db3 invalid.db + catchsql { + PRAGMA incremental_vacuum(10); + } db3 + } {1 {file is encrypted or is not a database}} + db3 close +} do_test incrvacuum-15.1 { db close @@ -780,5 +783,4 @@ do_test incrvacuum-15.1 { } } {ok} -db3 close finish_test diff --git a/test/index3.test b/test/index3.test index 9549f55..161ddec 100644 --- a/test/index3.test +++ b/test/index3.test @@ -49,10 +49,8 @@ do_test index3-99.1 { UPDATE sqlite_master SET sql='nonsense'; } db close - sqlite3 db test.db - catchsql { - DROP INDEX i1; - } + catch { sqlite3 db test.db } + catchsql { DROP INDEX i1 } } {1 {malformed database schema (t1) - near "nonsense": syntax error}} finish_test diff --git a/test/init.test b/test/init.test index efd6b4c..7cb18f4 100644 --- a/test/init.test +++ b/test/init.test @@ -17,6 +17,10 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +if {[db eval {SELECT sqlite_compileoption_used('THREADSAFE=0')}]} { + finish_test + return +} db close @@ -76,4 +80,3 @@ if {$MEMDEBUG} { autoinstall_test_functions finish_test - diff --git a/test/io.test b/test/io.test index 3848aff..0cc26ef 100644 --- a/test/io.test +++ b/test/io.test @@ -200,10 +200,16 @@ do_test io-2.5.3 { # journal file is not actually created until the 'COMMIT' statement # is executed. # +# Changed 2010-03-27: The size of the database is now stored in +# bytes 28..31 and so when a page is added to the database, page 1 +# is immediately modified and the journal file immediately comes into +# existance. To fix this test, the BEGIN is changed into a a +# BEGIN IMMEDIATE and the INSERT is omitted. +# do_test io-2.6.1 { execsql { - BEGIN; - INSERT INTO abc VALUES(9, randstr(1000,1000)); + BEGIN IMMEDIATE; + -- INSERT INTO abc VALUES(9, randstr(1000,1000)); } file exists test.db-journal } {0} @@ -213,12 +219,15 @@ do_test io-2.6.2 { # should fail with SQLITE_CANTOPEN and the transaction rolled back. # file mkdir test.db-journal - catchsql { COMMIT } + catchsql { + INSERT INTO abc VALUES(9, randstr(1000,1000)); + COMMIT + } } {1 {unable to open database file}} do_test io-2.6.3 { file delete -force test.db-journal catchsql { COMMIT } -} {1 {cannot commit - no transaction is active}} +} {0 {}} do_test io-2.6.4 { execsql { SELECT * FROM abc } } {1 2 3 4 5 6 7 8} diff --git a/test/journal2.test b/test/journal2.test new file mode 100644 index 0000000..2b5d8fa --- /dev/null +++ b/test/journal2.test @@ -0,0 +1,234 @@ +# 2010 June 16 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. Specifically, +# it tests SQLite when using a VFS that claims the SAFE_DELETE property. 
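(Editorial aside, not part of the patch: journal2.test configures the test VFS with "tvfs devchar undeletable_when_open", i.e. it simulates a file system on which an open journal file cannot be deleted. For context, a real VFS would advertise the same property through the xDeviceCharacteristics method of its sqlite3_io_methods. The sketch below assumes a hypothetical pass-through wrapper structure, DemoFile, which is not defined anywhere in SQLite itself.)

/*
** Editorial sketch: report the UNDELETABLE_WHEN_OPEN device
** characteristic on top of whatever the underlying file reports.
*/
#include "sqlite3.h"

typedef struct DemoFile DemoFile;
struct DemoFile {
  sqlite3_file base;       /* Base class - must be first */
  sqlite3_file *pReal;     /* Underlying file from the wrapped VFS */
};

static int demoDeviceCharacteristics(sqlite3_file *pFile){
  DemoFile *p = (DemoFile*)pFile;
  int flags = p->pReal->pMethods->xDeviceCharacteristics(p->pReal);
  /* Claim that files (e.g. journals) cannot be deleted while open. */
  return flags | SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN;
}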
+# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/lock_common.tcl +source $testdir/malloc_common.tcl +db close + +if {[permutation] == "inmemory_journal"} { + finish_test + return +} + +set a_string_counter 1 +proc a_string {n} { + global a_string_counter + incr a_string_counter + string range [string repeat "${a_string_counter}." $n] 1 $n +} + +# Create a [testvfs] and install it as the default VFS. Set the device +# characteristics flags to "SAFE_DELETE". +# +testvfs tvfs -default 1 +tvfs devchar undeletable_when_open + +# Set up a hook so that each time a journal file is opened, closed or +# deleted, the method name ("xOpen", "xClose" or "xDelete") and the final +# segment of the journal file-name (i.e. "test.db-journal") are appended to +# global list variable $::oplog. +# +tvfs filter {xOpen xClose xDelete} +tvfs script journal_op_catcher +proc journal_op_catcher {method filename args} { + + # If global variable ::tvfs_error_on_write is defined, then return an + # IO error to every attempt to modify the file-system. Otherwise, return + # SQLITE_OK. + # + if {[info exists ::tvfs_error_on_write]} { + if {[lsearch {xDelete xWrite xTruncate} $method]>=0} { + return SQLITE_IOERR + } + } + + # The rest of this command only deals with xOpen(), xClose() and xDelete() + # operations on journal files. If this invocation does not represent such + # an operation, return with no further ado. + # + set f [file tail $filename] + if {[string match *journal $f]==0} return + if {[lsearch {xOpen xDelete xClose} $method]<0} return + + # Append a record of this operation to global list variable $::oplog. + # + lappend ::oplog $method $f + + # If this is an attempt to delete a journal file for which there exists + # one ore more open handles, return an error. The code in test_vfs.c + # will not invoke the xDelete method of the "real" VFS in this case. + # + if {[info exists ::open_journals($f)]==0} { set ::open_journals($f) 0 } + switch -- $method { + xOpen { incr ::open_journals($f) +1 } + xClose { incr ::open_journals($f) -1 } + xDelete { if {$::open_journals($f)>0} { return SQLITE_IOERR } } + } + + return "" +} + + +do_test journal2-1.1 { + set ::oplog [list] + sqlite3 db test.db + execsql { CREATE TABLE t1(a, b) } + set ::oplog +} {xOpen test.db-journal xClose test.db-journal xDelete test.db-journal} +do_test journal2-1.2 { + set ::oplog [list] + execsql { + PRAGMA journal_mode = truncate; + INSERT INTO t1 VALUES(1, 2); + } + set ::oplog +} {xOpen test.db-journal} +do_test journal2-1.3 { + set ::oplog [list] + execsql { INSERT INTO t1 VALUES(3, 4) } + set ::oplog +} {} +do_test journal2-1.4 { execsql { SELECT * FROM t1 } } {1 2 3 4} + +# Add a second connection. This connection attempts to commit data in +# journal_mode=DELETE mode. When it tries to delete the journal file, +# the VFS layer returns an IO error. +# +do_test journal2-1.5 { + set ::oplog [list] + sqlite3 db2 test.db + execsql { PRAGMA journal_mode = delete } db2 + catchsql { INSERT INTO t1 VALUES(5, 6) } db2 +} {1 {disk I/O error}} +do_test journal2-1.6 { file exists test.db-journal } 1 +do_test journal2-1.7 { execsql { SELECT * FROM t1 } } {1 2 3 4} +do_test journal2-1.8 { + execsql { PRAGMA journal_mode = truncate } db2 + execsql { INSERT INTO t1 VALUES(5, 6) } db2 +} {} +do_test journal2-1.9 { execsql { SELECT * FROM t1 } } {1 2 3 4 5 6} + +# Grow the database until it is reasonably large. 
+# +do_test journal2-1.10 { + db2 close + db func a_string a_string + execsql { + CREATE TABLE t2(a UNIQUE, b UNIQUE); + INSERT INTO t2 VALUES(a_string(200), a_string(300)); + INSERT INTO t2 SELECT a_string(200), a_string(300) FROM t2; -- 2 + INSERT INTO t2 SELECT a_string(200), a_string(300) FROM t2; -- 4 + INSERT INTO t2 SELECT a_string(200), a_string(300) FROM t2; -- 8 + INSERT INTO t2 SELECT a_string(200), a_string(300) FROM t2; -- 16 + INSERT INTO t2 SELECT a_string(200), a_string(300) FROM t2; -- 32 + INSERT INTO t2 SELECT a_string(200), a_string(300) FROM t2; -- 64 + } + file size test.db-journal +} {0} +do_test journal2-1.11 { + set sz [expr [file size test.db] / 1024] + expr {$sz>120 && $sz<200} +} 1 + +# Using new connection [db2] (with journal_mode=DELETE), write a lot of +# data to the database. So that many pages within the database file are +# modified before the transaction is committed. +# +# Then, enable simulated IO errors in all calls to xDelete, xWrite +# and xTruncate before committing the transaction and closing the +# database file. From the point of view of other file-system users, it +# appears as if the process hosting [db2] unexpectedly exited. +# +do_test journal2-1.12 { + sqlite3 db2 test.db + execsql { + PRAGMA cache_size = 10; + BEGIN; + INSERT INTO t2 SELECT randomblob(200), randomblob(300) FROM t2; -- 128 + } db2 +} {} +do_test journal2-1.13 { + tvfs filter {xOpen xClose xDelete xWrite xTruncate} + set ::tvfs_error_on_write 1 + catchsql { COMMIT } db2 +} {1 {disk I/O error}} +db2 close +unset ::tvfs_error_on_write +file copy -force test.db testX.db + +do_test journal2-1.14 { file exists test.db-journal } 1 +do_test journal2-1.15 { + execsql { + SELECT count(*) FROM t2; + PRAGMA integrity_check; + } +} {64 ok} + +# This block checks that in the test case above, connection [db2] really +# did begin writing to the database file before it hit IO errors. If +# this is true, then the copy of the database file made before [db] +# rolled back the hot journal should fail the integrity-check. +# +do_test journal2-1.16 { + set sz [expr [file size testX.db] / 1024] + expr {$sz>240 && $sz<400} +} 1 +do_test journal2-1.17 { + expr {[catchsql { PRAGMA integrity_check } db] == "0 ok"} +} {1} +do_test journal2-1.20 { + sqlite3 db2 testX.db + expr {[catchsql { PRAGMA integrity_check } db2] == "0 ok"} +} {0} +do_test journal2-1.21 { + db2 close +} {} +db close + +#------------------------------------------------------------------------- +# Test that it is possible to switch from journal_mode=truncate to +# journal_mode=WAL on a SAFE_DELETE file-system. SQLite should close and +# delete the journal file when committing the transaction that switches +# the system to WAL mode. +# +ifcapable wal { + do_test journal2-2.1 { + faultsim_delete_and_reopen + set ::oplog [list] + execsql { PRAGMA journal_mode = persist } + set ::oplog + } {} + do_test journal2-2.2 { + execsql { + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(3.14159); + } + set ::oplog + } {xOpen test.db-journal} + do_test journal2-2.3 { + expr {[file size test.db-journal] > 512} + } {1} + do_test journal2-2.4 { + set ::oplog [list] + execsql { PRAGMA journal_mode = WAL } + set ::oplog + } {xClose test.db-journal xDelete test.db-journal} + db close +} + +tvfs delete +finish_test + diff --git a/test/journal3.test b/test/journal3.test new file mode 100644 index 0000000..7d29722 --- /dev/null +++ b/test/journal3.test @@ -0,0 +1,60 @@ +# 2010 July 15 +# +# The author disclaims copyright to this source code. 
In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/lock_common.tcl +source $testdir/malloc_common.tcl + +#------------------------------------------------------------------------- +# If a connection is required to create a journal file, it creates it with +# the same file-system permissions as the database file itself. Test this. +# +if {$::tcl_platform(platform) == "unix"} { + + set umask [exec /bin/sh -c umask] + faultsim_delete_and_reopen + do_test journal3-1.1 { execsql { CREATE TABLE tx(y, z) } } {} + + foreach {tn permissions} { + 1 00644 + 2 00666 + 3 00600 + 4 00755 + } { + db close + set effective [format %.5o [expr $permissions & ~$umask]] + do_test journal3-1.2.$tn.1 { + catch { file delete -force test.db-journal } + file attributes test.db -permissions $permissions + file attributes test.db -permissions + } $permissions + do_test journal3-1.2.$tn.2 { file exists test.db-journal } {0} + do_test journal3-1.2.$tn.3 { + sqlite3 db test.db + execsql { + BEGIN; + INSERT INTO tx DEFAULT VALUES; + } + file exists test.db-journal + } {1} + do_test journal3-1.2.$tn.4 { + file attr test.db-journal -perm + } $effective + do_execsql_test journal3-1.2.$tn.5 { ROLLBACK } {} + } + + +} + +finish_test diff --git a/test/jrnlmode.test b/test/jrnlmode.test index ea235d3..f1e9c78 100644 --- a/test/jrnlmode.test +++ b/test/jrnlmode.test @@ -132,7 +132,7 @@ ifcapable attach { execsql { PRAGMA journal_mode; } - } {persist} + } {off} do_test jrnlmode-1.12 { execsql { ATTACH ':memory:' as aux2; @@ -317,9 +317,10 @@ ifcapable pragma { do_test jrnlmode-5.3 { execsql { ATTACH 'test2.db' AS aux; + PRAGMA aux.journal_mode=persist; PRAGMA aux.journal_size_limit; } - } {-1} + } {persist -1} do_test jrnlmode-5.4.1 { execsql { PRAGMA aux.journal_size_limit = 999999999999 } } {999999999999} @@ -337,8 +338,11 @@ ifcapable pragma { } {10240} do_test jrnlmode-5.8 { - execsql { ATTACH 'test3.db' AS aux2 } - } {} + execsql { + ATTACH 'test3.db' AS aux2; + PRAGMA aux2.journal_mode=persist; + } + } {persist} do_test jrnlmode-5.9 { execsql { @@ -454,7 +458,7 @@ ifcapable pragma { ifcapable pragma { # These tests are not run as part of the "journaltest" permutation, # as the test_journal.c layer is incompatible with in-memory journaling. 
- if {[catch {set ::permutations_test_prefix} z] || $z ne "journaltest"} { + if {[permutation] ne "journaltest"} { do_test jrnlmode-6.1 { execsql { @@ -484,7 +488,7 @@ ifcapable pragma { INSERT INTO t4 VALUES(3, 4); } file exists test.db-journal - } {1} + } {0} do_test jrnlmode-6.7 { execsql { COMMIT; @@ -493,15 +497,31 @@ ifcapable pragma { } {1 2 3 4} do_test jrnlmode-6.8 { file exists test.db-journal - } {1} + } {0} do_test jrnlmode-6.9 { execsql { PRAGMA journal_mode = DELETE; - BEGIN IMMEDIATE; COMMIT; + BEGIN IMMEDIATE; INSERT INTO t4 VALUES(1,2); COMMIT; } file exists test.db-journal } {0} } } +ifcapable pragma { + catch { db close } + do_test jrnlmode-7.1 { + foreach f [glob -nocomplain test.db*] { file delete -force $f } + sqlite3 db test.db + execsql { + PRAGMA journal_mode = memory; + PRAGMA auto_vacuum = 0; + PRAGMA page_size = 1024; + PRAGMA user_version = 5; + PRAGMA user_version; + } + } {memory 5} + do_test jrnlmode-7.2 { file size test.db } {1024} +} + finish_test diff --git a/test/jrnlmode2.test b/test/jrnlmode2.test index 9a1fe37..dc3bc27 100644 --- a/test/jrnlmode2.test +++ b/test/jrnlmode2.test @@ -9,7 +9,6 @@ # #*********************************************************************** # -# $Id: jrnlmode2.test,v 1.6 2009/06/05 17:09:12 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -20,10 +19,33 @@ ifcapable {!pager_pragmas} { } #------------------------------------------------------------------------- -# Test overview: +# The tests in this file check that the following two bugs (both now fixed) +# do not reappear. # -# jrnlmode2-1.*: Demonstrate bug #3745 -# jrnlmode2-2.*: Demonstrate bug #3751 +# jrnlmode2-1.*: Demonstrate bug #3745: +# +# In persistent journal mode, if: +# +# * There is a persistent journal in the file-system, AND +# * there exists a connection with a shared lock on the db file, +# +# then a second connection cannot open a read-transaction on the database. +# The reason is because while determining that the persistent-journal is +# not a hot-journal, SQLite currently grabs an exclusive lock on the +# database file. If this fails because another connection has a shared +# lock, then SQLITE_BUSY is returned to the user. +# +# jrnlmode2-2.*: Demonstrate bug #3751: +# +# If a connection is opened in SQLITE_OPEN_READONLY mode, the underlying +# unix file descriptor on the database file is opened in O_RDONLY mode. +# +# When SQLite queries the database file for the schema in order to compile +# the SELECT statement, it sees the empty journal in the file system, it +# attempts to obtain an exclusive lock on the database file (this is a +# bug). The attempt to obtain an exclusive (write) lock on a read-only file +# fails at the OS level. Under unix, fcntl() reports an EBADF - "Bad file +# descriptor" - error. # do_test jrnlmode2-1.1 { @@ -46,6 +68,8 @@ do_test jrnlmode2-1.3 { do_test jrnlmode2-1.4 { execsql { INSERT INTO t1 VALUES(3, 4); + } + execsql { BEGIN; SELECT * FROM t1; } @@ -87,9 +111,9 @@ do_test jrnlmode2-2.4 { } {0 {1 2 3 4 5 6}} do_test jrnlmode2-2.5 { + db close file delete test.db-journal } {} - do_test jrnlmode2-2.6 { sqlite3 db2 test.db -readonly 1 catchsql { SELECT * FROM t1 } db2 diff --git a/test/lock2.test b/test/lock2.test index a2b75ca..ea2e557 100644 --- a/test/lock2.test +++ b/test/lock2.test @@ -16,69 +16,8 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +source $testdir/lock_common.tcl -# Launch another testfixture process to be controlled by this one. 
A -# channel name is returned that may be passed as the first argument to proc -# 'testfixture' to execute a command. The child testfixture process is shut -# down by closing the channel. -proc launch_testfixture {} { - set prg [info nameofexec] - if {$prg eq ""} { - set prg [file join . testfixture] - } - set chan [open "|$prg tf_main.tcl" r+] - fconfigure $chan -buffering line - return $chan -} - -# Execute a command in a child testfixture process, connected by two-way -# channel $chan. Return the result of the command, or an error message. -proc testfixture {chan cmd} { - puts $chan $cmd - puts $chan OVER - set r "" - while { 1 } { - set line [gets $chan] - if { $line == "OVER" } { - return $r - } - if {[eof $chan]} { - return "ERROR: Child process hung up" - } - append r $line - } -} - -# Write the main loop for the child testfixture processes into file -# tf_main.tcl. The parent (this script) interacts with the child processes -# via a two way pipe. The parent writes a script to the stdin of the child -# process, followed by the word "OVER" on a line of its own. The child -# process evaluates the script and writes the results to stdout, followed -# by an "OVER" of its own. -set f [open tf_main.tcl w] -puts $f { - set l [open log w] - set script "" - while {![eof stdin]} { - flush stdout - set line [gets stdin] - puts $l "READ $line" - if { $line == "OVER" } { - catch {eval $script} result - puts $result - puts $l "WRITE $result" - puts OVER - puts $l "WRITE OVER" - flush stdout - set script "" - } else { - append script $line - append script " ; " - } - } - close $l -} -close $f # Simple locking test case: # @@ -95,7 +34,6 @@ close $f # do_test lock2-1.1 { set ::tf1 [launch_testfixture] - testfixture $::tf1 "sqlite3_test_control_pending_byte $::sqlite_pending_byte" testfixture $::tf1 { sqlite3 db test.db -key xyzzy db eval {select * from sqlite_master} @@ -121,9 +59,8 @@ do_test lock2-1.3 { } {} do_test lock2-1.4 { testfixture $::tf1 { - db eval { - CREATE TABLE def(d, e, f) - } + catch { db eval { CREATE TABLE def(d, e, f) } } msg + set msg } } {database is locked} do_test lock2-1.5 { @@ -141,10 +78,11 @@ do_test lock2-1.6 { } {} do_test lock2-1.7 { testfixture $::tf1 { - db eval { + catch { db eval { BEGIN; SELECT * FROM sqlite_master; - } + } } msg + set msg } } {database is locked} do_test lock2-1.8 { @@ -157,6 +95,7 @@ do_test lock2-1.9 { SELECT * FROM sqlite_master; } } "table abc abc [expr $AUTOVACUUM?3:2] {CREATE TABLE abc(a, b, c)}" +catch flush_async_queue do_test lock2-1.10 { testfixture $::tf1 { db eval { @@ -167,6 +106,6 @@ do_test lock2-1.10 { catch {testfixture $::tf1 {db close}} catch {close $::tf1} -sqlite3_soft_heap_limit $soft_limit +sqlite3_soft_heap_limit $cmdlinearg(soft-heap-limit) finish_test diff --git a/test/lock4.test b/test/lock4.test index a8cacb1..06ee88d 100644 --- a/test/lock4.test +++ b/test/lock4.test @@ -17,6 +17,8 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +do_not_use_codec + # Initialize the test.db database so that it is non-empty # do_test lock4-1.1 { diff --git a/test/lock5.test b/test/lock5.test index 63e7e6b..6ec86ba 100644 --- a/test/lock5.test +++ b/test/lock5.test @@ -121,7 +121,7 @@ do_test lock5-flock.2 { } {0} do_test lock5-flock.3 { - sqlite3 db2 test.db -vfs unix-flock + catch { sqlite3 db2 test.db -vfs unix-flock } catchsql { SELECT * FROM t1 } db2 } {1 {database is locked}} diff --git a/test/lock6.test b/test/lock6.test index b64983a..84f8888 100644 --- a/test/lock6.test +++ b/test/lock6.test @@ -161,8 +161,7 @@ 
ifcapable lock_proxy_pragmas&&prefer_proxy_locking { set env(SQLITE_FORCE_PROXY_LOCKING) $using_proxy set sqlite_hostid_num 0 - sqlite3_soft_heap_limit $soft_limit - + sqlite3_soft_heap_limit $cmdlinearg(soft-heap-limit) } finish_test diff --git a/test/lock_common.tcl b/test/lock_common.tcl new file mode 100644 index 0000000..4fe08e0 --- /dev/null +++ b/test/lock_common.tcl @@ -0,0 +1,166 @@ +# 2010 April 14 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file contains code used by several different test scripts. The +# code in this file allows testfixture to control another process (or +# processes) to test locking. +# + +proc do_multiclient_test {varname script} { + + foreach code [list { + set ::code2_chan [launch_testfixture] + set ::code3_chan [launch_testfixture] + proc code2 {tcl} { testfixture $::code2_chan $tcl } + proc code3 {tcl} { testfixture $::code3_chan $tcl } + set tn 1 + } { + proc code2 {tcl} { uplevel #0 $tcl } + proc code3 {tcl} { uplevel #0 $tcl } + set tn 2 + }] { + faultsim_delete_and_reopen + + # Open connections [db2] and [db3]. Depending on which iteration this + # is, the connections may be created in this interpreter, or in + # interpreters running in other OS processes. As such, the [db2] and [db3] + # commands should only be accessed within [code2] and [code3] blocks, + # respectively. + # + eval $code + code2 { sqlite3 db2 test.db } + code3 { sqlite3 db3 test.db } + + # Shorthand commands. Execute SQL using database connection [db2] or + # [db3]. Return the results. + # + proc sql1 {sql} { db eval $sql } + proc sql2 {sql} { code2 [list db2 eval $sql] } + proc sql3 {sql} { code3 [list db3 eval $sql] } + + proc csql1 {sql} { list [catch { sql1 $sql } msg] $msg } + proc csql2 {sql} { list [catch { sql2 $sql } msg] $msg } + proc csql3 {sql} { list [catch { sql3 $sql } msg] $msg } + + uplevel set $varname $tn + uplevel $script + + code2 { db2 close } + code3 { db3 close } + catch { close $::code2_chan } + catch { close $::code3_chan } + } +} + +# Launch another testfixture process to be controlled by this one. A +# channel name is returned that may be passed as the first argument to proc +# 'testfixture' to execute a command. The child testfixture process is shut +# down by closing the channel. +proc launch_testfixture {} { + write_main_loop + set prg [info nameofexec] + if {$prg eq ""} { + set prg [file join . testfixture] + } + set chan [open "|$prg tf_main.tcl" r+] + fconfigure $chan -buffering line + testfixture $chan "sqlite3_test_control_pending_byte $::sqlite_pending_byte" + return $chan +} + +# Execute a command in a child testfixture process, connected by two-way +# channel $chan. Return the result of the command, or an error message. 
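A minimal sketch of driving the [launch_testfixture] and [testfixture] helpers directly, assuming the standard tester.tcl environment and a testfixture binary on the path; the channel variable and the SQL are illustrative only. The [do_multiclient_test] proc above wraps this same mechanism.

    set chan [launch_testfixture]     ;# start a controlled child testfixture process
    testfixture $chan {
      sqlite3 db test.db
      db eval { SELECT count(*) FROM sqlite_master }
    }
    close $chan                       ;# closing the channel shuts the child down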
+# +proc testfixture {chan cmd} { + puts $chan $cmd + puts $chan OVER + set r "" + while { 1 } { + set line [gets $chan] + if { $line == "OVER" } { + set res [lindex $r 1] + if { [lindex $r 0] } { error $res } + return $res + } + if {[eof $chan]} { + return "ERROR: Child process hung up" + } + append r $line + } +} + +proc testfixture_nb_cb {varname chan} { + if {[eof $chan]} { + append ::tfnb($chan) "ERROR: Child process hung up" + set line "OVER" + } else { + set line [gets $chan] + } + + if { $line == "OVER" } { + set $varname [lindex $::tfnb($chan) 1] + unset ::tfnb($chan) + close $chan + } else { + append ::tfnb($chan) $line + } +} + +proc testfixture_nb {varname cmd} { + set chan [launch_testfixture] + set ::tfnb($chan) "" + fconfigure $chan -blocking 0 -buffering none + puts $chan $cmd + puts $chan OVER + fileevent $chan readable [list testfixture_nb_cb $varname $chan] + return "" +} + +# Write the main loop for the child testfixture processes into file +# tf_main.tcl. The parent (this script) interacts with the child processes +# via a two way pipe. The parent writes a script to the stdin of the child +# process, followed by the word "OVER" on a line of its own. The child +# process evaluates the script and writes the results to stdout, followed +# by an "OVER" of its own. +# +set main_loop_written 0 +proc write_main_loop {} { + if {$::main_loop_written} return + set wrapper "" + if {[sqlite3 -has-codec] && [info exists ::do_not_use_codec]==0} { + set wrapper " + rename sqlite3 sqlite_orig + proc sqlite3 {args} {[info body sqlite3]} + " + } + + set fd [open tf_main.tcl w] + puts $fd [string map [list %WRAPPER% $wrapper] { + %WRAPPER% + set script "" + while {![eof stdin]} { + flush stdout + set line [gets stdin] + if { $line == "OVER" } { + set rc [catch {eval $script} result] + puts [list $rc $result] + puts OVER + flush stdout + set script "" + } else { + append script $line + append script "\n" + } + } + }] + close $fd + set main_loop_written 1 +} + diff --git a/test/lookaside.test b/test/lookaside.test index 3c44560..bf55494 100644 --- a/test/lookaside.test +++ b/test/lookaside.test @@ -21,6 +21,14 @@ ifcapable !lookaside { return } +# The tests in this file configure the lookaside allocator after a +# connection is opened. This will not work if there is any "presql" +# configured (SQL run within the [sqlite3] wrapper in tester.tcl). +if {[info exists ::G(perm:presql)]} { + finish_test + return +} + catch {db close} sqlite3_shutdown sqlite3_config_pagecache 0 0 @@ -34,12 +42,14 @@ sqlite3 db test.db do_test lookaside-1.1 { catch {sqlite3_config_error db} } {0} + do_test lookaside-1.2 { sqlite3_db_config_lookaside db 1 18 18 } {0} do_test lookaside-1.3 { sqlite3_db_status db SQLITE_DBSTATUS_LOOKASIDE_USED 0 } {0 0 0} + do_test lookaside-1.4 { db eval {CREATE TABLE t1(w,x,y,z);} foreach {x y z} [sqlite3_db_status db SQLITE_DBSTATUS_LOOKASIDE_USED 0] break diff --git a/test/main.test b/test/main.test index 6248e11..dbf9041 100644 --- a/test/main.test +++ b/test/main.test @@ -298,15 +298,17 @@ ifcapable {trigger} { # Try to open a database with a corrupt database file. # -do_test main-2.0 { - catch {db close} - file delete -force test.db - set fd [open test.db w] - puts $fd hi! - close $fd - set v [catch {sqlite3 db test.db} msg] - if {$v} {lappend v $msg} {lappend v {}} -} {0 {}} +if {[permutation] == ""} { + do_test main-2.0 { + catch {db close} + file delete -force test.db + set fd [open test.db w] + puts $fd hi! 
+ close $fd + set v [catch {sqlite3 db test.db} msg] + if {$v} {lappend v $msg} {lappend v {}} + } {0 {}} +} # Here are some tests for tokenize.c. # diff --git a/test/malloc.test b/test/malloc.test index 8459b28..f1eee17 100644 --- a/test/malloc.test +++ b/test/malloc.test @@ -334,6 +334,7 @@ if {$tcl_platform(platform)!="windows"} { sqlite3 db2 test2.db sqlite3_extended_result_codes db2 1 db2 eval { + PRAGMA journal_mode = DELETE; /* For inmemory_journal permutation */ PRAGMA synchronous = 0; CREATE TABLE t1(a, b); INSERT INTO t1 VALUES(1, 2); diff --git a/test/mallocAll.test b/test/mallocAll.test index 5fbd849..b1c94ea 100644 --- a/test/mallocAll.test +++ b/test/mallocAll.test @@ -30,7 +30,7 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl rename finish_test really_finish_test proc finish_test {} {} -set ISQUICK 1 +set G(isquick) 1 set EXCLUDE { mallocAll.test @@ -56,8 +56,7 @@ foreach testfile [lsort -dictionary [glob $testdir/*malloc*.test]] { catch {db close} if {$sqlite_open_file_count>0} { puts "$tail did not close all files: $sqlite_open_file_count" - incr nErr - lappend ::failList $tail + fail_test $tail set sqlite_open_file_count 0 } } diff --git a/test/mallocC.test b/test/mallocC.test index 7316704..7943353 100644 --- a/test/mallocC.test +++ b/test/mallocC.test @@ -76,7 +76,6 @@ proc do_mallocC_test {tn args} { #} $sum #integrity_check mallocC-$tn.$::n.4 - if {$::nErr>1} return } unset ::mallocopts } diff --git a/test/mallocI.test b/test/mallocI.test index f4ddaa2..1229455 100644 --- a/test/mallocI.test +++ b/test/mallocI.test @@ -57,6 +57,7 @@ do_malloc_test mallocI-4 -tclprep { # on the database file. catchsql { INSERT INTO t1 VALUES(1, 2, 3) } db2 } {0 {}} + catch {db2 close} } catch { db2 close } diff --git a/test/malloc_common.tcl b/test/malloc_common.tcl index 90f6b06..ef5deac 100644 --- a/test/malloc_common.tcl +++ b/test/malloc_common.tcl @@ -23,6 +23,340 @@ ifcapable builtin_test { return 0 } +# Transient and persistent OOM errors: +# +set FAULTSIM(oom-transient) [list \ + -injectstart {oom_injectstart 0} \ + -injectstop oom_injectstop \ + -injecterrlist {{1 {out of memory}}} \ +] +set FAULTSIM(oom-persistent) [list \ + -injectstart {oom_injectstart 1000000} \ + -injectstop oom_injectstop \ + -injecterrlist {{1 {out of memory}}} \ +] + +# Transient and persistent IO errors: +# +set FAULTSIM(ioerr-transient) [list \ + -injectstart {ioerr_injectstart 0} \ + -injectstop ioerr_injectstop \ + -injecterrlist {{1 {disk I/O error}}} \ +] +set FAULTSIM(ioerr-persistent) [list \ + -injectstart {ioerr_injectstart 1} \ + -injectstop ioerr_injectstop \ + -injecterrlist {{1 {disk I/O error}}} \ +] + +# SQLITE_FULL errors (always persistent): +# +set FAULTSIM(full) [list \ + -injectinstall fullerr_injectinstall \ + -injectstart fullerr_injectstart \ + -injectstop fullerr_injectstop \ + -injecterrlist {{1 {database or disk is full}}} \ + -injectuninstall fullerr_injectuninstall \ +] + +# Transient and persistent SHM errors: +# +set FAULTSIM(shmerr-transient) [list \ + -injectinstall shmerr_injectinstall \ + -injectstart {shmerr_injectstart 0} \ + -injectstop shmerr_injectstop \ + -injecterrlist {{1 {disk I/O error}}} \ + -injectuninstall shmerr_injectuninstall \ +] +set FAULTSIM(shmerr-persistent) [list \ + -injectinstall shmerr_injectinstall \ + -injectstart {shmerr_injectstart 1} \ + -injectstop shmerr_injectstop \ + -injecterrlist {{1 {disk I/O error}}} \ + -injectuninstall shmerr_injectuninstall \ +] + +# Transient and persistent CANTOPEN errors: +# +set 
FAULTSIM(cantopen-transient) [list \ + -injectinstall cantopen_injectinstall \ + -injectstart {cantopen_injectstart 0} \ + -injectstop cantopen_injectstop \ + -injecterrlist {{1 {unable to open database file}}} \ + -injectuninstall cantopen_injectuninstall \ +] +set FAULTSIM(cantopen-persistent) [list \ + -injectinstall cantopen_injectinstall \ + -injectstart {cantopen_injectstart 1} \ + -injectstop cantopen_injectstop \ + -injecterrlist {{1 {unable to open database file}}} \ + -injectuninstall cantopen_injectuninstall \ +] + + + +#-------------------------------------------------------------------------- +# Usage do_faultsim_test NAME ?OPTIONS...? +# +# -faults List of fault types to simulate. +# +# -prep Script to execute before -body. +# +# -body Script to execute (with fault injection). +# +# -test Script to execute after -body. +# +proc do_faultsim_test {name args} { + global FAULTSIM + + set DEFAULT(-faults) [array names FAULTSIM] + set DEFAULT(-prep) "" + set DEFAULT(-body) "" + set DEFAULT(-test) "" + + array set O [array get DEFAULT] + array set O $args + foreach o [array names O] { + if {[info exists DEFAULT($o)]==0} { error "unknown option: $o" } + } + + set faultlist [list] + foreach f $O(-faults) { + set flist [array names FAULTSIM $f] + if {[llength $flist]==0} { error "unknown fault: $f" } + set faultlist [concat $faultlist $flist] + } + + set testspec [list -prep $O(-prep) -body $O(-body) -test $O(-test)] + foreach f [lsort -unique $faultlist] { + eval do_one_faultsim_test "$name-$f" $FAULTSIM($f) $testspec + } +} + +#------------------------------------------------------------------------- +# Procedures to save and restore the current file-system state: +# +# faultsim_save +# faultsim_restore +# faultsim_save_and_close +# faultsim_restore_and_reopen +# faultsim_delete_and_reopen +# +proc faultsim_save {} { + foreach f [glob -nocomplain sv_test.db*] { file delete -force $f } + foreach f [glob -nocomplain test.db*] { + set f2 "sv_$f" + file copy -force $f $f2 + } +} +proc faultsim_save_and_close {} { + faultsim_save + catch { db close } + return "" +} +proc faultsim_restore {} { + foreach f [glob -nocomplain test.db*] { file delete -force $f } + foreach f2 [glob -nocomplain sv_test.db*] { + set f [string range $f2 3 end] + file copy -force $f2 $f + } +} +proc faultsim_restore_and_reopen {{dbfile test.db}} { + catch { db close } + faultsim_restore + sqlite3 db $dbfile + sqlite3_extended_result_codes db 1 + sqlite3_db_config_lookaside db 0 0 0 +} + +proc faultsim_integrity_check {{db db}} { + set ic [$db eval { PRAGMA integrity_check }] + if {$ic != "ok"} { error "Integrity check: $ic" } +} + +proc faultsim_delete_and_reopen {{file test.db}} { + catch { db close } + foreach f [glob -nocomplain test.db*] { file delete -force $f } + sqlite3 db $file +} + + +# The following procs are used as [do_one_faultsim_test] callbacks when +# injecting OOM faults into test cases. +# +proc oom_injectstart {nRepeat iFail} { + sqlite3_memdebug_fail $iFail -repeat $nRepeat +} +proc oom_injectstop {} { + sqlite3_memdebug_fail -1 +} + +# The following procs are used as [do_one_faultsim_test] callbacks when +# injecting IO error faults into test cases. 
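A sketch of a typical caller of the [do_faultsim_test] wrapper documented earlier in this file. The test name demo-faultsim and the table t1 are invented for illustration; the glob "oom-*" selects only the OOM entries registered in the FAULTSIM array.

    do_faultsim_test demo-faultsim -faults oom-* -prep {
      faultsim_delete_and_reopen
      execsql { CREATE TABLE t1(x); INSERT INTO t1 VALUES(1); }
    } -body {
      execsql { INSERT INTO t1 VALUES(2) }
    } -test {
      # Success and the errors listed in -injecterrlist (here "out of
      # memory") are both acceptable outcomes of the -body script.
      faultsim_test_result {0 {}}
      faultsim_integrity_check
    }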
+# +proc ioerr_injectstart {persist iFail} { + set ::sqlite_io_error_persist $persist + set ::sqlite_io_error_pending $iFail +} +proc ioerr_injectstop {} { + set sv $::sqlite_io_error_hit + set ::sqlite_io_error_persist 0 + set ::sqlite_io_error_pending 0 + set ::sqlite_io_error_hardhit 0 + set ::sqlite_io_error_hit 0 + set ::sqlite_io_error_pending 0 + return $sv +} + +# The following procs are used as [do_one_faultsim_test] callbacks when +# injecting shared-memory related error faults into test cases. +# +proc shmerr_injectinstall {} { + testvfs shmfault -default true + shmfault filter {xShmOpen xShmMap xShmLock} +} +proc shmerr_injectuninstall {} { + catch {db close} + catch {db2 close} + shmfault delete +} +proc shmerr_injectstart {persist iFail} { + shmfault ioerr $iFail $persist +} +proc shmerr_injectstop {} { + shmfault ioerr +} + +# The following procs are used as [do_one_faultsim_test] callbacks when +# injecting SQLITE_FULL error faults into test cases. +# +proc fullerr_injectinstall {} { + testvfs shmfault -default true +} +proc fullerr_injectuninstall {} { + catch {db close} + catch {db2 close} + shmfault delete +} +proc fullerr_injectstart {iFail} { + shmfault full $iFail 1 +} +proc fullerr_injectstop {} { + shmfault full +} + +# The following procs are used as [do_one_faultsim_test] callbacks when +# injecting SQLITE_CANTOPEN error faults into test cases. +# +proc cantopen_injectinstall {} { + testvfs shmfault -default true +} +proc cantopen_injectuninstall {} { + catch {db close} + catch {db2 close} + shmfault delete +} +proc cantopen_injectstart {persist iFail} { + shmfault cantopen $iFail $persist +} +proc cantopen_injectstop {} { + shmfault cantopen +} + +# This command is not called directly. It is used by the +# [faultsim_test_result] command created by [do_faultsim_test] and used +# by -test scripts. +# +proc faultsim_test_result_int {args} { + upvar testrc testrc testresult testresult testnfail testnfail + set t [list $testrc $testresult] + set r $args + if { ($testnfail==0 && $t != [lindex $r 0]) || [lsearch $r $t]<0 } { + error "nfail=$testnfail rc=$testrc result=$testresult" + } +} + +#-------------------------------------------------------------------------- +# Usage do_one_faultsim_test NAME ?OPTIONS...? +# +# The first argument, , is used as a prefix of the test names +# taken by tests executed by this command. Options are as follows. All +# options take a single argument. +# +# -injectstart Script to enable fault-injection. +# +# -injectstop Script to disable fault-injection. +# +# -injecterrlist List of generally acceptable test results (i.e. error +# messages). Example: [list {1 {out of memory}}] +# +# -injectinstall +# +# -injectuninstall +# +# -prep Script to execute before -body. +# +# -body Script to execute (with fault injection). +# +# -test Script to execute after -body. 
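The FAULTSIM(ioerr-transient) entry near the top of this file bundles exactly the options a direct caller of [do_one_faultsim_test] would pass. A minimal hand-rolled invocation might look like the sketch below; the test name demo-ioerr and the schema are illustrative.

    do_one_faultsim_test demo-ioerr \
        -injectstart   {ioerr_injectstart 0} \
        -injectstop    ioerr_injectstop \
        -injecterrlist {{1 {disk I/O error}}} \
        -prep          { faultsim_delete_and_reopen } \
        -body          { execsql { CREATE TABLE t1(x) } } \
        -test          { faultsim_test_result {0 {}} }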
+# +proc do_one_faultsim_test {testname args} { + + set DEFAULT(-injectstart) "expr" + set DEFAULT(-injectstop) "expr 0" + set DEFAULT(-injecterrlist) [list] + set DEFAULT(-injectinstall) "" + set DEFAULT(-injectuninstall) "" + set DEFAULT(-prep) "" + set DEFAULT(-body) "" + set DEFAULT(-test) "" + + array set O [array get DEFAULT] + array set O $args + foreach o [array names O] { + if {[info exists DEFAULT($o)]==0} { error "unknown option: $o" } + } + + proc faultsim_test_proc {testrc testresult testnfail} $O(-test) + proc faultsim_test_result {args} " + uplevel faultsim_test_result_int \$args [list $O(-injecterrlist)] + " + + eval $O(-injectinstall) + + set stop 0 + for {set iFail 1} {!$stop} {incr iFail} { + + # Evaluate the -prep script. + # + eval $O(-prep) + + # Start the fault-injection. Run the -body script. Stop the fault + # injection. Local var $nfail is set to the total number of faults + # injected into the system this trial. + # + eval $O(-injectstart) $iFail + set rc [catch $O(-body) res] + set nfail [eval $O(-injectstop)] + + # Run the -test script. If it throws no error, consider this trial + # sucessful. If it does throw an error, cause a [do_test] test to + # fail (and print out the unexpected exception thrown by the -test + # script at the same time). + # + set rc [catch [list faultsim_test_proc $rc $res $nfail] res] + if {$rc == 0} {set res ok} + do_test $testname.$iFail [list list $rc $res] {0 ok} + + # If no faults where injected this trial, don't bother running + # any more. This test is finished. + # + if {$nfail==0} { set stop 1 } + } + + eval $O(-injectuninstall) +} + # Usage: do_malloc_test # # The first argument, , is an integer used to name the @@ -87,10 +421,13 @@ proc do_malloc_test {tn args} { # with the handle [db]. # catch {db close} + catch {db2 close} catch {file delete -force test.db} catch {file delete -force test.db-journal} + catch {file delete -force test.db-wal} catch {file delete -force test2.db} catch {file delete -force test2.db-journal} + catch {file delete -force test2.db-wal} if {[info exists ::mallocopts(-testdb)]} { file copy $::mallocopts(-testdb) test.db } @@ -165,3 +502,157 @@ proc do_malloc_test {tn args} { unset ::mallocopts sqlite3_memdebug_fail -1 } + + +#------------------------------------------------------------------------- +# This proc is used to test a single SELECT statement. Parameter $name is +# passed a name for the test case (i.e. "fts3_malloc-1.4.1") and parameter +# $sql is passed the text of the SELECT statement. Parameter $result is +# set to the expected output if the SELECT statement is successfully +# executed using [db eval]. +# +# Example: +# +# do_select_test testcase-1.1 "SELECT 1+1, 1+2" {1 2} +# +# If global variable DO_MALLOC_TEST is set to a non-zero value, or if +# it is not defined at all, then OOM testing is performed on the SELECT +# statement. Each OOM test case is said to pass if either (a) executing +# the SELECT statement succeeds and the results match those specified +# by parameter $result, or (b) TCL throws an "out of memory" error. +# +# If DO_MALLOC_TEST is defined and set to zero, then the SELECT statement +# is executed just once. In this case the test case passes if the results +# match the expected results passed via parameter $result. 
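A concrete illustration of the [do_select_test] and [do_error_test] interfaces described above; the test names and the missing table name are invented. Note that SELECT 1+1, 1+2 evaluates to the two values 2 and 3.

    # With DO_MALLOC_TEST unset or non-zero, each statement is also repeated
    # under simulated malloc() failures, so "out of memory" is acceptable too.
    do_select_test demo-select-1.1 { SELECT 1+1, 1+2 } {2 3}
    do_error_test  demo-select-1.2 { SELECT * FROM no_such_tbl } \
        {no such table: no_such_tbl}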
+# +proc do_select_test {name sql result} { + uplevel [list doPassiveTest 0 $name $sql [list 0 $result]] +} + +proc do_restart_select_test {name sql result} { + uplevel [list doPassiveTest 1 $name $sql [list 0 $result]] +} + +proc do_error_test {name sql error} { + uplevel [list doPassiveTest 0 $name $sql [list 1 $error]] +} + +proc doPassiveTest {isRestart name sql catchres} { + if {![info exists ::DO_MALLOC_TEST]} { set ::DO_MALLOC_TEST 1 } + + switch $::DO_MALLOC_TEST { + 0 { # No malloc failures. + do_test $name [list set {} [uplevel [list catchsql $sql]]] $catchres + return + } + 1 { # Simulate transient failures. + set nRepeat 1 + set zName "transient" + set nStartLimit 100000 + set nBackup 1 + } + 2 { # Simulate persistent failures. + set nRepeat 1 + set zName "persistent" + set nStartLimit 100000 + set nBackup 1 + } + 3 { # Simulate transient failures with extra brute force. + set nRepeat 100000 + set zName "ridiculous" + set nStartLimit 1 + set nBackup 10 + } + } + + # The set of acceptable results from running [catchsql $sql]. + # + set answers [list {1 {out of memory}} $catchres] + set str [join $answers " OR "] + + set nFail 1 + for {set iLimit $nStartLimit} {$nFail} {incr iLimit} { + for {set iFail 1} {$nFail && $iFail<=$iLimit} {incr iFail} { + for {set iTest 0} {$iTest<$nBackup && ($iFail-$iTest)>0} {incr iTest} { + + if {$isRestart} { sqlite3 db test.db } + + sqlite3_memdebug_fail [expr $iFail-$iTest] -repeat $nRepeat + set res [uplevel [list catchsql $sql]] + if {[lsearch -exact $answers $res]>=0} { set res $str } + set testname "$name.$zName.$iFail" + do_test "$name.$zName.$iLimit.$iFail" [list set {} $res] $str + + set nFail [sqlite3_memdebug_fail -1 -benigncnt nBenign] + } + } + } +} + + +#------------------------------------------------------------------------- +# Test a single write to the database. In this case a "write" is a +# DELETE, UPDATE or INSERT statement. +# +# If OOM testing is performed, there are several acceptable outcomes: +# +# 1) The write succeeds. No error is returned. +# +# 2) An "out of memory" exception is thrown and: +# +# a) The statement has no effect, OR +# b) The current transaction is rolled back, OR +# c) The statement succeeds. This can only happen if the connection +# is in auto-commit mode (after the statement is executed, so this +# includes COMMIT statements). +# +# If the write operation eventually succeeds, zero is returned. If a +# transaction is rolled back, non-zero is returned. +# +# Parameter $name is the name to use for the test case (or test cases). +# The second parameter, $tbl, should be the name of the database table +# being modified. Parameter $sql contains the SQL statement to test. +# +proc do_write_test {name tbl sql} { + if {![info exists ::DO_MALLOC_TEST]} { set ::DO_MALLOC_TEST 1 } + + # Figure out an statement to get a checksum for table $tbl. + db eval "SELECT * FROM $tbl" V break + set cksumsql "SELECT md5sum([join [concat rowid $V(*)] ,]) FROM $tbl" + + # Calculate the initial table checksum. 
+ set cksum1 [db one $cksumsql] + + if {$::DO_MALLOC_TEST } { + set answers [list {1 {out of memory}} {0 {}}] + if {$::DO_MALLOC_TEST==1} { + set modes {100000 persistent} + } else { + set modes {1 transient} + } + } else { + set answers [list {0 {}}] + set modes [list 0 nofail] + } + set str [join $answers " OR "] + + foreach {nRepeat zName} $modes { + for {set iFail 1} 1 {incr iFail} { + if {$::DO_MALLOC_TEST} {sqlite3_memdebug_fail $iFail -repeat $nRepeat} + + set res [uplevel [list catchsql $sql]] + set nFail [sqlite3_memdebug_fail -1 -benigncnt nBenign] + if {$nFail==0} { + do_test $name.$zName.$iFail [list set {} $res] {0 {}} + return + } else { + if {[lsearch $answers $res]>=0} { + set res $str + } + do_test $name.$zName.$iFail [list set {} $res] $str + set cksum2 [db one $cksumsql] + if {$cksum1 != $cksum2} return + } + } + } +} diff --git a/test/memleak.test b/test/memleak.test index f4aaf27..a24a901 100644 --- a/test/memleak.test +++ b/test/memleak.test @@ -75,12 +75,11 @@ foreach testfile $FILELIST { } if {$LeakList!=""} { puts -nonewline memory-leak-test-$tail... - incr ::nTest + incr_ntest foreach x $LeakList { if {$x!=[lindex $LeakList 0]} { puts " failed! ($LeakList)" - incr ::nErr - lappend ::failList memory-leak-test-$tail + fail_test memory-leak-test-$tail break } } diff --git a/test/memsubsys1.test b/test/memsubsys1.test index 8524fde..918b286 100644 --- a/test/memsubsys1.test +++ b/test/memsubsys1.test @@ -17,6 +17,15 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl sqlite3_reset_auto_extension +# This test assumes that no page-cache or scratch buffers are installed +# by default when a new database connection is opened. As a result, it +# will not work with the "memsubsys1" permutation. +# +if {[permutation] == "memsubsys1"} { + finish_test + return +} + # This procedure constructs a new database in test.db. It fills # this database with many small records (enough to force multiple # rebalance operations in the btree-layer and to require a large diff --git a/test/minmax3.test b/test/minmax3.test index c387b04..8e8a0a5 100644 --- a/test/minmax3.test +++ b/test/minmax3.test @@ -13,6 +13,11 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). +# +do_not_use_codec + # Do an SQL statement. Append the search count to the end of the result. # proc count sql { diff --git a/test/misc1.test b/test/misc1.test index 7f93686..a78e488 100644 --- a/test/misc1.test +++ b/test/misc1.test @@ -481,8 +481,12 @@ do_test misc1-14.1 { execsql {BEGIN} file exists ./test.db-journal } {0} -do_test misc1-14.2 { - execsql {UPDATE t1 SET a=0 WHERE 0} +do_test misc1-14.2a { + execsql {UPDATE t1 SET a=a||'x' WHERE 0} + file exists ../test.db-journal +} {0} +do_test misc1-14.2b { + execsql {UPDATE t1 SET a=a||'y' WHERE 1} file exists ../test.db-journal } {1} do_test misc1-14.3 { diff --git a/test/misc5.test b/test/misc5.test index a28f491..34b2284 100644 --- a/test/misc5.test +++ b/test/misc5.test @@ -511,17 +511,19 @@ ifcapable subquery { # Ticket #1370. Do not overwrite small files (less than 1024 bytes) # when trying to open them as a database. 
# -do_test misc5-4.1 { - db close - file delete -force test.db - set fd [open test.db w] - puts $fd "This is not really a database" - close $fd - sqlite3 db test.db - catchsql { - CREATE TABLE t1(a,b,c); - } -} {1 {file is encrypted or is not a database}} +if {[permutation] == ""} { + do_test misc5-4.1 { + db close + file delete -force test.db + set fd [open test.db w] + puts $fd "This is not really a database" + close $fd + sqlite3 db test.db + catchsql { + CREATE TABLE t1(a,b,c); + } + } {1 {file is encrypted or is not a database}} +} # Ticket #1371. Allow floating point numbers of the form .N or N. # diff --git a/test/nan.test b/test/nan.test index 25672cb..0e9462f 100644 --- a/test/nan.test +++ b/test/nan.test @@ -24,6 +24,11 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). +# +do_not_use_codec + do_test nan-1.1.1 { db eval { PRAGMA auto_vacuum=OFF; @@ -61,13 +66,13 @@ if {$tcl_platform(platform) != "symbian"} { sqlite3_reset $::STMT db eval {SELECT x, typeof(x) FROM t1} } {{} null inf real -inf real {} null {} null} - do_test nan-1.1.5 { + do_test nan-1.1.6 { sqlite3_bind_double $::STMT 1 -NaN0 sqlite3_step $::STMT sqlite3_reset $::STMT db eval {SELECT x, typeof(x) FROM t1} } {{} null inf real -inf real {} null {} null {} null} - do_test nan-1.1.6 { + do_test nan-1.1.7 { db eval { UPDATE t1 SET x=x-x; SELECT x, typeof(x) FROM t1; @@ -115,13 +120,13 @@ do_test nan-1.2.5 { sqlite3_reset $::STMT db eval {SELECT CAST(x AS text), typeof(x) FROM t1} } {{} null Inf real -Inf real {} null {} null} -do_test nan-1.2.5 { +do_test nan-1.2.6 { sqlite3_bind_double $::STMT 1 -NaN0 sqlite3_step $::STMT sqlite3_reset $::STMT db eval {SELECT CAST(x AS text), typeof(x) FROM t1} } {{} null Inf real -Inf real {} null {} null {} null} -do_test nan-1.2.6 { +do_test nan-1.2.7 { db eval { UPDATE t1 SET x=x-x; SELECT CAST(x AS text), typeof(x) FROM t1; @@ -248,23 +253,23 @@ do_test nan-4.10 { db eval {SELECT CAST(x AS text), typeof(x) FROM t1} } {-Inf real} -do_test nan-4.10 { +do_test nan-4.11 { db eval {DELETE FROM t1} db eval "INSERT INTO t1 VALUES(1234.5[string repeat 0 10000]12345)" db eval {SELECT x, typeof(x) FROM t1} } {1234.5 real} -do_test nan-4.11 { +do_test nan-4.12 { db eval {DELETE FROM t1} db eval "INSERT INTO t1 VALUES(-1234.5[string repeat 0 10000]12345)" db eval {SELECT x, typeof(x) FROM t1} } {-1234.5 real} -do_test nan-4.12 { +do_test nan-4.13 { db eval {DELETE FROM t1} set small [string repeat 0 10000].[string repeat 0 324][string repeat 9 10000] db eval "INSERT INTO t1 VALUES($small)" db eval {SELECT x, typeof(x) FROM t1} } {0.0 real} -do_test nan-4.13 { +do_test nan-4.14 { db eval {DELETE FROM t1} set small \ -[string repeat 0 10000].[string repeat 0 324][string repeat 9 10000] @@ -279,14 +284,14 @@ if {$tcl_platform(platform) != "symbian"} { # the very small numbers back to text form (probably due to a difference # in the sprintf() implementation). 
# - do_test nan-4.14 { + do_test nan-4.15 { db eval {DELETE FROM t1} set small \ [string repeat 0 10000].[string repeat 0 323][string repeat 9 10000] db eval "INSERT INTO t1 VALUES($small)" db eval {SELECT x, typeof(x) FROM t1} } {9.88131291682493e-324 real} - do_test nan-4.15 { + do_test nan-4.16 { db eval {DELETE FROM t1} set small \ -[string repeat 0 10000].[string repeat 0 323][string repeat 9 10000] @@ -294,13 +299,13 @@ if {$tcl_platform(platform) != "symbian"} { db eval {SELECT x, typeof(x) FROM t1} } {-9.88131291682493e-324 real} } -do_test nan-4.16 { +do_test nan-4.17 { db eval {DELETE FROM t1} set small [string repeat 0 10000].[string repeat 0 323][string repeat 9 10000] db eval "INSERT INTO t1 VALUES($small)" db eval {SELECT CAST(x AS text), typeof(x) FROM t1} } {9.88131291682493e-324 real} -do_test nan-4.17 { +do_test nan-4.18 { db eval {DELETE FROM t1} set small \ -[string repeat 0 10000].[string repeat 0 323][string repeat 9 10000] diff --git a/test/notify3.test b/test/notify3.test new file mode 100644 index 0000000..7c7d5ac --- /dev/null +++ b/test/notify3.test @@ -0,0 +1,146 @@ +# 2010 June 30 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the sqlite3_unlock_notify() API. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# This script only runs if shared-cache and unlock-notify are available. +# +ifcapable !unlock_notify||!shared_cache { + finish_test + return +} + +set esc [sqlite3_enable_shared_cache 1] + +sqlite3 db test.db +file delete -force test.db2 test.db2-journal test.db2-wal +sqlite3 db2 test.db2 + +do_test notify3-1.1 { + execsql { + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES('t1 A', 't1 B'); + } +} {} +do_test notify3-1.2 { + execsql { + CREATE TABLE t2(a, b); + INSERT INTO t2 VALUES('t2 A', 't2 B'); + } db2 +} {} + +do_test notify3-1.3 { + execsql { + BEGIN EXCLUSIVE; + INSERT INTO t2 VALUES('t2 C', 't2 D'); + } db2 +} {} +do_test notify3-1.4 { + catchsql { ATTACH 'test.db2' AS aux } +} {0 {}} + +do_test notify3-1.5 { + catchsql { SELECT * FROM t2 } +} {1 {database schema is locked: aux}} + +do_test notify3-1.6 { + list [sqlite3_errcode db] [sqlite3_extended_errcode db] +} {SQLITE_LOCKED SQLITE_LOCKED_SHAREDCACHE} + +do_test notify3-1.7 { + sqlite3_extended_result_codes db 1 + catch { set ::stmt [sqlite3_prepare_v2 db "SELECT * FROM t2" -1 tail] } msg + set msg +} {(262) database schema is locked: aux} + +do_test notify3-1.8 { + set ::when 1 + db unlock_notify { set ::res $::when } + set ::when 2 + execsql { COMMIT } db2 + set ::res +} {2} +do_test notify3-1.9 { + catchsql { SELECT * FROM t2 } +} {0 {{t2 A} {t2 B} {t2 C} {t2 D}}} +db close + + +set err {{1 {unable to open database: test.db2}}} +set noerr {{0 {}}} + +# When a new database is attached, the connection doing the attaching +# tries to load any unloaded schemas for both the new database and any +# already attached databases (including the main database). If it is +# unable to load any such schemas, then the ATTACH statement fails. 
+# +# This block tests that if the loading of schemas as a result of an +# ATTACH fails due to locks on the schema table held by other shared-cache +# connections the extended error code is SQLITE_LOCKED_SHAREDCACHE and +# it is possible to use the unlock-notify mechanism to determine when +# the ATTACH might succeed. +# +foreach { + tn + db1_loaded + db2_loaded + enable_extended_errors + result + error1 error2 +} " + 0 0 0 0 $err SQLITE_LOCKED SQLITE_LOCKED_SHAREDCACHE + 1 0 0 1 $err SQLITE_LOCKED_SHAREDCACHE SQLITE_LOCKED_SHAREDCACHE + 2 0 1 0 $err SQLITE_LOCKED SQLITE_LOCKED_SHAREDCACHE + 3 0 1 1 $err SQLITE_LOCKED_SHAREDCACHE SQLITE_LOCKED_SHAREDCACHE + 4 1 0 0 $err SQLITE_LOCKED SQLITE_LOCKED_SHAREDCACHE + 5 1 0 1 $err SQLITE_LOCKED_SHAREDCACHE SQLITE_LOCKED_SHAREDCACHE + 6 1 1 0 $noerr SQLITE_OK SQLITE_OK + 7 1 1 1 $noerr SQLITE_OK SQLITE_OK +" { + + do_test notify3-2.$tn.1 { + catch { db1 close } + catch { db2 close } + sqlite3 db1 test.db + sqlite3 db2 test.db2 + + sqlite3_extended_result_codes db1 $enable_extended_errors + sqlite3_extended_result_codes db2 $enable_extended_errors + + if { $db1_loaded } { db1 eval "SELECT * FROM sqlite_master" } + if { $db2_loaded } { db2 eval "SELECT * FROM sqlite_master" } + + db2 eval "BEGIN EXCLUSIVE" + catchsql "ATTACH 'test.db2' AS two" db1 + } $result + + do_test notify3-2.$tn.2 { + list [sqlite3_errcode db1] [sqlite3_extended_errcode db1] + } [list $error1 $error2] + + do_test notify3-2.$tn.3 { + db1 unlock_notify {set invoked 1} + set invoked 0 + db2 eval commit + set invoked + } [lindex $result 0] +} +catch { db1 close } +catch { db2 close } + + +sqlite3_enable_shared_cache $esc +finish_test + diff --git a/test/pager1.test b/test/pager1.test new file mode 100644 index 0000000..7f28002 --- /dev/null +++ b/test/pager1.test @@ -0,0 +1,2246 @@ +# 2010 June 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/lock_common.tcl +source $testdir/malloc_common.tcl +source $testdir/wal_common.tcl + +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). +# +do_not_use_codec + +# +# pager1-1.*: Test inter-process locking (clients in multiple processes). +# +# pager1-2.*: Test intra-process locking (multiple clients in this process). +# +# pager1-3.*: Savepoint related tests. +# +# pager1-4.*: Hot-journal related tests. +# +# pager1-5.*: Cases related to multi-file commits. +# +# pager1-6.*: Cases related to "PRAGMA max_page_count" +# +# pager1-7.*: Cases specific to "PRAGMA journal_mode=TRUNCATE" +# +# pager1-8.*: Cases using temporary and in-memory databases. +# +# pager1-9.*: Tests related to the backup API. +# +# pager1-10.*: Test that the assumed file-system sector-size is limited to +# 64KB. 
+# +# pager1-12.*: Tests involving "PRAGMA page_size" +# +# pager1-13.*: Cases specific to "PRAGMA journal_mode=PERSIST" +# +# pager1-14.*: Cases specific to "PRAGMA journal_mode=OFF" +# +# pager1-15.*: Varying sqlite3_vfs.szOsFile +# +# pager1-16.*: Varying sqlite3_vfs.mxPathname +# +# pager1-17.*: Tests related to "PRAGMA omit_readlock" +# +# pager1-18.*: Test that the pager layer responds correctly if the b-tree +# requests an invalid page number (due to db corruption). +# + +proc recursive_select {id table {script {}}} { + set cnt 0 + db eval "SELECT rowid, * FROM $table WHERE rowid = ($id-1)" { + recursive_select $rowid $table $script + incr cnt + } + if {$cnt==0} { eval $script } +} + +set a_string_counter 1 +proc a_string {n} { + global a_string_counter + incr a_string_counter + string range [string repeat "${a_string_counter}." $n] 1 $n +} +db func a_string a_string + +do_multiclient_test tn { + + # Create and populate a database table using connection [db]. Check + # that connections [db2] and [db3] can see the schema and content. + # + do_test pager1-$tn.1 { + sql1 { + CREATE TABLE t1(a PRIMARY KEY, b); + CREATE INDEX i1 ON t1(b); + INSERT INTO t1 VALUES(1, 'one'); INSERT INTO t1 VALUES(2, 'two'); + } + } {} + do_test pager1-$tn.2 { sql2 { SELECT * FROM t1 } } {1 one 2 two} + do_test pager1-$tn.3 { sql3 { SELECT * FROM t1 } } {1 one 2 two} + + # Open a transaction and add a row using [db]. This puts [db] in + # RESERVED state. Check that connections [db2] and [db3] can still + # read the database content as it was before the transaction was + # opened. [db] should see the inserted row. + # + do_test pager1-$tn.4 { + sql1 { + BEGIN; + INSERT INTO t1 VALUES(3, 'three'); + } + } {} + do_test pager1-$tn.5 { sql2 { SELECT * FROM t1 } } {1 one 2 two} + do_test pager1-$tn.7 { sql1 { SELECT * FROM t1 } } {1 one 2 two 3 three} + + # [db] still has an open write transaction. Check that this prevents + # other connections (specifically [db2]) from writing to the database. + # + # Even if [db2] opens a transaction first, it may not write to the + # database. After the attempt to write the db within a transaction, + # [db2] is left with an open transaction, but not a read-lock on + # the main database. So it does not prevent [db] from committing. + # + do_test pager1-$tn.8 { + csql2 { UPDATE t1 SET a = a + 10 } + } {1 {database is locked}} + do_test pager1-$tn.9 { + csql2 { + BEGIN; + UPDATE t1 SET a = a + 10; + } + } {1 {database is locked}} + + # Have [db] commit its transactions. Check the other connections can + # now see the new database content. + # + do_test pager1-$tn.10 { sql1 { COMMIT } } {} + do_test pager1-$tn.11 { sql1 { SELECT * FROM t1 } } {1 one 2 two 3 three} + do_test pager1-$tn.12 { sql2 { SELECT * FROM t1 } } {1 one 2 two 3 three} + do_test pager1-$tn.13 { sql3 { SELECT * FROM t1 } } {1 one 2 two 3 three} + + # Check that, as noted above, [db2] really did keep an open transaction + # after the attempt to write the database failed. + # + do_test pager1-$tn.14 { + csql2 { BEGIN } + } {1 {cannot start a transaction within a transaction}} + do_test pager1-$tn.15 { sql2 { ROLLBACK } } {} + + # Have [db2] open a transaction and take a read-lock on the database. + # Check that this prevents [db] from writing to the database (outside + # of any transaction). After this fails, check that [db3] can read + # the db (showing that [db] did not take a PENDING lock etc.) 
+ # + do_test pager1-$tn.15 { + sql2 { BEGIN; SELECT * FROM t1; } + } {1 one 2 two 3 three} + do_test pager1-$tn.16 { + csql1 { UPDATE t1 SET a = a + 10 } + } {1 {database is locked}} + do_test pager1-$tn.17 { sql3 { SELECT * FROM t1 } } {1 one 2 two 3 three} + + # This time, have [db] open a transaction before writing the database. + # This works - [db] gets a RESERVED lock which does not conflict with + # the SHARED lock [db2] is holding. + # + do_test pager1-$tn.18 { + sql1 { + BEGIN; + UPDATE t1 SET a = a + 10; + } + } {} + do_test pager1-$tn-19 { + sql1 { PRAGMA lock_status } + } {main reserved temp closed} + do_test pager1-$tn-20 { + sql2 { PRAGMA lock_status } + } {main shared temp closed} + + # Check that all connections can still read the database. Only [db] sees + # the updated content (as the transaction has not been committed yet). + # + do_test pager1-$tn.21 { sql1 { SELECT * FROM t1 } } {11 one 12 two 13 three} + do_test pager1-$tn.22 { sql2 { SELECT * FROM t1 } } {1 one 2 two 3 three} + do_test pager1-$tn.23 { sql3 { SELECT * FROM t1 } } {1 one 2 two 3 three} + + # Because [db2] still has the SHARED lock, [db] is unable to commit the + # transaction. If it tries, an error is returned and the connection + # upgrades to a PENDING lock. + # + # Once this happens, [db] can read the database and see the new content, + # [db2] (still holding SHARED) can still read the old content, but [db3] + # (not holding any lock) is prevented by [db]'s PENDING from reading + # the database. + # + do_test pager1-$tn.24 { csql1 { COMMIT } } {1 {database is locked}} + do_test pager1-$tn-25 { + sql1 { PRAGMA lock_status } + } {main pending temp closed} + do_test pager1-$tn.26 { sql1 { SELECT * FROM t1 } } {11 one 12 two 13 three} + do_test pager1-$tn.27 { sql2 { SELECT * FROM t1 } } {1 one 2 two 3 three} + do_test pager1-$tn.28 { csql3 { SELECT * FROM t1 } } {1 {database is locked}} + + # Have [db2] commit its read transaction, releasing the SHARED lock it + # is holding. Now, neither [db2] nor [db3] may read the database (as [db] + # is still holding a PENDING). + # + do_test pager1-$tn.29 { sql2 { COMMIT } } {} + do_test pager1-$tn.30 { csql2 { SELECT * FROM t1 } } {1 {database is locked}} + do_test pager1-$tn.31 { csql3 { SELECT * FROM t1 } } {1 {database is locked}} + + # [db] is now able to commit the transaction. Once the transaction is + # committed, all three connections can read the new content. + # + do_test pager1-$tn.25 { sql1 { UPDATE t1 SET a = a+10 } } {} + do_test pager1-$tn.26 { sql1 { COMMIT } } {} + do_test pager1-$tn.27 { sql1 { SELECT * FROM t1 } } {21 one 22 two 23 three} + do_test pager1-$tn.27 { sql2 { SELECT * FROM t1 } } {21 one 22 two 23 three} + do_test pager1-$tn.28 { sql3 { SELECT * FROM t1 } } {21 one 22 two 23 three} + + # Install a busy-handler for connection [db]. + # + set ::nbusy [list] + proc busy {n} { + lappend ::nbusy $n + if {$n>5} { sql2 COMMIT } + return 0 + } + db busy busy + + do_test pager1-$tn.29 { + sql1 { BEGIN ; INSERT INTO t1 VALUES('x', 'y') } + } {} + do_test pager1-$tn.30 { + sql2 { BEGIN ; SELECT * FROM t1 } + } {21 one 22 two 23 three} + do_test pager1-$tn.31 { sql1 COMMIT } {} + do_test pager1-$tn.32 { set ::nbusy } {0 1 2 3 4 5 6} +} + +#------------------------------------------------------------------------- +# Savepoint related test cases. +# +# pager1-3.1.2.*: Force a savepoint rollback to cause the database file +# to grow. +# +# pager1-3.1.3.*: Use a journal created in synchronous=off mode as part +# of a savepoint rollback. 
+# +do_test pager1-3.1.1 { + faultsim_delete_and_reopen + execsql { + CREATE TABLE t1(a PRIMARY KEY, b); + CREATE TABLE counter( + i CHECK (i<5), + u CHECK (u<10) + ); + INSERT INTO counter VALUES(0, 0); + CREATE TRIGGER tr1 AFTER INSERT ON t1 BEGIN + UPDATE counter SET i = i+1; + END; + CREATE TRIGGER tr2 AFTER UPDATE ON t1 BEGIN + UPDATE counter SET u = u+1; + END; + } + execsql { SELECT * FROM counter } +} {0 0} + +do_execsql_test pager1-3.1.2 { + PRAGMA cache_size = 10; + BEGIN; + INSERT INTO t1 VALUES(1, randomblob(1500)); + INSERT INTO t1 VALUES(2, randomblob(1500)); + INSERT INTO t1 VALUES(3, randomblob(1500)); + SELECT * FROM counter; +} {3 0} +do_catchsql_test pager1-3.1.3 { + INSERT INTO t1 SELECT a+3, randomblob(1500) FROM t1 +} {1 {constraint failed}} +do_execsql_test pager1-3.4 { SELECT * FROM counter } {3 0} +do_execsql_test pager1-3.5 { SELECT a FROM t1 } {1 2 3} +do_execsql_test pager1-3.6 { COMMIT } {} + +foreach {tn sql tcl} { + 7 { PRAGMA synchronous = NORMAL ; PRAGMA temp_store = 0 } { + testvfs tv -default 1 + tv devchar safe_append + } + 8 { PRAGMA synchronous = NORMAL ; PRAGMA temp_store = 2 } { + testvfs tv -default 1 + tv devchar sequential + } + 9 { PRAGMA synchronous = FULL } { } + 10 { PRAGMA synchronous = NORMAL } { } + 11 { PRAGMA synchronous = OFF } { } + 12 { PRAGMA synchronous = FULL ; PRAGMA fullfsync = 1 } { } + 13 { PRAGMA synchronous = FULL } { + testvfs tv -default 1 + tv devchar sequential + } + 14 { PRAGMA locking_mode = EXCLUSIVE } { + } +} { + do_test pager1-3.$tn.1 { + eval $tcl + faultsim_delete_and_reopen + db func a_string a_string + execsql $sql + execsql { + PRAGMA auto_vacuum = 2; + PRAGMA cache_size = 10; + CREATE TABLE z(x INTEGER PRIMARY KEY, y); + BEGIN; + INSERT INTO z VALUES(NULL, a_string(800)); + INSERT INTO z SELECT NULL, a_string(800) FROM z; -- 2 + INSERT INTO z SELECT NULL, a_string(800) FROM z; -- 4 + INSERT INTO z SELECT NULL, a_string(800) FROM z; -- 8 + INSERT INTO z SELECT NULL, a_string(800) FROM z; -- 16 + INSERT INTO z SELECT NULL, a_string(800) FROM z; -- 32 + INSERT INTO z SELECT NULL, a_string(800) FROM z; -- 64 + INSERT INTO z SELECT NULL, a_string(800) FROM z; -- 128 + INSERT INTO z SELECT NULL, a_string(800) FROM z; -- 256 + COMMIT; + } + execsql { PRAGMA auto_vacuum } + } {2} + do_execsql_test pager1-3.$tn.2 { + BEGIN; + INSERT INTO z VALUES(NULL, a_string(800)); + INSERT INTO z VALUES(NULL, a_string(800)); + SAVEPOINT one; + UPDATE z SET y = NULL WHERE x>256; + PRAGMA incremental_vacuum; + SELECT count(*) FROM z WHERE x < 100; + ROLLBACK TO one; + COMMIT; + } {99} + + do_execsql_test pager1-3.$tn.3 { + BEGIN; + SAVEPOINT one; + UPDATE z SET y = y||x; + ROLLBACK TO one; + COMMIT; + SELECT count(*) FROM z; + } {258} + + do_execsql_test pager1-3.$tn.4 { + SAVEPOINT one; + UPDATE z SET y = y||x; + ROLLBACK TO one; + } {} + do_execsql_test pager1-3.$tn.5 { + SELECT count(*) FROM z; + RELEASE one; + PRAGMA integrity_check; + } {258 ok} + + do_execsql_test pager1-3.$tn.6 { + SAVEPOINT one; + RELEASE one; + } {} + + db close + catch { tv delete } +} + +#------------------------------------------------------------------------- +# Hot journal rollback related test cases. +# +# pager1.4.1.*: Test that the pager module deletes very small invalid +# journal files. 
+# +# pager1.4.2.*: Test that if the master journal pointer at the end of a +# hot-journal file appears to be corrupt (checksum does not +# compute) the associated journal is rolled back (and no +# xAccess() call to check for the presence of any master +# journal file is made). +# +# pager1.4.3.*: Test that the contents of a hot-journal are ignored if the +# page-size or sector-size in the journal header appear to +# be invalid (too large, too small or not a power of 2). +# +# pager1.4.4.*: Test hot-journal rollback of journal file with a master +# journal pointer generated in various "PRAGMA synchronous" +# modes. +# +# pager1.4.5.*: Test that hot-journal rollback stops if it encounters a +# journal-record for which the checksum fails. +# +# pager1.4.6.*: Test that when rolling back a hot-journal that contains a +# master journal pointer, the master journal file is deleted +# after all the hot-journals that refer to it are deleted. +# +# pager1.4.7.*: Test that if a hot-journal file exists but a client can +# open it for reading only, the database cannot be accessed and +# SQLITE_CANTOPEN is returned. +# +do_test pager1.4.1.1 { + faultsim_delete_and_reopen + execsql { + CREATE TABLE x(y, z); + INSERT INTO x VALUES(1, 2); + } + set fd [open test.db-journal w] + puts -nonewline $fd "helloworld" + close $fd + file exists test.db-journal +} {1} +do_test pager1.4.1.2 { execsql { SELECT * FROM x } } {1 2} +do_test pager1.4.1.3 { file exists test.db-journal } {0} + +# Set up a [testvfs] to snapshot the file-system just before SQLite +# deletes the master-journal to commit a multi-file transaction. +# +# In subsequent test cases, invoking [faultsim_restore_and_reopen] sets +# up the file system to contain two databases, two hot-journal files and +# a master-journal. 
+# +do_test pager1.4.2.1 { + testvfs tstvfs -default 1 + tstvfs filter xDelete + tstvfs script xDeleteCallback + proc xDeleteCallback {method file args} { + set file [file tail $file] + if { [string match *mj* $file] } { faultsim_save } + } + faultsim_delete_and_reopen + db func a_string a_string + execsql { + ATTACH 'test.db2' AS aux; + PRAGMA journal_mode = DELETE; + PRAGMA main.cache_size = 10; + PRAGMA aux.cache_size = 10; + CREATE TABLE t1(a UNIQUE, b UNIQUE); + CREATE TABLE aux.t2(a UNIQUE, b UNIQUE); + INSERT INTO t1 VALUES(a_string(200), a_string(300)); + INSERT INTO t1 SELECT a_string(200), a_string(300) FROM t1; + INSERT INTO t1 SELECT a_string(200), a_string(300) FROM t1; + INSERT INTO t2 SELECT * FROM t1; + BEGIN; + INSERT INTO t1 SELECT a_string(201), a_string(301) FROM t1; + INSERT INTO t1 SELECT a_string(202), a_string(302) FROM t1; + INSERT INTO t1 SELECT a_string(203), a_string(303) FROM t1; + INSERT INTO t1 SELECT a_string(204), a_string(304) FROM t1; + REPLACE INTO t2 SELECT * FROM t1; + COMMIT; + } + db close + tstvfs delete +} {} +do_test pager1.4.2.2 { + faultsim_restore_and_reopen + execsql { + SELECT count(*) FROM t1; + PRAGMA integrity_check; + } +} {4 ok} +do_test pager1.4.2.3 { + faultsim_restore_and_reopen + foreach f [glob test.db-mj*] { file delete -force $f } + execsql { + SELECT count(*) FROM t1; + PRAGMA integrity_check; + } +} {64 ok} +do_test pager1.4.2.4 { + faultsim_restore_and_reopen + hexio_write test.db-journal [expr [file size test.db-journal]-20] 123456 + execsql { + SELECT count(*) FROM t1; + PRAGMA integrity_check; + } +} {4 ok} +do_test pager1.4.2.5 { + faultsim_restore_and_reopen + hexio_write test.db-journal [expr [file size test.db-journal]-20] 123456 + foreach f [glob test.db-mj*] { file delete -force $f } + execsql { + SELECT count(*) FROM t1; + PRAGMA integrity_check; + } +} {4 ok} + +do_test pager1.4.3.1 { + testvfs tstvfs -default 1 + tstvfs filter xSync + tstvfs script xSyncCallback + proc xSyncCallback {method file args} { + set file [file tail $file] + if { 0==[string match *journal $file] } { faultsim_save } + } + faultsim_delete_and_reopen + execsql { + PRAGMA journal_mode = DELETE; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + INSERT INTO t1 VALUES(3, 4); + } + db close + tstvfs delete +} {} + +foreach {tn ofst value result} { + 2 20 31 {1 2 3 4} + 3 20 32 {1 2 3 4} + 4 20 33 {1 2 3 4} + 5 20 65536 {1 2 3 4} + 6 20 131072 {1 2 3 4} + + 7 24 511 {1 2 3 4} + 8 24 513 {1 2 3 4} + 9 24 65536 {1 2 3 4} + + 10 32 65536 {1 2} +} { + do_test pager1.4.3.$tn { + faultsim_restore_and_reopen + hexio_write test.db-journal $ofst [format %.8x $value] + execsql { SELECT * FROM t1 } + } $result +} +db close + +# Set up a VFS that snapshots the file-system just before a master journal +# file is deleted to commit a multi-file transaction. Specifically, the +# file-system is saved just before the xDelete() call to remove the +# master journal file from the file-system. +# +testvfs tv -default 1 +tv script copy_on_mj_delete +set ::mj_filename_length 0 +proc copy_on_mj_delete {method filename args} { + if {[string match *mj* [file tail $filename]]} { + set ::mj_filename_length [string length $filename] + faultsim_save + } + return SQLITE_OK +} + +set pwd [pwd] +foreach {tn1 tcl} { + 1 { set prefix "test.db" } + 2 { + # This test depends on the underlying VFS being able to open paths + # 512 bytes in length. 
The idea is to create a hot-journal file that + # contains a master-journal pointer so large that it could contain + # a valid page record (if the file page-size is 512 bytes). So as to + # make sure SQLite doesn't get confused by this. + # + set nPadding [expr 511 - $::mj_filename_length] + if {$tcl_platform(platform)=="windows"} { + # TBD need to figure out how to do this correctly for Windows!!! + set nPadding [expr 255 - $::mj_filename_length] + } + + # We cannot just create a really long database file name to open, as + # Linux limits a single component of a path to 255 bytes by default + # (and presumably other systems have limits too). So create a directory + # hierarchy to work in. + # + set dirname "d123456789012345678901234567890/" + set nDir [expr $nPadding / 32] + if { $nDir } { + set p [string repeat $dirname $nDir] + file mkdir $p + cd $p + } + + set padding [string repeat x [expr $nPadding %32]] + set prefix "test.db${padding}" + } +} { + eval $tcl + foreach {tn2 sql} { + o { + PRAGMA main.synchronous=OFF; + PRAGMA aux.synchronous=OFF; + PRAGMA journal_mode = DELETE; + } + o512 { + PRAGMA main.synchronous=OFF; + PRAGMA aux.synchronous=OFF; + PRAGMA main.page_size = 512; + PRAGMA aux.page_size = 512; + PRAGMA journal_mode = DELETE; + } + n { + PRAGMA main.synchronous=NORMAL; + PRAGMA aux.synchronous=NORMAL; + PRAGMA journal_mode = DELETE; + } + f { + PRAGMA main.synchronous=FULL; + PRAGMA aux.synchronous=FULL; + PRAGMA journal_mode = DELETE; + } + } { + + set tn "${tn1}.${tn2}" + + # Set up a connection to have two databases, test.db (main) and + # test.db2 (aux). Then run a multi-file transaction on them. The + # VFS will snapshot the file-system just before the master-journal + # file is deleted to commit the transaction. + # + tv filter xDelete + do_test pager1-4.4.$tn.1 { + faultsim_delete_and_reopen $prefix + execsql " + ATTACH '${prefix}2' AS aux; + $sql + CREATE TABLE a(x); + CREATE TABLE aux.b(x); + INSERT INTO a VALUES('double-you'); + INSERT INTO a VALUES('why'); + INSERT INTO a VALUES('zed'); + INSERT INTO b VALUES('won'); + INSERT INTO b VALUES('too'); + INSERT INTO b VALUES('free'); + " + execsql { + BEGIN; + INSERT INTO a SELECT * FROM b WHERE rowid<=3; + INSERT INTO b SELECT * FROM a WHERE rowid<=3; + COMMIT; + } + } {} + tv filter {} + + # Check that the transaction was committed successfully. + # + do_execsql_test pager1-4.4.$tn.2 { + SELECT * FROM a + } {double-you why zed won too free} + do_execsql_test pager1-4.4.$tn.3 { + SELECT * FROM b + } {won too free double-you why zed} + + # Restore the file-system and reopen the databases. Check that it now + # appears that the transaction was not committed (because the file-system + # was restored to the state where it had not been). + # + do_test pager1-4.4.$tn.4 { + faultsim_restore_and_reopen $prefix + execsql "ATTACH '${prefix}2' AS aux" + } {} + do_execsql_test pager1-4.4.$tn.5 {SELECT * FROM a} {double-you why zed} + do_execsql_test pager1-4.4.$tn.6 {SELECT * FROM b} {won too free} + + # Restore the file-system again. This time, before reopening the databases, + # delete the master-journal file from the file-system. It now appears that + # the transaction was committed (no master-journal file == no rollback). 
+ # + do_test pager1-4.4.$tn.7 { + faultsim_restore_and_reopen $prefix + foreach f [glob ${prefix}-mj*] { file delete -force $f } + execsql "ATTACH '${prefix}2' AS aux" + } {} + do_execsql_test pager1-4.4.$tn.8 { + SELECT * FROM a + } {double-you why zed won too free} + do_execsql_test pager1-4.4.$tn.9 { + SELECT * FROM b + } {won too free double-you why zed} + } + + cd $pwd +} +db close +tv delete +file delete -force $dirname + + +# Set up a VFS to make a copy of the file-system just before deleting a +# journal file to commit a transaction. The transaction modifies exactly +# two database pages (and page 1 - the change counter). +# +testvfs tv -default 1 +tv sectorsize 512 +tv script copy_on_journal_delete +tv filter xDelete +proc copy_on_journal_delete {method filename args} { + if {[string match *journal $filename]} faultsim_save + return SQLITE_OK +} +faultsim_delete_and_reopen +do_execsql_test pager1.4.5.1 { + PRAGMA journal_mode = DELETE; + PRAGMA page_size = 1024; + CREATE TABLE t1(a, b); + CREATE TABLE t2(a, b); + INSERT INTO t1 VALUES('I', 'II'); + INSERT INTO t2 VALUES('III', 'IV'); + BEGIN; + INSERT INTO t1 VALUES(1, 2); + INSERT INTO t2 VALUES(3, 4); + COMMIT; +} {delete} +tv filter {} + +# Check the transaction was committed: +# +do_execsql_test pager1.4.5.2 { + SELECT * FROM t1; + SELECT * FROM t2; +} {I II 1 2 III IV 3 4} + +# Now try four tests: +# +# pager1-4.5.3: Restore the file-system. Check that the whole transaction +# is rolled back. +# +# pager1-4.5.4: Restore the file-system. Corrupt the first record in the +# journal. Check the transaction is not rolled back. +# +# pager1-4.5.5: Restore the file-system. Corrupt the second record in the +# journal. Check that the first record in the transaction is +# played back, but not the second. +# +# pager1-4.5.6: Restore the file-system. Try to open the database with a +# readonly connection. This should fail, as a read-only +# connection cannot roll back the database file. +# +faultsim_restore_and_reopen +do_execsql_test pager1.4.5.3 { + SELECT * FROM t1; + SELECT * FROM t2; +} {I II III IV} +faultsim_restore_and_reopen +hexio_write test.db-journal [expr 512+4+1024 - 202] 0123456789ABCDEF +do_execsql_test pager1.4.5.4 { + SELECT * FROM t1; + SELECT * FROM t2; +} {I II 1 2 III IV 3 4} +faultsim_restore_and_reopen +hexio_write test.db-journal [expr 512+4+1024+4+4+1024 - 202] 0123456789ABCDEF +do_execsql_test pager1.4.5.5 { + SELECT * FROM t1; + SELECT * FROM t2; +} {I II III IV 3 4} + +faultsim_restore_and_reopen +db close +sqlite3 db test.db -readonly 1 +do_catchsql_test pager1.4.5.6 { + SELECT * FROM t1; + SELECT * FROM t2; +} {1 {disk I/O error}} +db close + +# Snapshot the file-system just before multi-file commit. Save the name +# of the master journal file in $::mj_filename. 
+# +tv script copy_on_mj_delete +tv filter xDelete +proc copy_on_mj_delete {method filename args} { + if {[string match *mj* [file tail $filename]]} { + set ::mj_filename $filename + faultsim_save + } + return SQLITE_OK +} +do_test pager1.4.6.1 { + faultsim_delete_and_reopen + execsql { + PRAGMA journal_mode = DELETE; + ATTACH 'test.db2' AS two; + CREATE TABLE t1(a, b); + CREATE TABLE two.t2(a, b); + INSERT INTO t1 VALUES(1, 't1.1'); + INSERT INTO t2 VALUES(1, 't2.1'); + BEGIN; + UPDATE t1 SET b = 't1.2'; + UPDATE t2 SET b = 't2.2'; + COMMIT; + } + tv filter {} + db close +} {} + +faultsim_restore_and_reopen +do_execsql_test pager1.4.6.2 { SELECT * FROM t1 } {1 t1.1} +do_test pager1.4.6.3 { file exists $::mj_filename } {1} +do_execsql_test pager1.4.6.4 { + ATTACH 'test.db2' AS two; + SELECT * FROM t2; +} {1 t2.1} +do_test pager1.4.6.5 { file exists $::mj_filename } {0} + +faultsim_restore_and_reopen +db close +do_test pager1.4.6.8 { + set ::mj_filename1 $::mj_filename + tv filter xDelete + sqlite3 db test.db2 + execsql { + PRAGMA journal_mode = DELETE; + ATTACH 'test.db3' AS three; + CREATE TABLE three.t3(a, b); + INSERT INTO t3 VALUES(1, 't3.1'); + BEGIN; + UPDATE t2 SET b = 't2.3'; + UPDATE t3 SET b = 't3.3'; + COMMIT; + } + expr {$::mj_filename1 != $::mj_filename} +} {1} +faultsim_restore_and_reopen +tv filter {} + +# The file-system now contains: +# +# * three databases +# * three hot-journal files +# * two master-journal files. +# +# The hot-journals associated with test.db2 and test.db3 point to +# master journal $::mj_filename. The hot-journal file associated with +# test.db points to master journal $::mj_filename1. So reading from +# test.db should delete $::mj_filename1. +# +do_test pager1.4.6.9 { + lsort [glob test.db*] +} [lsort [list \ + test.db test.db2 test.db3 \ + test.db-journal test.db2-journal test.db3-journal \ + [file tail $::mj_filename] [file tail $::mj_filename1] +]] + +# The master-journal $::mj_filename1 contains pointers to test.db and +# test.db2. However the hot-journal associated with test.db2 points to +# a different master-journal. Therefore, reading from test.db only should +# be enough to cause SQLite to delete $::mj_filename1. 
+# +do_test pager1.4.6.10 { file exists $::mj_filename } {1} +do_test pager1.4.6.11 { file exists $::mj_filename1 } {1} +do_execsql_test pager1.4.6.12 { SELECT * FROM t1 } {1 t1.1} +do_test pager1.4.6.13 { file exists $::mj_filename } {1} +do_test pager1.4.6.14 { file exists $::mj_filename1 } {0} + +do_execsql_test pager1.4.6.12 { + ATTACH 'test.db2' AS two; + SELECT * FROM t2; +} {1 t2.1} +do_test pager1.4.6.13 { file exists $::mj_filename } {1} +do_execsql_test pager1.4.6.14 { + ATTACH 'test.db3' AS three; + SELECT * FROM t3; +} {1 t3.1} +do_test pager1.4.6.15 { file exists $::mj_filename } {0} + +db close +tv delete + +testvfs tv -default 1 +tv sectorsize 512 +tv script copy_on_journal_delete +tv filter xDelete +proc copy_on_journal_delete {method filename args} { + if {[string match *journal $filename]} faultsim_save + return SQLITE_OK +} +faultsim_delete_and_reopen +do_execsql_test pager1.4.7.1 { + PRAGMA journal_mode = DELETE; + CREATE TABLE t1(x PRIMARY KEY, y); + CREATE INDEX i1 ON t1(y); + INSERT INTO t1 VALUES('I', 'one'); + INSERT INTO t1 VALUES('II', 'four'); + INSERT INTO t1 VALUES('III', 'nine'); + BEGIN; + INSERT INTO t1 VALUES('IV', 'sixteen'); + INSERT INTO t1 VALUES('V' , 'twentyfive'); + COMMIT; +} {delete} +tv filter {} +db close +tv delete +do_test pager1.4.7.2 { + faultsim_restore_and_reopen + catch {file attributes test.db-journal -permissions r--------} + catch {file attributes test.db-journal -readonly 1} + catchsql { SELECT * FROM t1 } +} {1 {unable to open database file}} +do_test pager1.4.7.3 { + db close + catch {file attributes test.db-journal -permissions rw-rw-rw-} + catch {file attributes test.db-journal -readonly 0} + file delete test.db-journal + file exists test.db-journal +} {0} + +#------------------------------------------------------------------------- +# The following tests deal with multi-file commits. +# +# pager1-5.1.*: The case where a multi-file cannot be committed because +# another connection is holding a SHARED lock on one of the +# files. After the SHARED lock is removed, the COMMIT succeeds. +# +# pager1-5.2.*: Multi-file commits with journal_mode=memory. +# +# pager1-5.3.*: Multi-file commits with journal_mode=memory. +# +# pager1-5.4.*: Check that with synchronous=normal, the master-journal file +# name is added to a journal file immediately after the last +# journal record. But with synchronous=full, extra unused space +# is allocated between the last journal record and the +# master-journal file name so that the master-journal file +# name does not lie on the same sector as the last journal file +# record. +# +# pager1-5.5.*: Check that in journal_mode=PERSIST mode, a journal file is +# truncated to zero bytes when a multi-file transaction is +# committed (instead of the first couple of bytes being zeroed). 
+# +# +do_test pager1-5.1.1 { + faultsim_delete_and_reopen + execsql { + ATTACH 'test.db2' AS aux; + CREATE TABLE t1(a, b); + CREATE TABLE aux.t2(a, b); + INSERT INTO t1 VALUES(17, 'Lenin'); + INSERT INTO t1 VALUES(22, 'Stalin'); + INSERT INTO t1 VALUES(53, 'Khrushchev'); + } +} {} +do_test pager1-5.1.2 { + execsql { + BEGIN; + INSERT INTO t1 VALUES(64, 'Brezhnev'); + INSERT INTO t2 SELECT * FROM t1; + } + sqlite3 db2 test.db2 + execsql { + BEGIN; + SELECT * FROM t2; + } db2 +} {} +do_test pager1-5.1.3 { + catchsql COMMIT +} {1 {database is locked}} +do_test pager1-5.1.4 { + execsql COMMIT db2 + execsql COMMIT + execsql { SELECT * FROM t2 } db2 +} {17 Lenin 22 Stalin 53 Khrushchev 64 Brezhnev} +do_test pager1-5.1.5 { + db2 close +} {} + +do_test pager1-5.2.1 { + execsql { + PRAGMA journal_mode = memory; + BEGIN; + INSERT INTO t1 VALUES(84, 'Andropov'); + INSERT INTO t2 VALUES(84, 'Andropov'); + COMMIT; + } +} {memory} +do_test pager1-5.3.1 { + execsql { + PRAGMA journal_mode = off; + BEGIN; + INSERT INTO t1 VALUES(85, 'Gorbachev'); + INSERT INTO t2 VALUES(85, 'Gorbachev'); + COMMIT; + } +} {off} + +do_test pager1-5.4.1 { + db close + testvfs tv + sqlite3 db test.db -vfs tv + execsql { ATTACH 'test.db2' AS aux } + + tv filter xDelete + tv script max_journal_size + tv sectorsize 512 + set ::max_journal 0 + proc max_journal_size {method args} { + set sz 0 + catch { set sz [file size test.db-journal] } + if {$sz > $::max_journal} { + set ::max_journal $sz + } + return SQLITE_OK + } + execsql { + PRAGMA journal_mode = DELETE; + PRAGMA synchronous = NORMAL; + BEGIN; + INSERT INTO t1 VALUES(85, 'Gorbachev'); + INSERT INTO t2 VALUES(85, 'Gorbachev'); + COMMIT; + } + set ::max_journal +} [expr 2615+[string length [pwd]]] +do_test pager1-5.4.2 { + set ::max_journal 0 + execsql { + PRAGMA synchronous = full; + BEGIN; + DELETE FROM t1 WHERE b = 'Lenin'; + DELETE FROM t2 WHERE b = 'Lenin'; + COMMIT; + } + set ::max_journal +} [expr 3111+[string length [pwd]]] +db close +tv delete + +do_test pager1-5.5.1 { + sqlite3 db test.db + execsql { + ATTACH 'test.db2' AS aux; + PRAGMA journal_mode = PERSIST; + CREATE TABLE t3(a, b); + INSERT INTO t3 SELECT randomblob(1500), randomblob(1500) FROM t1; + UPDATE t3 SET b = randomblob(1500); + } + expr [file size test.db-journal] > 15000 +} {1} +do_test pager1-5.5.2 { + execsql { + PRAGMA synchronous = full; + BEGIN; + DELETE FROM t1 WHERE b = 'Stalin'; + DELETE FROM t2 WHERE b = 'Stalin'; + COMMIT; + } + file size test.db-journal +} {0} + + +#------------------------------------------------------------------------- +# The following tests work with "PRAGMA max_page_count" +# +do_test pager1-6.1 { + faultsim_delete_and_reopen + execsql { + PRAGMA auto_vacuum = none; + PRAGMA max_page_count = 10; + CREATE TABLE t2(a, b); + CREATE TABLE t3(a, b); + CREATE TABLE t4(a, b); + CREATE TABLE t5(a, b); + CREATE TABLE t6(a, b); + CREATE TABLE t7(a, b); + CREATE TABLE t8(a, b); + CREATE TABLE t9(a, b); + CREATE TABLE t10(a, b); + } +} {10} +do_catchsql_test pager1-6.2 { + CREATE TABLE t11(a, b) +} {1 {database or disk is full}} +do_execsql_test pager1-6.4 { PRAGMA max_page_count } {10} +do_execsql_test pager1-6.5 { PRAGMA max_page_count = 15 } {15} +do_execsql_test pager1-6.6 { CREATE TABLE t11(a, b) } {} +do_execsql_test pager1-6.7 { + BEGIN; + INSERT INTO t11 VALUES(1, 2); + PRAGMA max_page_count = 13; +} {13} +do_execsql_test pager1-6.8 { + INSERT INTO t11 VALUES(3, 4); + PRAGMA max_page_count = 10; +} {11} +do_execsql_test pager1-6.9 { COMMIT } {} + + 
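+# The following is an illustrative sketch (added for clarity, not part of
+# the original pager1-6 test plan): the clamping behaviour exercised by
+# pager1-6.8 above can be checked without depending on the exact database
+# size, because a request to lower max_page_count below the current page
+# count always leaves the limit at or above that page count.
+#
+do_test pager1-6.10 {
+  set nPage [db one { PRAGMA page_count }]
+  set nMax  [db one { PRAGMA max_page_count = 5 }]
+  expr {$nMax >= $nPage}
+} {1}
+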
+#------------------------------------------------------------------------- +# The following tests work with "PRAGMA journal_mode=TRUNCATE" and +# "PRAGMA locking_mode=EXCLUSIVE". +# +# Each test is specified with 5 variables. As follows: +# +# $tn: Test Number. Used as part of the [do_test] test names. +# $sql: SQL to execute. +# $res: Expected result of executing $sql. +# $js: The expected size of the journal file, in bytes, after executing +# the SQL script. Or -1 if the journal is not expected to exist. +# $ws: The expected size of the WAL file, in bytes, after executing +# the SQL script. Or -1 if the WAL is not expected to exist. +# +ifcapable wal { + faultsim_delete_and_reopen + foreach {tn sql res js ws} [subst { + + 1 { + CREATE TABLE t1(a, b); + PRAGMA auto_vacuum=OFF; + PRAGMA synchronous=NORMAL; + PRAGMA page_size=1024; + PRAGMA locking_mode=EXCLUSIVE; + PRAGMA journal_mode=TRUNCATE; + INSERT INTO t1 VALUES(1, 2); + } {exclusive truncate} 0 -1 + + 2 { + BEGIN IMMEDIATE; + SELECT * FROM t1; + COMMIT; + } {1 2} 0 -1 + + 3 { + BEGIN; + SELECT * FROM t1; + COMMIT; + } {1 2} 0 -1 + + 4 { PRAGMA journal_mode = WAL } wal -1 -1 + 5 { INSERT INTO t1 VALUES(3, 4) } {} -1 [wal_file_size 1 1024] + 6 { PRAGMA locking_mode = NORMAL } normal -1 [wal_file_size 1 1024] + 7 { INSERT INTO t1 VALUES(5, 6); } {} -1 [wal_file_size 2 1024] + + 8 { PRAGMA journal_mode = TRUNCATE } truncate 0 -1 + 9 { INSERT INTO t1 VALUES(7, 8) } {} 0 -1 + 10 { SELECT * FROM t1 } {1 2 3 4 5 6 7 8} 0 -1 + + }] { + do_execsql_test pager1-7.1.$tn.1 $sql $res + catch { set J -1 ; set J [file size test.db-journal] } + catch { set W -1 ; set W [file size test.db-wal] } + do_test pager1-7.1.$tn.2 { list $J $W } [list $js $ws] + } +} + +do_test pager1-7.2.1 { + faultsim_delete_and_reopen + execsql { + PRAGMA locking_mode = EXCLUSIVE; + CREATE TABLE t1(a, b); + BEGIN; + PRAGMA journal_mode = delete; + PRAGMA journal_mode = truncate; + } +} {exclusive delete truncate} +do_test pager1-7.2.2 { + execsql { INSERT INTO t1 VALUES(1, 2) } + execsql { PRAGMA journal_mode = persist } +} {truncate} +do_test pager1-7.2.3 { + execsql { COMMIT } + execsql { + PRAGMA journal_mode = persist; + PRAGMA journal_size_limit; + } +} {persist -1} + +#------------------------------------------------------------------------- +# The following tests, pager1-8.*, test that the special filenames +# ":memory:" and "" open temporary databases. +# +foreach {tn filename} { + 1 :memory: + 2 "" +} { + do_test pager1-8.$tn.1 { + faultsim_delete_and_reopen + db close + sqlite3 db $filename + execsql { + PRAGMA auto_vacuum = 1; + CREATE TABLE x1(x); + INSERT INTO x1 VALUES('Charles'); + INSERT INTO x1 VALUES('James'); + INSERT INTO x1 VALUES('Mary'); + SELECT * FROM x1; + } + } {Charles James Mary} + + do_test pager1-8.$tn.2 { + sqlite3 db2 $filename + catchsql { SELECT * FROM x1 } db2 + } {1 {no such table: x1}} + + do_execsql_test pager1-8.$tn.3 { + BEGIN; + INSERT INTO x1 VALUES('William'); + INSERT INTO x1 VALUES('Anne'); + ROLLBACK; + } {} +} + +#------------------------------------------------------------------------- +# The next block of tests - pager1-9.* - deal with interactions between +# the pager and the backup API. Test cases: +# +# pager1-9.1.*: Test that a backup completes successfully even if the +# source db is written to during the backup op. +# +# pager1-9.2.*: Test that a backup completes successfully even if the +# source db is written to and then rolled back during a +# backup operation. 
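+#
+# The pager1-9.0.* tests below first build the source database and take a
+# straightforward backup of it into test.db2, establishing the baseline
+# that the pager1-9.1.* and pager1-9.2.* tests compare against using
+# md5sum().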
+# +do_test pager1-9.0.1 { + faultsim_delete_and_reopen + db func a_string a_string + execsql { + PRAGMA cache_size = 10; + BEGIN; + CREATE TABLE ab(a, b, UNIQUE(a, b)); + INSERT INTO ab VALUES( a_string(200), a_string(300) ); + INSERT INTO ab SELECT a_string(200), a_string(300) FROM ab; + INSERT INTO ab SELECT a_string(200), a_string(300) FROM ab; + INSERT INTO ab SELECT a_string(200), a_string(300) FROM ab; + INSERT INTO ab SELECT a_string(200), a_string(300) FROM ab; + INSERT INTO ab SELECT a_string(200), a_string(300) FROM ab; + INSERT INTO ab SELECT a_string(200), a_string(300) FROM ab; + INSERT INTO ab SELECT a_string(200), a_string(300) FROM ab; + COMMIT; + } +} {} +do_test pager1-9.0.2 { + sqlite3 db2 test.db2 + db2 eval { PRAGMA cache_size = 10 } + sqlite3_backup B db2 main db main + list [B step 10000] [B finish] +} {SQLITE_DONE SQLITE_OK} +do_test pager1-9.0.3 { + db one {SELECT md5sum(a, b) FROM ab} +} [db2 one {SELECT md5sum(a, b) FROM ab}] + +do_test pager1-9.1.1 { + execsql { UPDATE ab SET a = a_string(201) } + sqlite3_backup B db2 main db main + B step 30 +} {SQLITE_OK} +do_test pager1-9.1.2 { + execsql { UPDATE ab SET b = a_string(301) } + list [B step 10000] [B finish] +} {SQLITE_DONE SQLITE_OK} +do_test pager1-9.1.3 { + db one {SELECT md5sum(a, b) FROM ab} +} [db2 one {SELECT md5sum(a, b) FROM ab}] +do_test pager1-9.1.4 { execsql { SELECT count(*) FROM ab } } {128} + +do_test pager1-9.2.1 { + execsql { UPDATE ab SET a = a_string(202) } + sqlite3_backup B db2 main db main + B step 30 +} {SQLITE_OK} +do_test pager1-9.2.2 { + execsql { + BEGIN; + UPDATE ab SET b = a_string(301); + ROLLBACK; + } + list [B step 10000] [B finish] +} {SQLITE_DONE SQLITE_OK} +do_test pager1-9.2.3 { + db one {SELECT md5sum(a, b) FROM ab} +} [db2 one {SELECT md5sum(a, b) FROM ab}] +do_test pager1-9.2.4 { execsql { SELECT count(*) FROM ab } } {128} +db close +db2 close + +do_test pager1-9.3.1 { + testvfs tv -default 1 + tv sectorsize 4096 + faultsim_delete_and_reopen + + execsql { PRAGMA page_size = 1024 } + for {set ii 0} {$ii < 4} {incr ii} { execsql "CREATE TABLE t${ii}(a, b)" } +} {} +do_test pager1-9.3.2 { + sqlite3 db2 test.db2 + + execsql { + PRAGMA page_size = 4096; + PRAGMA synchronous = OFF; + CREATE TABLE t1(a, b); + CREATE TABLE t2(a, b); + } db2 + + sqlite3_backup B db2 main db main + B step 30 + list [B step 10000] [B finish] +} {SQLITE_DONE SQLITE_OK} +do_test pager1-9.3.3 { + db2 close + db close + tv delete + file size test.db2 +} [file size test.db] + +do_test pager1-9.4.1 { + faultsim_delete_and_reopen + sqlite3 db2 test.db2 + execsql { + PRAGMA page_size = 4096; + CREATE TABLE t1(a, b); + CREATE TABLE t2(a, b); + } db2 + sqlite3_backup B db2 main db main + list [B step 10000] [B finish] +} {SQLITE_DONE SQLITE_OK} +do_test pager1-9.4.2 { + list [file size test.db2] [file size test.db] +} {0 0} +db2 close + +#------------------------------------------------------------------------- +# Test that regardless of the value returned by xSectorSize(), the +# minimum effective sector-size is 512 and the maximum 65536 bytes. 
+# +testvfs tv -default 1 +foreach sectorsize { + 32 64 128 256 512 1024 2048 + 4096 8192 16384 32768 65536 131072 262144 +} { + tv sectorsize $sectorsize + set eff $sectorsize + if {$sectorsize < 512} { set eff 512 } + if {$sectorsize > 65536} { set eff 65536 } + + do_test pager1-10.$sectorsize.1 { + faultsim_delete_and_reopen + db func a_string a_string + execsql { + PRAGMA journal_mode = PERSIST; + PRAGMA page_size = 1024; + BEGIN; + CREATE TABLE t1(a, b); + CREATE TABLE t2(a, b); + CREATE TABLE t3(a, b); + COMMIT; + } + file size test.db-journal + } [expr $sectorsize > 65536 ? 65536 : $sectorsize] + + do_test pager1-10.$sectorsize.2 { + execsql { + INSERT INTO t3 VALUES(a_string(300), a_string(300)); + INSERT INTO t3 SELECT * FROM t3; /* 2 */ + INSERT INTO t3 SELECT * FROM t3; /* 4 */ + INSERT INTO t3 SELECT * FROM t3; /* 8 */ + INSERT INTO t3 SELECT * FROM t3; /* 16 */ + INSERT INTO t3 SELECT * FROM t3; /* 32 */ + } + } {} + + do_test pager1-10.$sectorsize.3 { + db close + sqlite3 db test.db + execsql { + PRAGMA cache_size = 10; + BEGIN; + } + recursive_select 32 t3 {db eval "INSERT INTO t2 VALUES(1, 2)"} + execsql { + COMMIT; + SELECT * FROM t2; + } + } {1 2} + + do_test pager1-10.$sectorsize.4 { + execsql { + CREATE TABLE t6(a, b); + CREATE TABLE t7(a, b); + CREATE TABLE t5(a, b); + DROP TABLE t6; + DROP TABLE t7; + } + execsql { + BEGIN; + CREATE TABLE t6(a, b); + } + recursive_select 32 t3 {db eval "INSERT INTO t5 VALUES(1, 2)"} + execsql { + COMMIT; + SELECT * FROM t5; + } + } {1 2} + +} +db close + +tv sectorsize 4096 +do_test pager1.10.x.1 { + faultsim_delete_and_reopen + execsql { + PRAGMA auto_vacuum = none; + PRAGMA page_size = 1024; + CREATE TABLE t1(x); + } + for {set i 0} {$i<30} {incr i} { + execsql { INSERT INTO t1 VALUES(zeroblob(900)) } + } + file size test.db +} {32768} +do_test pager1.10.x.2 { + execsql { + CREATE TABLE t2(x); + DROP TABLE t2; + } + file size test.db +} {33792} +do_test pager1.10.x.3 { + execsql { + BEGIN; + CREATE TABLE t2(x); + } + recursive_select 30 t1 + execsql { + CREATE TABLE t3(x); + COMMIT; + } +} {} + +db close +tv delete + +testvfs tv -default 1 +faultsim_delete_and_reopen +db func a_string a_string +do_execsql_test pager1-11.1 { + PRAGMA journal_mode = DELETE; + PRAGMA cache_size = 10; + BEGIN; + CREATE TABLE zz(top PRIMARY KEY); + INSERT INTO zz VALUES(a_string(222)); + INSERT INTO zz SELECT a_string((SELECT 222+max(rowid) FROM zz)) FROM zz; + INSERT INTO zz SELECT a_string((SELECT 222+max(rowid) FROM zz)) FROM zz; + INSERT INTO zz SELECT a_string((SELECT 222+max(rowid) FROM zz)) FROM zz; + INSERT INTO zz SELECT a_string((SELECT 222+max(rowid) FROM zz)) FROM zz; + INSERT INTO zz SELECT a_string((SELECT 222+max(rowid) FROM zz)) FROM zz; + COMMIT; + BEGIN; + UPDATE zz SET top = a_string(345); +} {delete} + +proc lockout {method args} { return SQLITE_IOERR } +tv script lockout +tv filter {xWrite xTruncate xSync} +do_catchsql_test pager1-11.2 { COMMIT } {1 {disk I/O error}} + +tv script {} +do_test pager1-11.3 { + sqlite3 db2 test.db + execsql { + PRAGMA journal_mode = TRUNCATE; + PRAGMA integrity_check; + } db2 +} {truncate ok} +do_test pager1-11.4 { + db2 close + file exists test.db-journal +} {0} +do_execsql_test pager1-11.5 { SELECT count(*) FROM zz } {32} +db close +tv delete + +#------------------------------------------------------------------------- +# Test "PRAGMA page_size" +# +testvfs tv -default 1 +tv sectorsize 1024 +foreach pagesize { + 512 1024 2048 4096 8192 16384 32768 +} { + faultsim_delete_and_reopen + + # The sector-size 
(according to the VFS) is 1024 bytes. So if the + # page-size requested using "PRAGMA page_size" is greater than the + # compile time value of SQLITE_MAX_PAGE_SIZE, then the effective + # page-size remains 1024 bytes. + # + set eff $pagesize + if {$eff > $::SQLITE_MAX_PAGE_SIZE} { set eff 1024 } + + do_test pager1-12.$pagesize.1 { + sqlite3 db2 test.db + execsql " + PRAGMA page_size = $pagesize; + CREATE VIEW v AS SELECT * FROM sqlite_master; + " db2 + file size test.db + } $eff + do_test pager1-12.$pagesize.2 { + sqlite3 db2 test.db + execsql { + SELECT count(*) FROM v; + PRAGMA main.page_size; + } db2 + } [list 1 $eff] + do_test pager1-12.$pagesize.3 { + execsql { + SELECT count(*) FROM v; + PRAGMA main.page_size; + } + } [list 1 $eff] + db2 close +} +db close +tv delete + +#------------------------------------------------------------------------- +# Test specal "PRAGMA journal_mode=PERSIST" test cases. +# +# pager1-13.1.*: This tests a special case encountered in persistent +# journal mode: If the journal associated with a transaction +# is smaller than the journal file (because a previous +# transaction left a very large non-hot journal file in the +# file-system), then SQLite has to be careful that there is +# not a journal-header left over from a previous transaction +# immediately following the journal content just written. +# If there is, and the process crashes so that the journal +# becomes a hot-journal and must be rolled back by another +# process, there is a danger that the other process may roll +# back the aborted transaction, then continue copying data +# from an older transaction from the remainder of the journal. +# See the syncJournal() function for details. +# +# pager1-13.2.*: Same test as the previous. This time, throw an index into +# the mix to make the integrity-check more likely to catch +# errors. +# +testvfs tv -default 1 +tv script xSyncCb +tv filter xSync +proc xSyncCb {method filename args} { + set t [file tail $filename] + if {$t == "test.db"} faultsim_save + return SQLITE_OK +} +faultsim_delete_and_reopen +db func a_string a_string + +# The UPDATE statement at the end of this test case creates a really big +# journal. Since the cache-size is only 10 pages, the journal contains +# frequent journal headers. +# +do_execsql_test pager1-13.1.1 { + PRAGMA page_size = 1024; + PRAGMA journal_mode = PERSIST; + PRAGMA cache_size = 10; + BEGIN; + CREATE TABLE t1(a INTEGER PRIMARY KEY, b BLOB); + INSERT INTO t1 VALUES(NULL, a_string(400)); + INSERT INTO t1 SELECT NULL, a_string(400) FROM t1; /* 2 */ + INSERT INTO t1 SELECT NULL, a_string(400) FROM t1; /* 4 */ + INSERT INTO t1 SELECT NULL, a_string(400) FROM t1; /* 8 */ + INSERT INTO t1 SELECT NULL, a_string(400) FROM t1; /* 16 */ + INSERT INTO t1 SELECT NULL, a_string(400) FROM t1; /* 32 */ + INSERT INTO t1 SELECT NULL, a_string(400) FROM t1; /* 64 */ + INSERT INTO t1 SELECT NULL, a_string(400) FROM t1; /* 128 */ + COMMIT; + UPDATE t1 SET b = a_string(400); +} {persist} + +# Run transactions of increasing sizes. Eventually, one (or more than one) +# of these will write just enough content that one of the old headers created +# by the transaction in the block above lies immediately after the content +# journalled by the current transaction. +# +for {set nUp 1} {$nUp<64} {incr nUp} { + do_execsql_test pager1-13.1.2.$nUp.1 { + UPDATE t1 SET b = a_string(399) WHERE a <= $nUp + } {} + do_execsql_test pager1-13.1.2.$nUp.2 { PRAGMA integrity_check } {ok} + + # Try to access the snapshot of the file-system. 
+ # + sqlite3 db2 sv_test.db + do_test pager1-13.1.2.$nUp.3 { + execsql { SELECT sum(length(b)) FROM t1 } db2 + } [expr {128*400 - ($nUp-1)}] + do_test pager1-13.1.2.$nUp.4 { + execsql { PRAGMA integrity_check } db2 + } {ok} + db2 close +} + +# Same test as above. But this time with an index on the table. +# +do_execsql_test pager1-13.2.1 { + CREATE INDEX i1 ON t1(b); + UPDATE t1 SET b = a_string(400); +} {} +for {set nUp 1} {$nUp<64} {incr nUp} { + do_execsql_test pager1-13.2.2.$nUp.1 { + UPDATE t1 SET b = a_string(399) WHERE a <= $nUp + } {} + do_execsql_test pager1-13.2.2.$nUp.2 { PRAGMA integrity_check } {ok} + sqlite3 db2 sv_test.db + do_test pager1-13.2.2.$nUp.3 { + execsql { SELECT sum(length(b)) FROM t1 } db2 + } [expr {128*400 - ($nUp-1)}] + do_test pager1-13.2.2.$nUp.4 { + execsql { PRAGMA integrity_check } db2 + } {ok} + db2 close +} + +db close +tv delete + +#------------------------------------------------------------------------- +# Test specal "PRAGMA journal_mode=OFF" test cases. +# +faultsim_delete_and_reopen +do_execsql_test pager1-14.1.1 { + PRAGMA journal_mode = OFF; + CREATE TABLE t1(a, b); + BEGIN; + INSERT INTO t1 VALUES(1, 2); + COMMIT; + SELECT * FROM t1; +} {off 1 2} +do_catchsql_test pager1-14.1.2 { + BEGIN; + INSERT INTO t1 VALUES(3, 4); + ROLLBACK; +} {0 {}} +do_execsql_test pager1-14.1.3 { + SELECT * FROM t1; +} {1 2 3 4} +do_catchsql_test pager1-14.1.4 { + BEGIN; + INSERT INTO t1(rowid, a, b) SELECT a+3, b, b FROM t1; + INSERT INTO t1(rowid, a, b) SELECT a+3, b, b FROM t1; +} {1 {PRIMARY KEY must be unique}} +do_execsql_test pager1-14.1.5 { + COMMIT; + SELECT * FROM t1; +} {1 2 3 4 2 2 4 4} + +#------------------------------------------------------------------------- +# Test opening and closing the pager sub-system with different values +# for the sqlite3_vfs.szOsFile variable. +# +faultsim_delete_and_reopen +do_execsql_test pager1-15.0 { + CREATE TABLE tx(y, z); + INSERT INTO tx VALUES('Ayutthaya', 'Beijing'); + INSERT INTO tx VALUES('London', 'Tokyo'); +} {} +db close +for {set i 0} {$i<513} {incr i 3} { + testvfs tv -default 1 -szosfile $i + sqlite3 db test.db + do_execsql_test pager1-15.$i.1 { + SELECT * FROM tx; + } {Ayutthaya Beijing London Tokyo} + db close + tv delete +} + +#------------------------------------------------------------------------- +# Check that it is not possible to open a database file if the full path +# to the associated journal file will be longer than sqlite3_vfs.mxPathname. +# +testvfs tv -default 1 +tv script xOpenCb +tv filter xOpen +proc xOpenCb {method filename} { + set ::file_len [string length $filename] +} +sqlite3 db test.db +db close +tv delete + +for {set ii [expr $::file_len-5]} {$ii < [expr $::file_len+20]} {incr ii} { + testvfs tv -default 1 -mxpathname $ii + + # The length of the full path to file "test.db-journal" is ($::file_len+8). + # If the configured sqlite3_vfs.mxPathname value greater than or equal to + # this, then the file can be opened. Otherwise, it cannot. + # + if {$ii >= [expr $::file_len+8]} { + set res {0 {}} + } else { + set res {1 {unable to open database file}} + } + + do_test pager1-16.1.$ii { + list [catch { sqlite3 db test.db } msg] $msg + } $res + + catch {db close} + tv delete +} + +#------------------------------------------------------------------------- +# Test "PRAGMA omit_readlock". +# +# pager1-17.$tn.1.*: Test that if a second connection has an open +# read-transaction, it is not usually possible to write +# the database. 
+# +# pager1-17.$tn.2.*: Test that if the second connection was opened with +# the SQLITE_OPEN_READONLY flag, and +# "PRAGMA omit_readlock = 1" is executed before attaching +# the database and opening a read-transaction on it, it is +# possible to write the db. +# +# pager1-17.$tn.3.*: Test that if the second connection was *not* opened with +# the SQLITE_OPEN_READONLY flag, executing +# "PRAGMA omit_readlock = 1" has no effect. +# +do_multiclient_test tn { + do_test pager1-17.$tn.1.1 { + sql1 { + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + } + sql2 { + BEGIN; + SELECT * FROM t1; + } + } {1 2} + do_test pager1-17.$tn.1.2 { + csql1 { INSERT INTO t1 VALUES(3, 4) } + } {1 {database is locked}} + do_test pager1-17.$tn.1.3 { + sql2 { COMMIT } + sql1 { INSERT INTO t1 VALUES(3, 4) } + } {} + + do_test pager1-17.$tn.2.1 { + code2 { + db2 close + sqlite3 db2 :memory: -readonly 1 + } + sql2 { + PRAGMA omit_readlock = 1; + ATTACH 'test.db' AS two; + BEGIN; + SELECT * FROM t1; + } + } {1 2 3 4} + do_test pager1-17.$tn.2.2 { sql1 "INSERT INTO t1 VALUES(5, 6)" } {} + do_test pager1-17.$tn.2.3 { sql2 "SELECT * FROM t1" } {1 2 3 4} + do_test pager1-17.$tn.2.4 { sql2 "COMMIT ; SELECT * FROM t1" } {1 2 3 4 5 6} + + do_test pager1-17.$tn.3.1 { + code2 { + db2 close + sqlite3 db2 :memory: + } + sql2 { + PRAGMA omit_readlock = 1; + ATTACH 'test.db' AS two; + BEGIN; + SELECT * FROM t1; + } + } {1 2 3 4 5 6} + do_test pager1-17.$tn.3.2 { + csql1 { INSERT INTO t1 VALUES(3, 4) } + } {1 {database is locked}} + do_test pager1-17.$tn.3.3 { sql2 COMMIT } {} +} + +#------------------------------------------------------------------------- +# Test the pagers response to the b-tree layer requesting illegal page +# numbers: +# +# + The locking page, +# + Page 0, +# + A page with a page number greater than (2^31-1). 
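+#
+# (The "locking page" is the database page that overlaps the pending-byte
+# lock region; SQLite never stores content on it. The tests below compute
+# its page number from the pending-byte offset used by the test harness
+# and the 1024 byte page size, then point a table's rootpage at it.)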
+# +do_test pager1-18.1 { + faultsim_delete_and_reopen + db func a_string a_string + execsql { + PRAGMA page_size = 1024; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(a_string(500), a_string(200)); + INSERT INTO t1 SELECT a_string(500), a_string(200) FROM t1; + INSERT INTO t1 SELECT a_string(500), a_string(200) FROM t1; + INSERT INTO t1 SELECT a_string(500), a_string(200) FROM t1; + INSERT INTO t1 SELECT a_string(500), a_string(200) FROM t1; + INSERT INTO t1 SELECT a_string(500), a_string(200) FROM t1; + INSERT INTO t1 SELECT a_string(500), a_string(200) FROM t1; + INSERT INTO t1 SELECT a_string(500), a_string(200) FROM t1; + } +} {} +do_test pager1-18.2 { + set root [db one "SELECT rootpage FROM sqlite_master"] + set lockingpage [expr (0x10000/1024) + 1] + execsql { + PRAGMA writable_schema = 1; + UPDATE sqlite_master SET rootpage = $lockingpage; + } + sqlite3 db2 test.db + catchsql { SELECT count(*) FROM t1 } db2 +} {1 {database disk image is malformed}} +db2 close +do_test pager1-18.3 { + execsql { + CREATE TABLE t2(x); + INSERT INTO t2 VALUES(a_string(5000)); + } + set pgno [expr ([file size test.db] / 1024)-2] + hexio_write test.db [expr ($pgno-1)*1024] 00000000 + sqlite3 db2 test.db + catchsql { SELECT length(x) FROM t2 } db2 +} {1 {database disk image is malformed}} +db2 close +do_test pager1-18.4 { + hexio_write test.db [expr ($pgno-1)*1024] 90000000 + sqlite3 db2 test.db + catchsql { SELECT length(x) FROM t2 } db2 +} {1 {database disk image is malformed}} +db2 close +do_test pager1-18.5 { + sqlite3 db "" + execsql { + CREATE TABLE t1(a, b); + CREATE TABLE t2(a, b); + PRAGMA writable_schema = 1; + UPDATE sqlite_master SET rootpage=5 WHERE tbl_name = 't1'; + PRAGMA writable_schema = 0; + ALTER TABLE t1 RENAME TO x1; + } + catchsql { SELECT * FROM x1 } +} {1 {database disk image is malformed}} +db close + +do_test pager1-18.6 { + faultsim_delete_and_reopen + db func a_string a_string + execsql { + PRAGMA page_size = 1024; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(a_string(800)); + INSERT INTO t1 VALUES(a_string(800)); + } + + set root [db one "SELECT rootpage FROM sqlite_master"] + db close + + hexio_write test.db [expr ($root-1)*1024 + 8] 00000000 + sqlite3 db test.db + catchsql { SELECT length(x) FROM t1 } +} {1 {database disk image is malformed}} + +do_test pager1-19.1 { + sqlite3 db "" + db func a_string a_string + execsql { + PRAGMA page_size = 512; + PRAGMA auto_vacuum = 1; + CREATE TABLE t1(aa, ab, ac, ad, ae, af, ag, ah, ai, aj, ak, al, am, an, + ba, bb, bc, bd, be, bf, bg, bh, bi, bj, bk, bl, bm, bn, + ca, cb, cc, cd, ce, cf, cg, ch, ci, cj, ck, cl, cm, cn, + da, db, dc, dd, de, df, dg, dh, di, dj, dk, dl, dm, dn, + ea, eb, ec, ed, ee, ef, eg, eh, ei, ej, ek, el, em, en, + fa, fb, fc, fd, fe, ff, fg, fh, fi, fj, fk, fl, fm, fn, + ga, gb, gc, gd, ge, gf, gg, gh, gi, gj, gk, gl, gm, gn, + ha, hb, hc, hd, he, hf, hg, hh, hi, hj, hk, hl, hm, hn, + ia, ib, ic, id, ie, if, ig, ih, ii, ij, ik, il, im, ix, + ja, jb, jc, jd, je, jf, jg, jh, ji, jj, jk, jl, jm, jn, + ka, kb, kc, kd, ke, kf, kg, kh, ki, kj, kk, kl, km, kn, + la, lb, lc, ld, le, lf, lg, lh, li, lj, lk, ll, lm, ln, + ma, mb, mc, md, me, mf, mg, mh, mi, mj, mk, ml, mm, mn + ); + CREATE TABLE t2(aa, ab, ac, ad, ae, af, ag, ah, ai, aj, ak, al, am, an, + ba, bb, bc, bd, be, bf, bg, bh, bi, bj, bk, bl, bm, bn, + ca, cb, cc, cd, ce, cf, cg, ch, ci, cj, ck, cl, cm, cn, + da, db, dc, dd, de, df, dg, dh, di, dj, dk, dl, dm, dn, + ea, eb, ec, ed, ee, ef, eg, eh, ei, ej, ek, el, em, en, + fa, fb, fc, fd, fe, ff, fg, fh, fi, 
fj, fk, fl, fm, fn, + ga, gb, gc, gd, ge, gf, gg, gh, gi, gj, gk, gl, gm, gn, + ha, hb, hc, hd, he, hf, hg, hh, hi, hj, hk, hl, hm, hn, + ia, ib, ic, id, ie, if, ig, ih, ii, ij, ik, il, im, ix, + ja, jb, jc, jd, je, jf, jg, jh, ji, jj, jk, jl, jm, jn, + ka, kb, kc, kd, ke, kf, kg, kh, ki, kj, kk, kl, km, kn, + la, lb, lc, ld, le, lf, lg, lh, li, lj, lk, ll, lm, ln, + ma, mb, mc, md, me, mf, mg, mh, mi, mj, mk, ml, mm, mn + ); + INSERT INTO t1(aa) VALUES( a_string(100000) ); + INSERT INTO t2(aa) VALUES( a_string(100000) ); + VACUUM; + } +} {} + +#------------------------------------------------------------------------- +# Test a couple of special cases that come up while committing +# transactions: +# +# pager1-20.1.*: Committing an in-memory database transaction when the +# database has not been modified at all. +# +# pager1-20.2.*: As above, but with a normal db in exclusive-locking mode. +# +# pager1-20.3.*: Committing a transaction in WAL mode where the database has +# been modified, but all dirty pages have been flushed to +# disk before the commit. +# +do_test pager1-20.1.1 { + catch {db close} + sqlite3 db :memory: + execsql { + CREATE TABLE one(two, three); + INSERT INTO one VALUES('a', 'b'); + } +} {} +do_test pager1-20.1.2 { + execsql { + BEGIN EXCLUSIVE; + COMMIT; + } +} {} + +do_test pager1-20.2.1 { + faultsim_delete_and_reopen + execsql { + PRAGMA locking_mode = exclusive; + PRAGMA journal_mode = persist; + CREATE TABLE one(two, three); + INSERT INTO one VALUES('a', 'b'); + } +} {exclusive persist} +do_test pager1-20.2.2 { + execsql { + BEGIN EXCLUSIVE; + COMMIT; + } +} {} + +do_test pager1-20.3.1 { + faultsim_delete_and_reopen + db func a_string a_string + execsql { + PRAGMA cache_size = 10; + PRAGMA journal_mode = wal; + BEGIN; + CREATE TABLE t1(x); + CREATE TABLE t2(y); + INSERT INTO t1 VALUES(a_string(800)); + INSERT INTO t1 SELECT a_string(800) FROM t1; /* 2 */ + INSERT INTO t1 SELECT a_string(800) FROM t1; /* 4 */ + INSERT INTO t1 SELECT a_string(800) FROM t1; /* 8 */ + INSERT INTO t1 SELECT a_string(800) FROM t1; /* 16 */ + INSERT INTO t1 SELECT a_string(800) FROM t1; /* 32 */ + COMMIT; + } +} {wal} +do_test pager1-20.3.2 { + execsql { + BEGIN; + INSERT INTO t2 VALUES('xxxx'); + } + recursive_select 32 t1 + execsql COMMIT +} {} + +#------------------------------------------------------------------------- +# Test that a WAL database may not be opened if: +# +# pager1-21.1.*: The VFS has an iVersion less than 2, or +# pager1-21.2.*: The VFS does not provide xShmXXX() methods. +# +do_test pager1-21.0 { + faultsim_delete_and_reopen + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE ko(c DEFAULT 'abc', b DEFAULT 'def'); + INSERT INTO ko DEFAULT VALUES; + } +} {wal} +do_test pager1-21.1 { + testvfs tv -noshm 1 + sqlite3 db2 test.db -vfs tv + catchsql { SELECT * FROM ko } db2 +} {1 {unable to open database file}} +db2 close +tv delete +do_test pager1-21.2 { + testvfs tv -iversion 1 + sqlite3 db2 test.db -vfs tv + catchsql { SELECT * FROM ko } db2 +} {1 {unable to open database file}} +db2 close +tv delete + +#------------------------------------------------------------------------- +# Test that a "PRAGMA wal_checkpoint": +# +# pager1-22.1.*: is a no-op on a non-WAL db, and +# pager1-22.2.*: does not cause xSync calls with a synchronous=off db. 
+# +do_test pager1-22.1.1 { + faultsim_delete_and_reopen + execsql { + CREATE TABLE ko(c DEFAULT 'abc', b DEFAULT 'def'); + INSERT INTO ko DEFAULT VALUES; + } + execsql { PRAGMA wal_checkpoint } +} {} +do_test pager1-22.2.1 { + testvfs tv -default 1 + tv filter xSync + tv script xSyncCb + proc xSyncCb {args} {incr ::synccount} + set ::synccount 0 + sqlite3 db test.db + execsql { + PRAGMA synchronous = off; + PRAGMA journal_mode = WAL; + INSERT INTO ko DEFAULT VALUES; + } + execsql { PRAGMA wal_checkpoint } + set synccount +} {0} +db close +tv delete + +#------------------------------------------------------------------------- +# Tests for changing journal mode. +# +# pager1-23.1.*: Test that when changing from PERSIST to DELETE mode, +# the journal file is deleted. +# +# pager1-23.2.*: Same test as above, but while a shared lock is held +# on the database file. +# +# pager1-23.3.*: Same test as above, but while a reserved lock is held +# on the database file. +# +# pager1-23.4.*: And, for fun, while holding an exclusive lock. +# +# pager1-23.5.*: Try to set various different journal modes with an +# in-memory database (only MEMORY and OFF should work). +# +# pager1-23.6.*: Try to set locking_mode=normal on an in-memory database +# (doesn't work - in-memory databases always use +# locking_mode=exclusive). +# +do_test pager1-23.1.1 { + faultsim_delete_and_reopen + execsql { + PRAGMA journal_mode = PERSIST; + CREATE TABLE t1(a, b); + } + file exists test.db-journal +} {1} +do_test pager1-23.1.2 { + execsql { PRAGMA journal_mode = DELETE } + file exists test.db-journal +} {0} + +do_test pager1-23.2.1 { + execsql { + PRAGMA journal_mode = PERSIST; + INSERT INTO t1 VALUES('Canberra', 'ACT'); + } + db eval { SELECT * FROM t1 } { + db eval { PRAGMA journal_mode = DELETE } + } + execsql { PRAGMA journal_mode } +} {delete} +do_test pager1-23.2.2 { + file exists test.db-journal +} {0} + +do_test pager1-23.3.1 { + execsql { + PRAGMA journal_mode = PERSIST; + INSERT INTO t1 VALUES('Darwin', 'NT'); + BEGIN IMMEDIATE; + } + db eval { PRAGMA journal_mode = DELETE } + execsql { PRAGMA journal_mode } +} {delete} +do_test pager1-23.3.2 { + file exists test.db-journal +} {0} +do_test pager1-23.3.3 { + execsql COMMIT +} {} + +do_test pager1-23.4.1 { + execsql { + PRAGMA journal_mode = PERSIST; + INSERT INTO t1 VALUES('Adelaide', 'SA'); + BEGIN EXCLUSIVE; + } + db eval { PRAGMA journal_mode = DELETE } + execsql { PRAGMA journal_mode } +} {delete} +do_test pager1-23.4.2 { + file exists test.db-journal +} {0} +do_test pager1-23.4.3 { + execsql COMMIT +} {} + +do_test pager1-23.5.1 { + faultsim_delete_and_reopen + sqlite3 db :memory: +} {} +foreach {tn mode possible} { + 2 off 1 + 3 memory 1 + 4 persist 0 + 5 delete 0 + 6 wal 0 + 7 truncate 0 +} { + do_test pager1-23.5.$tn.1 { + execsql "PRAGMA journal_mode = off" + execsql "PRAGMA journal_mode = $mode" + } [if $possible {list $mode} {list off}] + do_test pager1-23.5.$tn.2 { + execsql "PRAGMA journal_mode = memory" + execsql "PRAGMA journal_mode = $mode" + } [if $possible {list $mode} {list memory}] +} +do_test pager1-23.6.1 { + execsql {PRAGMA locking_mode = normal} +} {exclusive} +do_test pager1-23.6.2 { + execsql {PRAGMA locking_mode = exclusive} +} {exclusive} +do_test pager1-23.6.3 { + execsql {PRAGMA locking_mode} +} {exclusive} +do_test pager1-23.6.4 { + execsql {PRAGMA main.locking_mode} +} {exclusive} + +#------------------------------------------------------------------------- +# +do_test pager1-24.1.1 { + faultsim_delete_and_reopen + db func a_string 
a_string + execsql { + PRAGMA cache_size = 10; + PRAGMA auto_vacuum = FULL; + CREATE TABLE x1(x, y, z, PRIMARY KEY(y, z)); + CREATE TABLE x2(x, y, z, PRIMARY KEY(y, z)); + INSERT INTO x2 VALUES(a_string(400), a_string(500), a_string(600)); + INSERT INTO x2 SELECT a_string(600), a_string(400), a_string(500) FROM x2; + INSERT INTO x2 SELECT a_string(500), a_string(600), a_string(400) FROM x2; + INSERT INTO x2 SELECT a_string(400), a_string(500), a_string(600) FROM x2; + INSERT INTO x2 SELECT a_string(600), a_string(400), a_string(500) FROM x2; + INSERT INTO x2 SELECT a_string(500), a_string(600), a_string(400) FROM x2; + INSERT INTO x2 SELECT a_string(400), a_string(500), a_string(600) FROM x2; + INSERT INTO x1 SELECT * FROM x2; + } +} {} +do_test pager1-24.1.2 { + execsql { + BEGIN; + DELETE FROM x1 WHERE rowid<32; + } + recursive_select 64 x2 +} {} +do_test pager1-24.1.3 { + execsql { + UPDATE x1 SET z = a_string(300) WHERE rowid>40; + COMMIT; + PRAGMA integrity_check; + SELECT count(*) FROM x1; + } +} {ok 33} + +do_test pager1-24.1.4 { + execsql { + DELETE FROM x1; + INSERT INTO x1 SELECT * FROM x2; + BEGIN; + DELETE FROM x1 WHERE rowid<32; + UPDATE x1 SET z = a_string(299) WHERE rowid>40; + } + recursive_select 64 x2 {db eval COMMIT} + execsql { + PRAGMA integrity_check; + SELECT count(*) FROM x1; + } +} {ok 33} + +do_test pager1-24.1.5 { + execsql { + DELETE FROM x1; + INSERT INTO x1 SELECT * FROM x2; + } + recursive_select 64 x2 { db eval {CREATE TABLE x3(x, y, z)} } + execsql { SELECT * FROM x3 } +} {} + +#------------------------------------------------------------------------- +# +do_test pager1-25-1 { + faultsim_delete_and_reopen + execsql { + BEGIN; + SAVEPOINT abc; + CREATE TABLE t1(a, b); + ROLLBACK TO abc; + COMMIT; + } + db close +} {} +breakpoint +do_test pager1-25-2 { + faultsim_delete_and_reopen + execsql { + SAVEPOINT abc; + CREATE TABLE t1(a, b); + ROLLBACK TO abc; + COMMIT; + } + db close +} {} + +#------------------------------------------------------------------------- +# Sector-size tests. +# +do_test pager1-26.1 { + testvfs tv -default 1 + tv sectorsize 4096 + faultsim_delete_and_reopen + db func a_string a_string + execsql { + PRAGMA page_size = 512; + CREATE TABLE tbl(a PRIMARY KEY, b UNIQUE); + BEGIN; + INSERT INTO tbl VALUES(a_string(25), a_string(600)); + INSERT INTO tbl SELECT a_string(25), a_string(600) FROM tbl; + INSERT INTO tbl SELECT a_string(25), a_string(600) FROM tbl; + INSERT INTO tbl SELECT a_string(25), a_string(600) FROM tbl; + INSERT INTO tbl SELECT a_string(25), a_string(600) FROM tbl; + INSERT INTO tbl SELECT a_string(25), a_string(600) FROM tbl; + INSERT INTO tbl SELECT a_string(25), a_string(600) FROM tbl; + INSERT INTO tbl SELECT a_string(25), a_string(600) FROM tbl; + COMMIT; + } +} {} +do_execsql_test pager1-26.1 { + UPDATE tbl SET b = a_string(550); +} {} +db close +tv delete + +#------------------------------------------------------------------------- +do_test pager1.27.1 { + faultsim_delete_and_reopen + sqlite3_pager_refcounts db + execsql { + BEGIN; + CREATE TABLE t1(a, b); + } + sqlite3_pager_refcounts db + execsql COMMIT +} {} + +finish_test diff --git a/test/pager2.test b/test/pager2.test new file mode 100644 index 0000000..977d45f --- /dev/null +++ b/test/pager2.test @@ -0,0 +1,119 @@ +# 2010 June 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. 
+# May you share freely, never taking more than you give. +# +#*********************************************************************** +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/lock_common.tcl +source $testdir/malloc_common.tcl + +set otn 0 +testvfs tv -default 1 +foreach code [list { + set s 512 +} { + set s 1024 + set sql { PRAGMA journal_mode = memory } +} { + set s 1024 + set sql { + PRAGMA journal_mode = memory; + PRAGMA locking_mode = exclusive; + } +} { + set s 2048 + tv devchar safe_append +} { + set s 4096 +} { + set s 4096 + set sql { PRAGMA journal_mode = WAL } +} { + set s 4096 + set sql { PRAGMA auto_vacuum = 1 } +} { + set s 8192 + set sql { PRAGMA synchronous = off } +}] { + + incr otn + set sql "" + tv devchar {} + eval $code + tv sectorsize $s + + do_test pager2-1.$otn.0 { + faultsim_delete_and_reopen + execsql $sql + execsql { + PRAGMA cache_size = 10; + CREATE TABLE t1(i INTEGER PRIMARY KEY, j blob); + } + } {} + + set tn 0 + set lowpoint 0 + foreach x { + 100 x 0 100 + x + 70 22 96 59 96 50 22 56 21 16 37 64 43 40 0 38 22 38 55 0 6 + 43 62 32 93 54 18 13 29 45 66 29 25 61 31 53 82 75 25 96 86 10 69 + 2 29 6 60 80 95 42 82 85 50 68 96 90 39 78 69 87 97 48 74 65 43 + x + 86 34 26 50 41 85 58 44 89 22 6 51 45 46 58 32 97 6 1 12 32 2 + 69 39 48 71 33 31 5 58 90 43 24 54 12 9 18 57 4 38 91 42 27 45 + 50 38 56 29 10 0 26 37 83 1 78 15 47 30 75 62 46 29 68 5 30 4 + 27 96 33 95 79 75 56 10 29 70 32 75 52 88 5 36 50 57 46 63 88 65 + x + 44 95 64 20 24 35 69 61 61 2 35 92 42 46 23 98 78 1 38 72 79 35 + 94 37 13 59 5 93 27 58 80 75 58 7 67 13 10 76 84 4 8 70 81 45 + 8 41 98 5 60 26 92 29 91 90 2 62 40 4 5 22 80 15 83 76 52 88 + 29 5 68 73 72 7 54 17 89 32 81 94 51 28 53 71 8 42 54 59 70 79 + x + } { + incr tn + set now [db one {SELECT count(i) FROM t1}] + if {$x == "x"} { + execsql { COMMIT ; BEGIN } + set lowpoint $now + do_test pager2.1.$otn.$tn { + sqlite3 db2 test.db + execsql { + SELECT COALESCE(max(i), 0) FROM t1; + PRAGMA integrity_check; + } + } [list $lowpoint ok] + db2 close + } else { + if {$now > $x } { + if { $x>=$lowpoint } { + execsql "ROLLBACK TO sp_$x" + } else { + execsql "DELETE FROM t1 WHERE i>$x" + set lowpoint $x + } + } elseif {$now < $x} { + for {set k $now} {$k < $x} {incr k} { + execsql "SAVEPOINT sp_$k" + execsql { INSERT INTO t1(j) VALUES(randomblob(1500)) } + } + } + do_execsql_test pager2.1.$otn.$tn { + SELECT COALESCE(max(i), 0) FROM t1; + PRAGMA integrity_check; + } [list $x ok] + } + } +} +db close +tv delete + +finish_test diff --git a/test/pagerfault.test b/test/pagerfault.test new file mode 100644 index 0000000..89f5a11 --- /dev/null +++ b/test/pagerfault.test @@ -0,0 +1,1048 @@ +# 2010 June 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/lock_common.tcl +source $testdir/malloc_common.tcl + +if {[permutation] == "inmemory_journal"} { + finish_test + return +} + +set a_string_counter 1 +proc a_string {n} { + global a_string_counter + incr a_string_counter + string range [string repeat "${a_string_counter}." 
$n] 1 $n +} +db func a_string a_string + +#------------------------------------------------------------------------- +# Test fault-injection while rolling back a hot-journal file. +# +do_test pagerfault-1-pre1 { + execsql { + PRAGMA journal_mode = DELETE; + PRAGMA cache_size = 10; + CREATE TABLE t1(a UNIQUE, b UNIQUE); + INSERT INTO t1 VALUES(a_string(200), a_string(300)); + INSERT INTO t1 SELECT a_string(200), a_string(300) FROM t1; + INSERT INTO t1 SELECT a_string(200), a_string(300) FROM t1; + BEGIN; + INSERT INTO t1 SELECT a_string(201), a_string(301) FROM t1; + INSERT INTO t1 SELECT a_string(202), a_string(302) FROM t1; + INSERT INTO t1 SELECT a_string(203), a_string(303) FROM t1; + INSERT INTO t1 SELECT a_string(204), a_string(304) FROM t1; + } + faultsim_save_and_close +} {} +do_faultsim_test pagerfault-1 -prep { + faultsim_restore_and_reopen +} -body { + execsql { SELECT count(*) FROM t1 } +} -test { + faultsim_test_result {0 4} + faultsim_integrity_check + if {[db one { SELECT count(*) FROM t1 }] != 4} { + error "Database content appears incorrect" + } +} + +#------------------------------------------------------------------------- +# Test fault-injection while rolling back a hot-journal file with a +# page-size different from the current value stored on page 1 of the +# database file. +# +do_test pagerfault-2-pre1 { + testvfs tv -default 1 + tv filter xSync + tv script xSyncCb + proc xSyncCb {filename args} { + if {[string match *journal filename]==0} faultsim_save + } + faultsim_delete_and_reopen + execsql { + PRAGMA page_size = 4096; + BEGIN; + CREATE TABLE abc(a, b, c); + INSERT INTO abc VALUES('o', 't', 't'); + INSERT INTO abc VALUES('f', 'f', 's'); + INSERT INTO abc SELECT * FROM abc; -- 4 + INSERT INTO abc SELECT * FROM abc; -- 8 + INSERT INTO abc SELECT * FROM abc; -- 16 + INSERT INTO abc SELECT * FROM abc; -- 32 + INSERT INTO abc SELECT * FROM abc; -- 64 + INSERT INTO abc SELECT * FROM abc; -- 128 + INSERT INTO abc SELECT * FROM abc; -- 256 + COMMIT; + PRAGMA page_size = 1024; + VACUUM; + } + db close + tv delete +} {} +do_faultsim_test pagerfault-2 -prep { + faultsim_restore_and_reopen +} -body { + execsql { SELECT * FROM abc } +} -test { + set answer [split [string repeat "ottffs" 128] ""] + faultsim_test_result [list 0 $answer] + faultsim_integrity_check + set res [db eval { SELECT * FROM abc }] + if {$res != $answer} { error "Database content appears incorrect ($res)" } +} + +#------------------------------------------------------------------------- +# Test fault-injection while rolling back hot-journals that were created +# as part of a multi-file transaction. 
+# +do_test pagerfault-3-pre1 { + testvfs tstvfs -default 1 + tstvfs filter xDelete + tstvfs script xDeleteCallback + + proc xDeleteCallback {method file args} { + set file [file tail $file] + if { [string match *mj* $file] } { faultsim_save } + } + + faultsim_delete_and_reopen + db func a_string a_string + + execsql { + ATTACH 'test.db2' AS aux; + PRAGMA journal_mode = DELETE; + PRAGMA main.cache_size = 10; + PRAGMA aux.cache_size = 10; + + CREATE TABLE t1(a UNIQUE, b UNIQUE); + CREATE TABLE aux.t2(a UNIQUE, b UNIQUE); + INSERT INTO t1 VALUES(a_string(200), a_string(300)); + INSERT INTO t1 SELECT a_string(200), a_string(300) FROM t1; + INSERT INTO t1 SELECT a_string(200), a_string(300) FROM t1; + INSERT INTO t2 SELECT * FROM t1; + + BEGIN; + INSERT INTO t1 SELECT a_string(201), a_string(301) FROM t1; + INSERT INTO t1 SELECT a_string(202), a_string(302) FROM t1; + INSERT INTO t1 SELECT a_string(203), a_string(303) FROM t1; + INSERT INTO t1 SELECT a_string(204), a_string(304) FROM t1; + REPLACE INTO t2 SELECT * FROM t1; + COMMIT; + } + + db close + tstvfs delete +} {} +do_faultsim_test pagerfault-3 -prep { + faultsim_restore_and_reopen +} -body { + execsql { + ATTACH 'test.db2' AS aux; + SELECT count(*) FROM t2; + SELECT count(*) FROM t1; + } +} -test { + faultsim_test_result {0 {4 4}} {1 {unable to open database: test.db2}} + faultsim_integrity_check + catchsql { ATTACH 'test.db2' AS aux } + if {[db one { SELECT count(*) FROM t1 }] != 4 + || [db one { SELECT count(*) FROM t2 }] != 4 + } { + error "Database content appears incorrect" + } +} + +#------------------------------------------------------------------------- +# Test fault-injection as part of a vanilla, no-transaction, INSERT +# statement. +# +do_faultsim_test pagerfault-4 -prep { + faultsim_delete_and_reopen +} -body { + execsql { + CREATE TABLE x(y); + INSERT INTO x VALUES('z'); + SELECT * FROM x; + } +} -test { + faultsim_test_result {0 z} + faultsim_integrity_check +} + +#------------------------------------------------------------------------- +# Test fault-injection as part of a commit when using journal_mode=PERSIST. +# Three different cases: +# +# pagerfault-5.1: With no journal_size_limit configured. +# pagerfault-5.2: With a journal_size_limit configured. +# pagerfault-5.4: Multi-file transaction. One connection has a +# journal_size_limit of 0, the other has no limit. 
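+#
+# (A journal_size_limit of 0 means that the persistent journal is
+# truncated back to zero bytes at the end of each transaction, whereas a
+# connection with no configured limit leaves the full-size journal file
+# in place.)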
+# +do_test pagerfault-5-pre1 { + faultsim_delete_and_reopen + db func a_string a_string + execsql { + CREATE TABLE t1(a UNIQUE, b UNIQUE); + INSERT INTO t1 VALUES(a_string(200), a_string(300)); + INSERT INTO t1 SELECT a_string(200), a_string(300) FROM t1; + INSERT INTO t1 SELECT a_string(200), a_string(300) FROM t1; + } + faultsim_save_and_close +} {} +do_faultsim_test pagerfault-5.1 -prep { + faultsim_restore_and_reopen + db func a_string a_string + execsql { PRAGMA journal_mode = PERSIST } +} -body { + execsql { INSERT INTO t1 SELECT a_string(200), a_string(300) FROM t1 } +} -test { + faultsim_test_result {0 {}} + faultsim_integrity_check +} +do_faultsim_test pagerfault-5.2 -prep { + faultsim_restore_and_reopen + db func a_string a_string + execsql { + PRAGMA journal_mode = PERSIST; + PRAGMA journal_size_limit = 2048; + } +} -body { + execsql { INSERT INTO t1 SELECT a_string(200), a_string(300) FROM t1 } +} -test { + faultsim_test_result {0 {}} + faultsim_integrity_check +} +do_faultsim_test pagerfault-5.3 -faults oom-transient -prep { + faultsim_restore_and_reopen + db func a_string a_string + file delete -force test2.db test2.db-journal test2.db-wal + execsql { + PRAGMA journal_mode = PERSIST; + ATTACH 'test2.db' AS aux; + PRAGMA aux.journal_mode = PERSIST; + PRAGMA aux.journal_size_limit = 0; + } +} -body { + execsql { + BEGIN; + INSERT INTO t1 SELECT a_string(200), a_string(300) FROM t1; + CREATE TABLE aux.t2 AS SELECT * FROM t1; + COMMIT; + } +} -test { + faultsim_test_result {0 {}} + + catchsql { COMMIT } + catchsql { ROLLBACK } + + faultsim_integrity_check + set res "" + set rc [catch { set res [db one { PRAGMA aux.integrity_check }] }] + if {$rc!=0 || $res != "ok"} {error "integrity-check problem:$rc $res"} +} + +#------------------------------------------------------------------------- +# Test fault-injection as part of a commit when using +# journal_mode=TRUNCATE. +# +do_test pagerfault-6-pre1 { + faultsim_delete_and_reopen + db func a_string a_string + execsql { + CREATE TABLE t1(a UNIQUE, b UNIQUE); + INSERT INTO t1 VALUES(a_string(200), a_string(300)); + } + faultsim_save_and_close +} {} + +do_faultsim_test pagerfault-6.1 -prep { + faultsim_restore_and_reopen + db func a_string a_string + execsql { PRAGMA journal_mode = TRUNCATE } +} -body { + execsql { INSERT INTO t1 SELECT a_string(200), a_string(300) FROM t1 } + execsql { INSERT INTO t1 SELECT a_string(200), a_string(300) FROM t1 } +} -test { + faultsim_test_result {0 {}} + faultsim_integrity_check +} + +# The unix vfs xAccess() method considers a file zero bytes in size to +# "not exist". This proc overrides that behaviour so that a zero length +# file is considered to exist. +# +proc xAccess {method filename op args} { + if {$op != "SQLITE_ACCESS_EXISTS"} { return "" } + return [file exists $filename] +} +do_faultsim_test pagerfault-6.2 -faults cantopen-* -prep { + shmfault filter xAccess + shmfault script xAccess + + faultsim_restore_and_reopen + db func a_string a_string + execsql { PRAGMA journal_mode = TRUNCATE } +} -body { + execsql { INSERT INTO t1 SELECT a_string(200), a_string(300) FROM t1 } + execsql { INSERT INTO t1 SELECT a_string(200), a_string(300) FROM t1 } +} -test { + faultsim_test_result {0 {}} + faultsim_integrity_check +} + +# The following was an attempt to get a bitvec malloc to fail. Didn't work. 
+# +# do_test pagerfault-6-pre1 { +# faultsim_delete_and_reopen +# execsql { +# CREATE TABLE t1(x, y, UNIQUE(x, y)); +# INSERT INTO t1 VALUES(1, randomblob(1501)); +# INSERT INTO t1 VALUES(2, randomblob(1502)); +# INSERT INTO t1 VALUES(3, randomblob(1503)); +# INSERT INTO t1 VALUES(4, randomblob(1504)); +# INSERT INTO t1 +# SELECT x, randomblob(1500+oid+(SELECT max(oid) FROM t1)) FROM t1; +# INSERT INTO t1 +# SELECT x, randomblob(1500+oid+(SELECT max(oid) FROM t1)) FROM t1; +# INSERT INTO t1 +# SELECT x, randomblob(1500+oid+(SELECT max(oid) FROM t1)) FROM t1; +# INSERT INTO t1 +# SELECT x, randomblob(1500+oid+(SELECT max(oid) FROM t1)) FROM t1; +# } +# faultsim_save_and_close +# } {} +# do_faultsim_test pagerfault-6 -prep { +# faultsim_restore_and_reopen +# } -body { +# execsql { +# BEGIN; +# UPDATE t1 SET x=x+4 WHERE x=1; +# SAVEPOINT one; +# UPDATE t1 SET x=x+4 WHERE x=2; +# SAVEPOINT three; +# UPDATE t1 SET x=x+4 WHERE x=3; +# SAVEPOINT four; +# UPDATE t1 SET x=x+4 WHERE x=4; +# RELEASE three; +# COMMIT; +# SELECT DISTINCT x FROM t1; +# } +# } -test { +# faultsim_test_result {0 {5 6 7 8}} +# faultsim_integrity_check +# } +# + +# This is designed to provoke a special case in the pager code: +# +# If an error (specifically, a FULL or IOERR error) occurs while writing a +# dirty page to the file-system in order to free up memory, the pager enters +# the "error state". An IO error causes SQLite to roll back the current +# transaction (exiting the error state). A FULL error, however, may only +# rollback the current statement. +# +# This block tests that nothing goes wrong if a FULL error occurs while +# writing a dirty page out to free memory from within a statement that has +# opened a statement transaction. +# +do_test pagerfault-7-pre1 { + faultsim_delete_and_reopen + execsql { + CREATE TABLE t2(a INTEGER PRIMARY KEY, b); + BEGIN; + INSERT INTO t2 VALUES(NULL, randomblob(1500)); + INSERT INTO t2 VALUES(NULL, randomblob(1500)); + INSERT INTO t2 SELECT NULL, randomblob(1500) FROM t2; -- 4 + INSERT INTO t2 SELECT NULL, randomblob(1500) FROM t2; -- 8 + INSERT INTO t2 SELECT NULL, randomblob(1500) FROM t2; -- 16 + INSERT INTO t2 SELECT NULL, randomblob(1500) FROM t2; -- 32 + INSERT INTO t2 SELECT NULL, randomblob(1500) FROM t2; -- 64 + COMMIT; + CREATE TABLE t1(a PRIMARY KEY, b); + INSERT INTO t1 SELECT * FROM t2; + DROP TABLE t2; + } + faultsim_save_and_close +} {} +do_faultsim_test pagerfault-7 -prep { + faultsim_restore_and_reopen + execsql { + PRAGMA cache_size = 10; + BEGIN; + UPDATE t1 SET b = randomblob(1500); + } +} -body { + execsql { UPDATE t1 SET a = 65, b = randomblob(1500) WHERE (a+1)>200 } + execsql COMMIT +} -test { + faultsim_test_result {0 {}} + faultsim_integrity_check +} + +do_test pagerfault-8-pre1 { + faultsim_delete_and_reopen + execsql { + PRAGMA auto_vacuum = 1; + CREATE TABLE t1(a INTEGER PRIMARY KEY, b); + BEGIN; + INSERT INTO t1 VALUES(NULL, randomblob(1500)); + INSERT INTO t1 VALUES(NULL, randomblob(1500)); + INSERT INTO t1 SELECT NULL, randomblob(1500) FROM t1; -- 4 + INSERT INTO t1 SELECT NULL, randomblob(1500) FROM t1; -- 8 + INSERT INTO t1 SELECT NULL, randomblob(1500) FROM t1; -- 16 + INSERT INTO t1 SELECT NULL, randomblob(1500) FROM t1; -- 32 + INSERT INTO t1 SELECT NULL, randomblob(1500) FROM t1; -- 64 + COMMIT; + } + faultsim_save_and_close + set filesize [file size test.db] + set {} {} +} {} +do_test pagerfault-8-pre2 { + faultsim_restore_and_reopen + execsql { DELETE FROM t1 WHERE a>32 } + expr {[file size test.db] < $filesize} +} {1} +do_faultsim_test 
pagerfault-8 -prep { + faultsim_restore_and_reopen + execsql { + BEGIN; + DELETE FROM t1 WHERE a>32; + } +} -body { + execsql COMMIT +} -test { + faultsim_test_result {0 {}} + faultsim_integrity_check +} + +#------------------------------------------------------------------------- +# This test case is specially designed so that during a savepoint +# rollback, a new cache entry must be allocated (see comments surrounding +# the call to sqlite3PagerAcquire() from within pager_playback_one_page() +# for details). Test the effects of injecting an OOM at this point. +# +do_test pagerfault-9-pre1 { + faultsim_delete_and_reopen + execsql { + PRAGMA auto_vacuum = incremental; + CREATE TABLE t1(x); + CREATE TABLE t2(y); + CREATE TABLE t3(z); + + INSERT INTO t1 VALUES(randomblob(900)); + INSERT INTO t1 VALUES(randomblob(900)); + DELETE FROM t1; + } + faultsim_save_and_close +} {} +do_faultsim_test pagerfault-9.1 -prep { + faultsim_restore_and_reopen + execsql { + BEGIN; + INSERT INTO t1 VALUES(randomblob(900)); + INSERT INTO t1 VALUES(randomblob(900)); + DROP TABLE t3; + DROP TABLE t2; + SAVEPOINT abc; + PRAGMA incremental_vacuum; + } +} -body { + execsql { + ROLLBACK TO abc; + COMMIT; + PRAGMA freelist_count + } +} -test { + faultsim_test_result {0 2} + faultsim_integrity_check + + set sl [db one { SELECT COALESCE(sum(length(x)), 'null') FROM t1 }] + if {$sl!="null" && $sl!=1800} { + error "Content looks no good... ($sl)" + } +} + +#------------------------------------------------------------------------- +# Test fault injection with a temporary database file. +# +foreach v {a b} { + do_faultsim_test pagerfault-10$v -prep { + sqlite3 db "" + db func a_string a_string; + execsql { + PRAGMA cache_size = 10; + BEGIN; + CREATE TABLE xx(a, b, UNIQUE(a, b)); + INSERT INTO xx VALUES(a_string(200), a_string(200)); + INSERT INTO xx SELECT a_string(200), a_string(200) FROM xx; + INSERT INTO xx SELECT a_string(200), a_string(200) FROM xx; + INSERT INTO xx SELECT a_string(200), a_string(200) FROM xx; + INSERT INTO xx SELECT a_string(200), a_string(200) FROM xx; + COMMIT; + } + } -body { + execsql { UPDATE xx SET a = a_string(300) } + } -test { + faultsim_test_result {0 {}} + if {$::v == "b"} { execsql { PRAGMA journal_mode = TRUNCATE } } + faultsim_integrity_check + faultsim_integrity_check + } +} + +#------------------------------------------------------------------------- +# Test fault injection with transaction savepoints (savepoints created +# when a SAVEPOINT command is executed outside of any other savepoint +# or transaction context). 
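#
# The comment above describes a "transaction savepoint": a SAVEPOINT opened
# outside any BEGIN starts a transaction of its own, and releasing that
# outermost savepoint is what commits it.  The following is an illustrative
# sketch of that behaviour, separate from the faultsim tests in this file; it
# assumes only the standard sqlite3 Tcl bindings and uses a throwaway
# in-memory handle (spdb) rather than the shared [db] connection.
#
package require sqlite3
sqlite3 spdb :memory:
spdb eval { CREATE TABLE t(x) }

# SAVEPOINT outside a BEGIN implicitly opens a transaction.
spdb eval { SAVEPOINT sp; INSERT INTO t VALUES(1) }

# ROLLBACK TO undoes the INSERT but leaves the transaction (and the
# savepoint itself) open.
spdb eval { ROLLBACK TO sp }
puts [spdb eval { SELECT count(*) FROM t }]          ;# expect 0

# RELEASE of the outermost savepoint commits and returns to autocommit, so a
# further COMMIT reports that no transaction is active.
spdb eval { RELEASE sp }
puts [list [catch { spdb eval COMMIT } msg] $msg]
;# expect: 1 {cannot commit - no transaction is active}
spdb close
#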
+# +do_test pagerfault-9-pre1 { + faultsim_delete_and_reopen + db func a_string a_string; + execsql { + PRAGMA auto_vacuum = on; + CREATE TABLE t1(x UNIQUE); + CREATE TABLE t2(y UNIQUE); + CREATE TABLE t3(z UNIQUE); + BEGIN; + INSERT INTO t1 VALUES(a_string(202)); + INSERT INTO t2 VALUES(a_string(203)); + INSERT INTO t3 VALUES(a_string(204)); + INSERT INTO t1 SELECT a_string(202) FROM t1; + INSERT INTO t1 SELECT a_string(203) FROM t1; + INSERT INTO t1 SELECT a_string(204) FROM t1; + INSERT INTO t1 SELECT a_string(205) FROM t1; + INSERT INTO t2 SELECT a_string(length(x)) FROM t1; + INSERT INTO t3 SELECT a_string(length(x)) FROM t1; + COMMIT; + } + faultsim_save_and_close +} {} +do_faultsim_test pagerfault-11 -prep { + faultsim_restore_and_reopen + execsql { PRAGMA cache_size = 10 } +} -body { + execsql { + SAVEPOINT trans; + UPDATE t2 SET y = y||'2'; + INSERT INTO t3 SELECT * FROM t2; + DELETE FROM t1; + ROLLBACK TO trans; + UPDATE t1 SET x = x||'3'; + INSERT INTO t2 SELECT * FROM t1; + DELETE FROM t3; + RELEASE trans; + } +} -test { + faultsim_test_result {0 {}} + faultsim_integrity_check +} + + +#------------------------------------------------------------------------- +# Test fault injection when writing to a database file that resides on +# a file-system with a sector-size larger than the database page-size. +# +do_test pagerfault-12-pre1 { + testvfs ss_layer -default 1 + ss_layer sectorsize 4096 + faultsim_delete_and_reopen + db func a_string a_string; + + execsql { + PRAGMA page_size = 1024; + PRAGMA journal_mode = PERSIST; + PRAGMA cache_size = 10; + BEGIN; + CREATE TABLE t1(x, y UNIQUE); + INSERT INTO t1 VALUES(a_string(333), a_string(444)); + INSERT INTO t1 SELECT a_string(333+rowid), a_string(444+rowid) FROM t1; + INSERT INTO t1 SELECT a_string(333+rowid), a_string(444+rowid) FROM t1; + INSERT INTO t1 SELECT a_string(333+rowid), a_string(444+rowid) FROM t1; + INSERT INTO t1 SELECT a_string(333+rowid), a_string(444+rowid) FROM t1; + INSERT INTO t1 SELECT a_string(44), a_string(55) FROM t1 LIMIT 13; + COMMIT; + } + faultsim_save_and_close +} {} + +do_faultsim_test pagerfault-12a -prep { + faultsim_restore_and_reopen + execsql { PRAGMA cache_size = 10 } + db func a_string a_string; +} -body { + execsql { + UPDATE t1 SET x = a_string(length(x)), y = a_string(length(y)); + } +} -test { + faultsim_test_result {0 {}} + faultsim_integrity_check +} + +do_test pagerfault-12-pre2 { + faultsim_restore_and_reopen + execsql { + CREATE TABLE t2 AS SELECT * FROM t1 LIMIT 10; + } + faultsim_save_and_close +} {} +do_faultsim_test pagerfault-12b -prep { + faultsim_restore_and_reopen + db func a_string a_string; + execsql { SELECT * FROM t1 } +} -body { + set sql(1) { UPDATE t2 SET x = a_string(280) } + set sql(2) { UPDATE t1 SET x = a_string(280) WHERE rowid = 5 } + + db eval { SELECT rowid FROM t1 LIMIT 2 } { db eval $sql($rowid) } + +} -test { + faultsim_test_result {0 {}} + faultsim_integrity_check +} + +catch { db close } +ss_layer delete + + +#------------------------------------------------------------------------- +# Test fault injection when SQLite opens a database where the size of the +# database file is zero bytes but the accompanying journal file is larger +# than that. In this scenario SQLite should delete the journal file +# without rolling it back, even if it is in all other respects a valid +# hot-journal file. 
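#
# The scenario above, sketched outside the fault-injection harness: create a
# database in journal_mode=PERSIST so that a rollback journal is left on disk,
# remove the database file itself, and then reopen.  The leftover journal must
# be discarded rather than replayed, so none of the old content reappears.
# This assumes the standard sqlite3 Tcl bindings and uses scratch file names
# (jrnl.db / jrnl.db-journal) so the test databases are left alone.
#
package require sqlite3
file delete -force jrnl.db jrnl.db-journal
sqlite3 jdb jrnl.db
jdb eval {
  PRAGMA journal_mode = PERSIST;
  CREATE TABLE t1(x, y);
  INSERT INTO t1 VALUES(1, 2);
}
jdb close
file delete -force jrnl.db                        ;# keep only the journal
puts [file exists jrnl.db-journal]                ;# 1 - stale journal on disk

# Reopening sees a missing (zero length) database.  Per the comment above,
# the stale journal is expected to be deleted, not rolled back.
sqlite3 jdb jrnl.db
jdb eval { CREATE TABLE xx(a, b) }
puts [jdb eval { SELECT name FROM sqlite_master }]   ;# xx - t1 does not come back
puts [file exists jrnl.db-journal]                   ;# expected 0
jdb close
#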
+# +do_test pagerfault-13-pre1 { + faultsim_delete_and_reopen + db func a_string a_string; + execsql { + PRAGMA journal_mode = PERSIST; + BEGIN; + CREATE TABLE t1(x, y UNIQUE); + INSERT INTO t1 VALUES(a_string(333), a_string(444)); + COMMIT; + } + db close + file delete -force test.db + faultsim_save +} {} +do_faultsim_test pagerfault-13 -prep { + faultsim_restore_and_reopen +} -body { + execsql { CREATE TABLE xx(a, b) } +} -test { + faultsim_test_result {0 {}} +} + +#--------------------------------------------------------------------------- +# Test fault injection into a small backup operation. +# +do_test pagerfault-14-pre1 { + faultsim_delete_and_reopen + db func a_string a_string; + execsql { + PRAGMA journal_mode = PERSIST; + ATTACH 'test.db2' AS two; + BEGIN; + CREATE TABLE t1(x, y UNIQUE); + CREATE TABLE two.t2(x, y UNIQUE); + INSERT INTO t1 VALUES(a_string(333), a_string(444)); + INSERT INTO t2 VALUES(a_string(333), a_string(444)); + COMMIT; + } + faultsim_save_and_close +} {} + +do_faultsim_test pagerfault-14a -prep { + faultsim_restore_and_reopen +} -body { + if {[catch {db backup test.db2} msg]} { error [regsub {.*: } $msg {}] } +} -test { + faultsim_test_result {0 {}} {1 {}} {1 {SQL logic error or missing database}} +} +do_faultsim_test pagerfault-14b -prep { + catch { db2 close } + faultsim_restore_and_reopen + sqlite3 db2 "" + db2 eval { PRAGMA page_size = 4096; CREATE TABLE xx(a) } +} -body { + sqlite3_backup B db2 main db main + B step 200 + set rc [B finish] + if {[string match SQLITE_IOERR_* $rc]} {set rc SQLITE_IOERR} + if {$rc != "SQLITE_OK"} { error [sqlite3_test_errstr $rc] } + set {} {} +} -test { + faultsim_test_result {0 {}} +} +do_faultsim_test pagerfault-14c -prep { + catch { db2 close } + faultsim_restore_and_reopen + sqlite3 db2 test.db2 + db2 eval { + PRAGMA synchronous = off; + PRAGMA page_size = 4096; + CREATE TABLE xx(a); + } +} -body { + sqlite3_backup B db2 main db main + B step 200 + set rc [B finish] + if {[string match SQLITE_IOERR_* $rc]} {set rc SQLITE_IOERR} + if {$rc != "SQLITE_OK"} { error [sqlite3_test_errstr $rc] } + set {} {} +} -test { + faultsim_test_result {0 {}} +} + +do_test pagerfault-15-pre1 { + faultsim_delete_and_reopen + db func a_string a_string; + execsql { + BEGIN; + CREATE TABLE t1(x, y UNIQUE); + INSERT INTO t1 VALUES(a_string(11), a_string(22)); + INSERT INTO t1 VALUES(a_string(11), a_string(22)); + COMMIT; + } + faultsim_save_and_close +} {} +do_faultsim_test pagerfault-15 -prep { + faultsim_restore_and_reopen + db func a_string a_string; +} -body { + db eval { SELECT * FROM t1 LIMIT 1 } { + execsql { + BEGIN; INSERT INTO t1 VALUES(a_string(333), a_string(555)); COMMIT; + BEGIN; INSERT INTO t1 VALUES(a_string(333), a_string(555)); COMMIT; + } + } +} -test { + faultsim_test_result {0 {}} + faultsim_integrity_check +} + + +do_test pagerfault-16-pre1 { + faultsim_delete_and_reopen + execsql { CREATE TABLE t1(x, y UNIQUE) } + faultsim_save_and_close +} {} +do_faultsim_test pagerfault-16 -prep { + faultsim_restore_and_reopen +} -body { + execsql { + PRAGMA locking_mode = exclusive; + PRAGMA journal_mode = wal; + INSERT INTO t1 VALUES(1, 2); + INSERT INTO t1 VALUES(3, 4); + PRAGMA journal_mode = delete; + INSERT INTO t1 VALUES(4, 5); + PRAGMA journal_mode = wal; + INSERT INTO t1 VALUES(6, 7); + PRAGMA journal_mode = persist; + INSERT INTO t1 VALUES(8, 9); + } +} -test { + faultsim_test_result {0 {exclusive wal delete wal persist}} + faultsim_integrity_check +} + + 
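#
# A reference sketch of the file-level behaviour exercised by pagerfault-16
# above and by the journal-mode tests that follow: switching into WAL mode
# sends subsequent writes to a separate *-wal file, and switching back out
# checkpoints that file into the database and removes it.  Assumes the
# standard sqlite3 Tcl bindings and a scratch database (walmode.db) so the
# test databases are left alone.
#
package require sqlite3
file delete -force walmode.db walmode.db-wal walmode.db-shm
sqlite3 wdb walmode.db
wdb eval { CREATE TABLE t1(x, y) }

puts [wdb eval { PRAGMA journal_mode = wal }]        ;# wal
wdb eval { INSERT INTO t1 VALUES(1, 2) }
puts [file exists walmode.db-wal]                    ;# 1 - the write went to the WAL

# Leaving WAL mode checkpoints the WAL back into the database file.
puts [wdb eval { PRAGMA journal_mode = delete }]     ;# delete
puts [file exists walmode.db-wal]                    ;# expected 0
puts [wdb eval { SELECT * FROM t1 }]                 ;# 1 2
wdb close
#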
+#------------------------------------------------------------------------- +# Test fault injection while changing into and out of WAL mode. +# +do_test pagerfault-17-pre1 { + faultsim_delete_and_reopen + execsql { + CREATE TABLE t1(a PRIMARY KEY, b); + INSERT INTO t1 VALUES(1862, 'Botha'); + INSERT INTO t1 VALUES(1870, 'Smuts'); + INSERT INTO t1 VALUES(1866, 'Hertzog'); + } + faultsim_save_and_close +} {} +do_faultsim_test pagerfault-17a -prep { + faultsim_restore_and_reopen +} -body { + execsql { + PRAGMA journal_mode = wal; + PRAGMA journal_mode = delete; + } +} -test { + faultsim_test_result {0 {wal delete}} + faultsim_integrity_check +} +do_faultsim_test pagerfault-17b -prep { + faultsim_restore_and_reopen + execsql { PRAGMA synchronous = OFF } +} -body { + execsql { + PRAGMA journal_mode = wal; + INSERT INTO t1 VALUES(22, 'Clarke'); + PRAGMA journal_mode = delete; + } +} -test { + faultsim_test_result {0 {wal delete}} + faultsim_integrity_check +} +do_faultsim_test pagerfault-17c -prep { + faultsim_restore_and_reopen + execsql { + PRAGMA locking_mode = exclusive; + PRAGMA journal_mode = wal; + } +} -body { + execsql { PRAGMA journal_mode = delete } +} -test { + faultsim_test_result {0 delete} + faultsim_integrity_check +} +do_faultsim_test pagerfault-17d -prep { + catch { db2 close } + faultsim_restore_and_reopen + sqlite3 db2 test.db + execsql { PRAGMA journal_mode = delete } + execsql { PRAGMA journal_mode = wal } + execsql { INSERT INTO t1 VALUES(99, 'Bradman') } db2 +} -body { + execsql { PRAGMA journal_mode = delete } +} -test { + faultsim_test_result {1 {database is locked}} + faultsim_integrity_check +} +do_faultsim_test pagerfault-17e -prep { + catch { db2 close } + faultsim_restore_and_reopen + sqlite3 db2 test.db + execsql { PRAGMA journal_mode = delete } + execsql { PRAGMA journal_mode = wal } + set ::chan [launch_testfixture] + testfixture $::chan { + sqlite3 db test.db + db eval { INSERT INTO t1 VALUES(101, 'Latham') } + } + catch { testfixture $::chan sqlite_abort } + catch { close $::chan } +} -body { + execsql { PRAGMA journal_mode = delete } +} -test { + faultsim_test_result {0 delete} + faultsim_integrity_check +} + +#------------------------------------------------------------------------- +# Test fault-injection when changing from journal_mode=persist to +# journal_mode=delete (this involves deleting the journal file). 
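#
# The point of the pagerfault-18 block below: with journal_mode=PERSIST the
# rollback journal stays on disk after each commit (only its header is
# zeroed), and it is the later switch to journal_mode=DELETE that finally
# unlinks it - the step the fault injection targets.  A sketch of that
# sequence, assuming the standard sqlite3 Tcl bindings and a scratch database
# name (persist.db).
#
package require sqlite3
file delete -force persist.db persist.db-journal
sqlite3 pdb persist.db
pdb eval {
  PRAGMA journal_mode = PERSIST;
  CREATE TABLE qq(x);
  INSERT INTO qq VALUES('Herbert');
}
puts [file exists persist.db-journal]                ;# 1 - journal kept after commit

# Switching to DELETE mode is expected to remove the leftover journal file.
puts [pdb eval { PRAGMA journal_mode = delete }]     ;# delete
puts [file exists persist.db-journal]                ;# expected 0
pdb close
#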
+# +do_test pagerfault-18-pre1 { + faultsim_delete_and_reopen + execsql { + CREATE TABLE qq(x); + INSERT INTO qq VALUES('Herbert'); + INSERT INTO qq VALUES('Macalister'); + INSERT INTO qq VALUES('Mackenzie'); + INSERT INTO qq VALUES('Lilley'); + INSERT INTO qq VALUES('Palmer'); + } + faultsim_save_and_close +} {} +do_faultsim_test pagerfault-18 -prep { + faultsim_restore_and_reopen + execsql { + PRAGMA journal_mode = PERSIST; + INSERT INTO qq VALUES('Beatty'); + } +} -body { + execsql { PRAGMA journal_mode = delete } +} -test { + faultsim_test_result {0 delete} + faultsim_integrity_check +} + +do_faultsim_test pagerfault-19a -prep { + sqlite3 db :memory: + db func a_string a_string + execsql { + PRAGMA auto_vacuum = FULL; + BEGIN; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(a_string(5000), a_string(6000)); + COMMIT; + } +} -body { + execsql { + CREATE TABLE t2(a, b); + INSERT INTO t2 SELECT * FROM t1; + DELETE FROM t1; + } +} -test { + faultsim_test_result {0 {}} +} + +do_test pagerfault-19-pre1 { + faultsim_delete_and_reopen + execsql { + PRAGMA auto_vacuum = FULL; + CREATE TABLE t1(x); INSERT INTO t1 VALUES(1); + CREATE TABLE t2(x); INSERT INTO t2 VALUES(2); + CREATE TABLE t3(x); INSERT INTO t3 VALUES(3); + CREATE TABLE t4(x); INSERT INTO t4 VALUES(4); + CREATE TABLE t5(x); INSERT INTO t5 VALUES(5); + CREATE TABLE t6(x); INSERT INTO t6 VALUES(6); + } + faultsim_save_and_close +} {} +do_faultsim_test pagerfault-19b -prep { + faultsim_restore_and_reopen +} -body { + execsql { + BEGIN; + UPDATE t4 SET x = x+1; + UPDATE t6 SET x = x+1; + SAVEPOINT one; + UPDATE t3 SET x = x+1; + SAVEPOINT two; + DROP TABLE t2; + ROLLBACK TO one; + COMMIT; + SELECT * FROM t3; + SELECT * FROM t4; + SELECT * FROM t6; + } +} -test { + faultsim_test_result {0 {3 5 7}} +} + +#------------------------------------------------------------------------- +# This tests fault-injection in a special case in the auto-vacuum code. +# +do_test pagerfault-20-pre1 { + faultsim_delete_and_reopen + execsql { + PRAGMA cache_size = 10; + PRAGMA auto_vacuum = FULL; + CREATE TABLE t0(a, b); + } + faultsim_save_and_close +} {} +do_faultsim_test pagerfault-20 -prep { + faultsim_restore_and_reopen +} -body { + execsql { + BEGIN; + CREATE TABLE t1(a, b); + CREATE TABLE t2(a, b); + DROP TABLE t1; + COMMIT; + } +} -test { + faultsim_test_result {0 {}} +} + +do_test pagerfault-21-pre1 { + faultsim_delete_and_reopen + execsql { + PRAGMA cache_size = 10; + CREATE TABLE t0(a PRIMARY KEY, b); + INSERT INTO t0 VALUES(1, 2); + } + faultsim_save_and_close +} {} +do_faultsim_test pagerfault-21 -prep { + faultsim_restore_and_reopen +} -body { + db eval { SELECT * FROM t0 LIMIT 1 } { + db eval { INSERT INTO t0 SELECT a+1, b FROM t0 } + db eval { INSERT INTO t0 SELECT a+2, b FROM t0 } + } +} -test { + faultsim_test_result {0 {}} +} + + +#------------------------------------------------------------------------- +# Test fault-injection and rollback when the nReserve header value +# is non-zero. 
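#
# Background for the header surgery in the next block: byte 20 of the 100-byte
# database file header is the number of "reserved" bytes at the end of every
# page (nReserve), so [hexio_write test.db 20 10] leaves 1024-16 = 1008 usable
# bytes per page, and the matching [hexio_write test.db 105 03F0] fixes up the
# 2-byte "start of cell content area" field of the page-1 b-tree header (which
# begins at offset 100).  The sketch below only reads those header fields back
# with plain Tcl I/O - it does not patch them - and assumes the offsets from
# the documented SQLite file format plus a scratch database (reserve.db).
#
package require sqlite3
file delete -force reserve.db
sqlite3 rdb reserve.db
rdb eval { PRAGMA page_size = 1024; CREATE TABLE t(x) }
rdb close

set fd [open reserve.db r]
fconfigure $fd -translation binary
set hdr [read $fd 100]                               ;# the database file header
close $fd

binary scan [string range $hdr 16 17] S pageSize     ;# offset 16: page size
set pageSize [expr {$pageSize & 0xFFFF}]
binary scan [string index $hdr 20] c nReserve        ;# offset 20: reserved bytes/page
set nReserve [expr {$nReserve & 0xFF}]

puts "page size    : $pageSize"                      ;# 1024
puts "reserved     : $nReserve"                      ;# 0 for an ordinary database
puts "usable bytes : [expr {$pageSize - $nReserve}]"
#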
+# +do_test pagerfault-21-pre1 { + faultsim_delete_and_reopen + execsql { + PRAGMA page_size = 1024; + PRAGMA journal_mode = WAL; + PRAGMA journal_mode = DELETE; + } + db close + hexio_write test.db 20 10 + hexio_write test.db 105 03F0 + sqlite3 db test.db + db func a_string a_string + execsql { + CREATE TABLE t0(a PRIMARY KEY, b UNIQUE); + INSERT INTO t0 VALUES(a_string(222), a_string(333)); + INSERT INTO t0 VALUES(a_string(223), a_string(334)); + INSERT INTO t0 VALUES(a_string(224), a_string(335)); + INSERT INTO t0 VALUES(a_string(225), a_string(336)); + } + faultsim_save_and_close +} {} + +do_faultsim_test pagerfault-21 -prep { + faultsim_restore_and_reopen +} -body { + execsql { INSERT INTO t0 SELECT a||'x', b||'x' FROM t0 } +} -test { + faultsim_test_result {0 {}} + faultsim_integrity_check +} +ifcapable crashtest { + faultsim_delete_and_reopen + execsql { + PRAGMA page_size = 1024; + PRAGMA journal_mode = WAL; + PRAGMA journal_mode = DELETE; + } + db close + hexio_write test.db 20 10 + hexio_write test.db 105 03F0 + + sqlite3 db test.db + db func a_string a_string + execsql { + CREATE TABLE t0(a PRIMARY KEY, b UNIQUE); + INSERT INTO t0 VALUES(a_string(222), a_string(333)); + INSERT INTO t0 VALUES(a_string(223), a_string(334)); + } + faultsim_save_and_close + + for {set iTest 1} {$iTest<50} {incr iTest} { + do_test pagerfault-21.crash.$iTest.1 { + crashsql -delay 1 -file test.db -seed $iTest { + BEGIN; + CREATE TABLE t1(a PRIMARY KEY, b UNIQUE); + INSERT INTO t1 SELECT a, b FROM t0; + COMMIT; + } + } {1 {child process exited abnormally}} + do_test pagerfault-22.$iTest.2 { + sqlite3 db test.db + execsql { PRAGMA integrity_check } + } {ok} + db close + } +} + +finish_test diff --git a/test/pagerfault2.test b/test/pagerfault2.test new file mode 100644 index 0000000..6cdb99a --- /dev/null +++ b/test/pagerfault2.test @@ -0,0 +1,99 @@ +# 2010 June 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# The tests in this file test the pager modules response to various +# fault conditions (OOM, IO error, disk full etc.). They are similar +# to those in file pagerfault1.test. +# +# More specifically, the tests in this file are those deemed too slow to +# run as part of pagerfault1.test. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/lock_common.tcl +source $testdir/malloc_common.tcl + +if {[permutation] == "inmemory_journal"} { + finish_test + return +} + +sqlite3_memdebug_vfs_oom_test 0 + +set a_string_counter 1 +proc a_string {n} { + global a_string_counter + incr a_string_counter + string range [string repeat "${a_string_counter}." 
$n] 1 $n +} +db func a_string a_string + +do_test pagerfault2-1-pre1 { + faultsim_delete_and_reopen + db func a_string a_string + execsql { + PRAGMA auto_vacuum = 0; + PRAGMA journal_mode = DELETE; + PRAGMA page_size = 1024; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(a_string(401), a_string(402)); + } + for {set ii 0} {$ii < 13} {incr ii} { + execsql { INSERT INTO t1 SELECT a_string(401), a_string(402) FROM t1 } + } + faultsim_save_and_close + file size test.db +} [expr 1024 * 8268] + +do_faultsim_test pagerfault2-1 -faults oom-transient -prep { + faultsim_restore_and_reopen + sqlite3_db_config_lookaside db 0 256 4096 + execsql { + BEGIN; + SELECT * FROM t1; + INSERT INTO t1 VALUES(5, 6); + SAVEPOINT abc; + UPDATE t1 SET a = a||'x' WHERE rowid<3700; + } +} -body { + execsql { UPDATE t1 SET a = a||'x' WHERE rowid>=3700 AND rowid<=4200 } + execsql { ROLLBACK TO abc } +} -test { + faultsim_test_result {0 {}} +} + +do_test pagerfault2-2-pre1 { + faultsim_restore_and_reopen + execsql { DELETE FROM t1 } + faultsim_save_and_close +} {} + +do_faultsim_test pagerfault2-2 -faults oom-transient -prep { + faultsim_restore_and_reopen + sqlite3_db_config_lookaside db 0 256 4096 + db func a_string a_string + + execsql { + PRAGMA cache_size = 20; + BEGIN; + INSERT INTO t1 VALUES(a_string(401), a_string(402)); + SAVEPOINT abc; + } +} -body { + execsql { INSERT INTO t1 VALUES (a_string(2000000), a_string(2500000)) } +} -test { + faultsim_test_result {0 {}} +} + +sqlite3_memdebug_vfs_oom_test 1 +finish_test + diff --git a/test/pageropt.test b/test/pageropt.test index c28b97c..1c634f6 100644 --- a/test/pageropt.test +++ b/test/pageropt.test @@ -194,6 +194,6 @@ do_test pageropt-4.2 { } } {12 3 3} -sqlite3_soft_heap_limit $soft_limit +sqlite3_soft_heap_limit $cmdlinearg(soft-heap-limit) catch {db2 close} finish_test diff --git a/test/pcache.test b/test/pcache.test index 7c0951e..e2bcd51 100644 --- a/test/pcache.test +++ b/test/pcache.test @@ -16,6 +16,10 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). +# +do_not_use_codec # The pcache module limits the number of pages available to purgeable # caches to the sum of the 'cache_size' values for the set of open diff --git a/test/permutations.test b/test/permutations.test index c4a92b7..f1fb74f 100644 --- a/test/permutations.test +++ b/test/permutations.test @@ -9,172 +9,196 @@ # #*********************************************************************** # -# $Id: permutations.test,v 1.51 2009/07/01 18:09:02 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl +db close -# Argument processing. 
-# -#puts "PERM-DEBUG: argv=$argv" -namespace eval ::perm { - variable testmode [lindex $::argv 0] - variable testfile [lindex $::argv 1] -} -set argv [lrange $argv 2 end] -#puts "PERM-DEBUG: testmode=$::perm::testmode tstfile=$::perm::testfile" - -set ::permutations_presql "" -set ::permutations_test_prefix "" - -if {$::perm::testmode eq "veryquick"} { - set ::perm::testmode [list persistent_journal no_journal] - set ISQUICK 1 -} -if {$::perm::testmode eq "quick"} { - set ::perm::testmode [list persistent_journal no_journal autovacuum_ioerr] - set ISQUICK 1 -} -if {$::perm::testmode eq "all" || $::perm::testmode eq ""} { - set ::perm::testmode { - memsubsys1 memsubsys2 singlethread multithread onefile utf16 exclusive - persistent_journal persistent_journal_error no_journal no_journal_error - autovacuum_ioerr no_mutex_try fullmutex journaltest inmemory_journal - pcache0 pcache10 pcache50 pcache90 pcache100 - } -} -if {$::perm::testmode eq "targets"} { - puts "" - puts -nonewline "veryquick " - puts "Same as persistent_journal and no_journal" - puts -nonewline "quick " - puts "Same as persistent_journal, no_journal and autovacuum_ioerr" - puts -nonewline "all " - puts "Everything except autovacuum_crash" -} -#puts "PERM-DEBUG: testmode=$::perm::testmode" - -set EXCLUDE { - all.test in2.test onefile.test - async2.test incrvacuum_ioerr.test permutations.test - async.test jrnlmode2.test quick.test - autovacuum_crash.test jrnlmode3.test shared_err.test - autovacuum_ioerr.test jrnlmode4.test soak.test - btree8.test loadext.test speed1p.test - corrupt.test malloc2.test speed1.test - crash2.test malloc3.test speed2.test - crash3.test malloc4.test speed3.test - crash4.test mallocAll.test speed4p.test - crash6.test malloc.test speed4.test - crash7.test memleak.test sqllimits1.test - crash.test memsubsys1.test thread001.test - exclusive3.test memsubsys2.test thread002.test - fts3.test misc7.test utf16.test - fuzz_malloc.test misuse.test veryquick.test - fuzz.test mutex2.test vtab_err.test - lookaside.test fuzz3.test savepoint4.test - savepoint6.test -} -set ALLTESTS [list] -foreach filename [glob $testdir/*.test] { - set filename [file tail $filename] - if {[lsearch $EXCLUDE $filename] < 0} { lappend ALLTESTS $filename } -} -set ALLTESTS [lsort $ALLTESTS] - -rename finish_test really_finish_test2 -proc finish_test {} {} - -rename do_test really_do_test - -proc do_test {name args} { - eval really_do_test [list "perm-$::permutations_test_prefix.$name"] $args -} - -# Overload the [sqlite3] command -rename sqlite3 really_sqlite3 -proc sqlite3 {args} { - set r [eval really_sqlite3 $args] - if { [llength $args] == 2 && $::permutations_presql ne "" } { - [lindex $args 0] eval $::permutations_presql - } - set r -} - -# run_tests OPTIONS +#------------------------------------------------------------------------- +# test_suite NAME OPTIONS # # where available options are: # +# -description TITLE (default "") # -initialize SCRIPT (default "") # -shutdown SCRIPT (default "") -# -include LIST-OF-FILES (default $::ALLTESTS) -# -exclude LIST-OF-FILES (default "") # -presql SQL (default "") -# -description TITLE (default "") +# -files LIST-OF-FILES (default $::ALLTESTS) +# -prefix NAME (default "$::NAME.") # -proc run_tests {name args} { - set ::permutations_test_prefix $name - set options(-shutdown) "" - set options(-initialize) "" - set options(-exclude) "" - set options(-include) $::ALLTESTS - set options(-presql) "" - set options(-description) "no description supplied (fixme)" - array set options $args - #puts 
"PERM-DEBUG: name=$name testfile=$::perm::testfile" - #puts "PERM-DEBUG: [array get options]" +proc test_suite {name args} { - if {$::perm::testmode eq "targets"} { - puts [format "% -20s %s" $name [string trim $options(-description)]] - return + set default(-shutdown) "" + set default(-initialize) "" + set default(-presql) "" + set default(-description) "no description supplied (fixme)" + set default(-files) "" + set default(-prefix) "${name}." + + array set options [array get default] + if {[llength $args]%2} { + error "uneven number of options/switches passed to test_suite" } - if {$::perm::testmode ne "" && [lsearch $::perm::testmode $name]<0} { - puts "skipping permutation test $name..." - return + foreach {k v} $args { + set o [array names options ${k}*] + if {[llength $o]>1} { error "ambiguous option: $k" } + if {[llength $o]==0} { error "unknown option: $k" } + set options([lindex $o 0]) $v } - uplevel $options(-initialize) - set ::permutations_presql $options(-presql) + set ::testspec($name) [array get options] + lappend ::testsuitelist $name - foreach file [lsort $options(-include)] { - if {[lsearch $options(-exclude) $file] < 0 && - ( $::perm::testfile eq "" || - $::perm::testfile eq $file || - "$::perm::testfile.test" eq $file ) - } { - set ::perm::shared_cache_setting [shared_cache_setting] - uplevel source $::testdir/$file - if {$::perm::shared_cache_setting ne [shared_cache_setting]} { - error "File $::testdir/$file changed the shared cache setting from $::perm::shared_cache_setting to [shared_cache_setting]" +} + +#------------------------------------------------------------------------- +# test_set ARGS... +# +proc test_set {args} { + set isExclude 0 + foreach a $args { + if {[string match -* $a]} { + switch -- $a { + -include { set isExclude 0 } + -exclude { set isExclude 1 } + default { + error "Unknown switch: $a" + } } + } elseif {$isExclude == 0} { + foreach f $a { set t($f) 1 } } else { - # puts "skipping file $file" + foreach f $a { array unset t $f } } } - uplevel $options(-shutdown) - set ::permutations_test_prefix "" + return [array names t] } -proc shared_cache_setting {} { - set ret 0 - catch { - set ret [sqlite3_enable_shared_cache] - } - return $ret +#------------------------------------------------------------------------- +# Set up the following global list variables containing the names of +# various test scripts: +# +# $alltests +# $allquicktests +# +set alltests [list] +foreach f [glob $testdir/*.test] { lappend alltests [file tail $f] } +if {$::tcl_platform(platform)!="unix"} { + set alltests [test_set $alltests -exclude crash.test crash2.test] +} +set alltests [test_set $alltests -exclude { + all.test async.test quick.test veryquick.test + memleak.test permutations.test soak.test fts3.test + mallocAll.test +}] + +set allquicktests [test_set $alltests -exclude { + async2.test async3.test backup_ioerr.test corrupt.test + corruptC.test crash.test crash2.test crash3.test crash4.test crash5.test + crash6.test crash7.test delete3.test e_fts3.test fts3rnd.test + fkey_malloc.test fuzz.test fuzz3.test fuzz_malloc.test in2.test loadext.test + misc7.test mutex2.test notify2.test onefile.test pagerfault2.test + savepoint4.test savepoint6.test select9.test + speed1.test speed1p.test speed2.test speed3.test speed4.test + speed4p.test sqllimits1.test tkt2686.test thread001.test thread002.test + thread003.test thread004.test thread005.test trans2.test vacuum3.test + incrvacuum_ioerr.test autovacuum_crash.test btree8.test shared_err.test + vtab_err.test walslow.test 
walcrash.test + walthread.test +}] +if {[info exists ::env(QUICKTEST_INCLUDE)]} { + set allquicktests [concat $allquicktests $::env(QUICKTEST_INCLUDE)] } ############################################################################# # Start of tests +# + +#------------------------------------------------------------------------- +# Define the generic test suites: +# +# veryquick +# quick +# full +# +lappend ::testsuitelist xxx + +test_suite "veryquick" -prefix "" -description { + "Very" quick test suite. Runs in less than 5 minutes on a workstation. + This test suite is the same as the "quick" tests, except that some files + that test malloc and IO errors are omitted. +} -files [ + test_set $allquicktests -exclude *malloc* *ioerr* *fault* +] + +test_suite "quick" -prefix "" -description { + Quick test suite. Runs in around 10 minutes on a workstation. +} -files [ + test_set $allquicktests +] + +test_suite "full" -prefix "" -description { + Full test suite. Takes a long time. +} -files [ + test_set $alltests +] -initialize { + unset -nocomplain ::G(isquick) +} + +test_suite "threads" -prefix "" -description { + All multi-threaded tests. +} -files { + notify2.test thread001.test thread002.test thread003.test + thread004.test thread005.test walthread.test +} + +test_suite "fts3" -prefix "" -description { + All FTS3 tests except fts3malloc.test and fts3rnd.test. +} -files { + fts3aa.test fts3ab.test fts3ac.test fts3ad.test fts3ae.test + fts3af.test fts3ag.test fts3ah.test fts3ai.test fts3aj.test + fts3ak.test fts3al.test fts3am.test fts3an.test fts3ao.test + fts3atoken.test fts3b.test fts3c.test fts3cov.test fts3d.test + fts3e.test fts3expr.test fts3expr2.test fts3near.test + fts3query.test fts3snippet.test +} + + +lappend ::testsuitelist xxx +#------------------------------------------------------------------------- +# Define the coverage related test suites: +# +# coverage-wal +# +test_suite "coverage-wal" -description { + Coverage tests for file wal.c. +} -files { + wal.test wal2.test wal3.test walmode.test + walbak.test walhook.test walcrash2.test walcksum.test + walfault.test walbig.test +} + +test_suite "coverage-pager" -description { + Coverage tests for file pager.c. +} -files { + pager1.test pager2.test pagerfault.test pagerfault2.test + walfault.test walbak.test journal2.test tkt-9d68c883.test +} + + +lappend ::testsuitelist xxx +#------------------------------------------------------------------------- +# Define the permutation test suites: +# # Run some tests using pre-allocated page and scratch blocks. # -run_tests "memsubsys1" -description { +test_suite "memsubsys1" -description { Tests using pre-allocated page and scratch blocks -} -exclude { - ioerr5.test - malloc5.test -} -initialize { +} -files [ + test_set $::allquicktests -exclude ioerr5.test malloc5.test +] -initialize { catch {db close} sqlite3_shutdown sqlite3_config_pagecache 4096 24 @@ -197,12 +221,11 @@ run_tests "memsubsys1" -description { # sqlite3_soft_heap_limit() and sqlite3_release_memory() functionality. # This functionality is disabled if a pre-allocated page block is provided. 
# -run_tests "memsubsys2" -description { +test_suite "memsubsys2" -description { Tests using small pre-allocated page and scratch blocks -} -exclude { - ioerr5.test - malloc5.test -} -initialize { +} -files [ + test_set $::allquicktests -exclude ioerr5.test malloc5.test +] -initialize { catch {db close} sqlite3_shutdown sqlite3_config_pagecache 512 5 @@ -220,7 +243,7 @@ run_tests "memsubsys2" -description { # Run all tests with the lookaside allocator disabled. # -run_tests "nolookaside" -description { +test_suite "nolookaside" -description { OOM tests with lookaside disabled } -initialize { catch {db close} @@ -234,11 +257,11 @@ run_tests "nolookaside" -description { sqlite3_config_lookaside 100 500 sqlite3_initialize autoinstall_test_functions -} +} -files $::allquicktests # Run some tests in SQLITE_CONFIG_SINGLETHREAD mode. # -run_tests "singlethread" -description { +test_suite "singlethread" -description { Tests run in SQLITE_CONFIG_SINGLETHREAD mode } -initialize { catch {db close} @@ -246,7 +269,7 @@ run_tests "singlethread" -description { catch {sqlite3_config singlethread} sqlite3_initialize autoinstall_test_functions -} -include { +} -files { delete.test delete2.test insert.test rollback.test select1.test select2.test trans.test update.test vacuum.test types.test types2.test types3.test @@ -258,7 +281,7 @@ run_tests "singlethread" -description { autoinstall_test_functions } -run_tests "nomutex" -description { +test_suite "nomutex" -description { Tests run with the SQLITE_OPEN_MULTITHREADED flag passed to sqlite3_open(). } -initialize { rename sqlite3 sqlite3_nomutex @@ -268,7 +291,7 @@ run_tests "nomutex" -description { } uplevel [concat sqlite3_nomutex $args] } -} -include { +} -files { delete.test delete2.test insert.test rollback.test select1.test select2.test trans.test update.test vacuum.test types.test types2.test types3.test @@ -279,7 +302,7 @@ run_tests "nomutex" -description { # Run some tests in SQLITE_CONFIG_MULTITHREAD mode. # -run_tests "multithread" -description { +test_suite "multithread" -description { Tests run in SQLITE_CONFIG_MULTITHREAD mode } -initialize { catch {db close} @@ -287,7 +310,7 @@ run_tests "multithread" -description { catch {sqlite3_config multithread} sqlite3_initialize autoinstall_test_functions -} -include { +} -files { delete.test delete2.test insert.test rollback.test select1.test select2.test trans.test update.test vacuum.test types.test types2.test types3.test @@ -301,7 +324,7 @@ run_tests "multithread" -description { # Run some tests in SQLITE_OPEN_FULLMUTEX mode. # -run_tests "fullmutex" -description { +test_suite "fullmutex" -description { Tests run in SQLITE_OPEN_FULLMUTEX mode } -initialize { rename sqlite3 sqlite3_fullmutex @@ -311,7 +334,7 @@ run_tests "fullmutex" -description { } uplevel [concat sqlite3_fullmutex $args] } -} -include { +} -files { delete.test delete2.test insert.test rollback.test select1.test select2.test trans.test update.test vacuum.test types.test types2.test types3.test @@ -322,7 +345,7 @@ run_tests "fullmutex" -description { # Run some tests using the "onefile" demo. 
# -run_tests "onefile" -description { +test_suite "onefile" -description { Run some tests using the "test_onefile.c" demo } -initialize { rename sqlite3 sqlite3_onefile @@ -332,7 +355,7 @@ run_tests "onefile" -description { } uplevel [concat sqlite3_onefile $args] } -} -include { +} -files { conflict.test insert.test insert2.test insert3.test rollback.test select1.test select2.test select3.test } -shutdown { @@ -342,11 +365,11 @@ run_tests "onefile" -description { # Run some tests using UTF-16 databases. # -run_tests "utf16" -description { +test_suite "utf16" -description { Run tests using UTF-16 databases } -presql { pragma encoding = 'UTF-16' -} -include { +} -files { alter.test alter3.test auth.test bind.test blob.test capi2.test capi3.test collate1.test collate2.test collate3.test collate4.test collate5.test collate6.test @@ -365,45 +388,45 @@ run_tests "utf16" -description { # Run some tests in exclusive locking mode. # -run_tests "exclusive" -description { +test_suite "exclusive" -description { Run tests in exclusive locking mode. } -presql { pragma locking_mode = 'exclusive' -} -include { +} -files { rollback.test select1.test select2.test malloc.test ioerr.test } # Run some tests in exclusive locking mode with truncated journals. # -run_tests "exclusive-truncate" -description { +test_suite "exclusive-truncate" -description { Run tests in exclusive locking mode and truncate journal mode. } -presql { pragma locking_mode = 'exclusive'; pragma journal_mode = TRUNCATE; -} -include { +} -files { delete.test delete2.test insert.test rollback.test select1.test select2.test update.test malloc.test ioerr.test } # Run some tests in persistent journal mode. # -run_tests "persistent_journal" -description { +test_suite "persistent_journal" -description { Run tests in persistent-journal mode. } -presql { pragma journal_mode = persist -} -include { +} -files { delete.test delete2.test insert.test rollback.test select1.test select2.test trans.test update.test vacuum.test } # Run some tests in truncating journal mode. # -run_tests "truncate_journal" -description { +test_suite "truncate_journal" -description { Run tests in persistent-journal mode. } -presql { pragma journal_mode = truncate -} -include { +} -files { delete.test delete2.test insert.test rollback.test select1.test select2.test trans.test update.test vacuum.test malloc.test ioerr.test @@ -411,76 +434,85 @@ run_tests "truncate_journal" -description { # Run some error tests in persistent journal mode. # -run_tests "persistent_journal_error" -description { +test_suite "persistent_journal_error" -description { Run malloc.test and ioerr.test in persistent-journal mode. } -presql { pragma journal_mode = persist -} -include { +} -files { malloc.test ioerr.test } # Run some tests in no journal mode. # -run_tests "no_journal" -description { +test_suite "no_journal" -description { Run tests in no-journal mode. } -presql { pragma journal_mode = persist -} -include { +} -files { delete.test delete2.test insert.test rollback.test select1.test select2.test trans.test update.test vacuum.test } # Run some error tests in no journal mode. # -run_tests "no_journal_error" -description { +test_suite "no_journal_error" -description { Run malloc.test and ioerr.test in no-journal mode. } -presql { pragma journal_mode = persist -} -include { +} -files { malloc.test ioerr.test } # Run some crash-tests in autovacuum mode. # -run_tests "autovacuum_crash" -description { +test_suite "autovacuum_crash" -description { Run crash.test in autovacuum mode. 
} -presql { pragma auto_vacuum = 1 -} -include crash.test +} -files crash.test # Run some ioerr-tests in autovacuum mode. # -run_tests "autovacuum_ioerr" -description { +test_suite "autovacuum_ioerr" -description { Run ioerr.test in autovacuum mode. } -presql { pragma auto_vacuum = 1 -} -include ioerr.test +} -files ioerr.test # Run tests with an in-memory journal. # -run_tests "inmemory_journal" -description { +test_suite "inmemory_journal" -description { Run tests with an in-memory journal file. } -presql { pragma journal_mode = 'memory' -} -exclude { +} -files [test_set $::allquicktests -exclude { # Exclude all tests that simulate IO errors. autovacuum_ioerr2.test incrvacuum_ioerr.test ioerr.test ioerr.test ioerr2.test ioerr3.test ioerr4.test ioerr5.test vacuum3.test incrblob_err.test diskfull.test backup_ioerr.test + e_fts3.test fts3cov.test fts3malloc.test fts3rnd.test + fts3snippet.test # Exclude test scripts that use tcl IO to access journal files or count # the number of fsync() calls. pager.test exclusive.test jrnlmode.test sync.test misc1.test journal1.test conflict.test crash8.test tkt3457.test io.test + journal3.test + + pager1.test async4.test corrupt.test filefmt.test pager2.test + corrupt5.test corruptA.test pageropt.test # Exclude stmt.test, which expects sub-journals to use temporary files. stmt.test -} + + # WAL mode is different. + wal* +}] ifcapable mem3 { - run_tests "memsys3" -description { + test_suite "memsys3" -description { Run tests using the allocator in mem3.c. - } -exclude { + } -files [test_set $::allquicktests -exclude { autovacuum.test delete3.test manydb.test bigrow.test incrblob2.test memdb.test bitvec.test index2.test memsubsys1.test @@ -488,7 +520,7 @@ ifcapable mem3 { capi3.test join3.test pagesize.test collate5.test limit.test backup_ioerr.test backup_malloc.test - } -initialize { + }] -initialize { catch {db close} sqlite3_reset_auto_extension sqlite3_shutdown @@ -516,16 +548,16 @@ ifcapable mem3 { } ifcapable mem5 { - run_tests "memsys5" -description { + test_suite "memsys5" -description { Run tests using the allocator in mem5.c. - } -exclude { + } -files [test_set $::allquicktests -exclude { autovacuum.test delete3.test manydb.test bigrow.test incrblob2.test memdb.test bitvec.test index2.test memsubsys1.test capi3c.test ioerr.test memsubsys2.test capi3.test join3.test pagesize.test collate5.test limit.test zeroblob.test - } -initialize { + }] -initialize { catch {db close} sqlite3_shutdown sqlite3_config_heap 25000000 64 @@ -543,9 +575,9 @@ ifcapable mem5 { autoinstall_test_functions } - run_tests "memsys5-2" -description { + test_suite "memsys5-2" -description { Run tests using the allocator in mem5.c in a different configuration. - } -include { + } -files { select1.test } -initialize { catch {db close} @@ -567,10 +599,11 @@ ifcapable mem5 { } ifcapable threadsafe { - run_tests "no_mutex_try" -description { + test_suite "no_mutex_try" -description { The sqlite3_mutex_try() interface always fails - } -exclude [concat $EXCLUDE mutex1.test mutex2.test] \ - -initialize { + } -files [ + test_set $::allquicktests -exclude mutex1.test mutex2.test + ] -initialize { catch {db close} sqlite3_shutdown install_mutex_counters 1 @@ -606,9 +639,9 @@ ifcapable threadsafe { # } -shutdown { # rename crashsql {} # rename sa_crashsql crashsql -# } -include crash.test +# } -files crash.test -run_tests "safe_append" -description { +test_suite "safe_append" -description { Run some tests on a SAFE_APPEND file-system. 
} -initialize { rename sqlite3 sqlite3_safeappend @@ -622,8 +655,9 @@ run_tests "safe_append" -description { } -shutdown { rename sqlite3 {} rename sqlite3_shutdown sqlite3 -} -include [lsort [concat shared_err.test $ALLTESTS]] \ - -exclude async3.test +} -files [ + test_set $::allquicktests shared_err.test -exclude async3.test +] # The set of tests to run on the alternative-pcache set perm-alt-pcache-testset { @@ -639,126 +673,142 @@ set perm-alt-pcache-testset { update.test } -run_tests "pcache0" -description { - Alternative pcache implementation without random discard -} -initialize { - catch {db close} - sqlite3_shutdown - sqlite3_config_alt_pcache 1 0 1 - sqlite3_initialize - autoinstall_test_functions -} -shutdown { - catch {db close} - sqlite3_shutdown - sqlite3_config_alt_pcache 0 0 0 - sqlite3_config_lookaside 100 500 - install_malloc_faultsim 1 - sqlite3_initialize - autoinstall_test_functions -} -include ${perm-alt-pcache-testset} - -run_tests "pcache10" -description { - Alternative pcache implementation without 10% random discard -} -initialize { - catch {db close} - sqlite3_shutdown - sqlite3_config_alt_pcache 1 50 1 - sqlite3_initialize - autoinstall_test_functions -} -shutdown { - catch {db close} - sqlite3_shutdown - sqlite3_config_alt_pcache 0 0 0 - sqlite3_initialize - autoinstall_test_functions -} -include ${perm-alt-pcache-testset} - -run_tests "pcache50" -description { - Alternative pcache implementation without 50% random discard -} -initialize { - catch {db close} - sqlite3_shutdown - sqlite3_config_alt_pcache 1 50 1 - sqlite3_initialize - autoinstall_test_functions -} -shutdown { - catch {db close} - sqlite3_shutdown - sqlite3_config_alt_pcache 0 0 0 - sqlite3_initialize - autoinstall_test_functions -} -include ${perm-alt-pcache-testset} - -run_tests "pcache90" -description { - Alternative pcache implementation without 90% random discard -} -initialize { - catch {db close} - sqlite3_shutdown - sqlite3_config_alt_pcache 1 50 1 - sqlite3_initialize - autoinstall_test_functions -} -shutdown { - catch {db close} - sqlite3_shutdown - sqlite3_config_alt_pcache 0 0 0 - sqlite3_initialize - autoinstall_test_functions -} -include ${perm-alt-pcache-testset} - -run_tests "pcache100" -description { - Alternative pcache implementation that always discards when unpinning -} -initialize { - catch {db close} - sqlite3_shutdown - sqlite3_config_alt_pcache 1 100 1 - sqlite3_initialize - autoinstall_test_functions -} -shutdown { - catch {db close} - sqlite3_shutdown - sqlite3_config_alt_pcache 0 0 0 - sqlite3_initialize - autoinstall_test_functions -} -include ${perm-alt-pcache-testset} - -run_tests "journaltest" -description { - Check that pages are synced before being written (test_journal.c). 
-} -initialize { - set ISQUICK 1 - catch {db close} - register_jt_vfs -default "" - #sqlite3_instvfs binarylog -default binarylog ostrace.bin -} -shutdown { - #sqlite3_instvfs destroy binarylog - unregister_jt_vfs -} -include [concat $::ALLTESTS savepoint6.test -] -exclude { - incrvacuum.test - ioerr.test - corrupt4.test - io.test - crash8.test - async4.test +foreach discard_rate {0 10 50 90 100} { + test_suite "pcache${discard_rate}" -description " + Alternative pcache implementation with ${discard_rate}% random discard + " -initialize " + catch {db close} + sqlite3_shutdown + sqlite3_config_alt_pcache 1 $discard_rate 1 + sqlite3_initialize + autoinstall_test_functions + " -shutdown { + catch {db close} + sqlite3_shutdown + sqlite3_config_alt_pcache 0 0 0 + sqlite3_config_lookaside 100 500 + install_malloc_faultsim 1 + sqlite3_initialize + autoinstall_test_functions + } -files ${perm-alt-pcache-testset} } +test_suite "journaltest" -description { + Check that pages are synced before being written (test_journal.c). +} -initialize { + catch {db close} + register_jt_vfs -default "" +} -shutdown { + unregister_jt_vfs +} -files [test_set $::allquicktests -exclude { + wal* incrvacuum.test ioerr.test corrupt4.test io.test crash8.test + async4.test bigfile.test +}] + +if {[info commands register_demovfs] != ""} { + test_suite "demovfs" -description { + Check that the demovfs (code in test_demovfs.c) more or less works. + } -initialize { + register_demovfs + } -shutdown { + unregister_demovfs + } -files { + insert.test insert2.test insert3.test rollback.test + select1.test select2.test select3.test + } +} + +test_suite "wal" -description { + Run tests with journal_mode=WAL +} -initialize { + set ::G(savepoint6_iterations) 100 +} -shutdown { + unset -nocomplain ::G(savepoint6_iterations) +} -files { + savepoint.test savepoint2.test savepoint6.test + trans.test avtrans.test + + fts3aa.test fts3ab.test fts3ac.test fts3ad.test + fts3ae.test fts3af.test fts3ag.test fts3ah.test + fts3ai.test fts3aj.test fts3ak.test fts3al.test + fts3am.test fts3an.test fts3ao.test fts3b.test + fts3c.test fts3d.test fts3e.test fts3query.test +} # End of tests ############################################################################# -if {$::perm::testmode eq "targets"} { puts "" ; exit } - -# Restore the [sqlite3] command. +# run_tests NAME OPTIONS # -rename sqlite3 {} -rename really_sqlite3 sqlite3 - -# Restore the [finish_test] command. +# where available options are: # -rename finish_test "" -rename really_finish_test2 finish_test - -# Restore the [do_test] command. +# -description TITLE +# -initialize SCRIPT +# -shutdown SCRIPT +# -presql SQL +# -files LIST-OF-FILES +# -prefix NAME # -rename do_test "" -rename really_do_test do_test +proc run_tests {name args} { + array set options $args + + set ::G(perm:name) $name + set ::G(perm:prefix) $options(-prefix) + set ::G(perm:presql) $options(-presql) + set ::G(isquick) 1 + + uplevel $options(-initialize) + + foreach file [lsort $options(-files)] { + slave_test_file $::testdir/$file + } + + uplevel $options(-shutdown) + + unset ::G(perm:name) + unset ::G(perm:prefix) + unset ::G(perm:presql) +} + +proc run_test_suite {name} { + if {[info exists ::testspec($name)]==0} { + error "No such test suite: $name" + } + uplevel run_tests $name $::testspec($name) +} + +proc help {} { + puts "Usage: $::argv0 TESTSUITE ?TESTFILE?" 
+ puts "" + puts "Available test-suites are:" + foreach k $::testsuitelist { + if {[info exists ::testspec($k)]==0} { + puts " ----------------------------------------" + puts "" + } else { + array set o $::testspec($k) + puts "Test suite: \"$k\"" + set d [string trim $o(-description)] + set d [regsub {\n *} $d "\n "] + puts " $d" + puts "" + } + } + exit -1 +} + +if {[info script] == $argv0} { + proc main {argv} { + if {[llength $argv]==0} { + help + } else { + set suite [lindex $argv 0] + if {[info exists ::testspec($suite)]==0} help + set extra "" + if {[llength $argv]>1} { set extra [list -files [lrange $argv 1 end]] } + eval run_tests $suite $::testspec($suite) $extra + } + } + main $argv + finish_test +} -finish_test diff --git a/test/pragma.test b/test/pragma.test index 46f7d6d..3e926eb 100644 --- a/test/pragma.test +++ b/test/pragma.test @@ -17,6 +17,11 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). +# +do_not_use_codec + # Test organization: # # pragma-1.*: Test cache_size, default_cache_size and synchronous on main db. @@ -317,6 +322,7 @@ ifcapable attach { puts -nonewline $out [read $in] close $in close $out + hexio_write testerr.db 28 00000000 execsql {REINDEX t2} execsql {PRAGMA integrity_check} } {ok} @@ -634,13 +640,15 @@ do_test pragma-7.1.2 { } {} } ;# ifcapable schema_pragmas ifcapable {utf16} { - do_test pragma-7.2 { - db close - sqlite3 db test.db - catchsql { - pragma encoding=bogus; - } - } {1 {unsupported encoding: bogus}} + if {[permutation] == ""} { + do_test pragma-7.2 { + db close + sqlite3 db test.db + catchsql { + pragma encoding=bogus; + } + } {1 {unsupported encoding: bogus}} + } } ifcapable tempdb { do_test pragma-7.3 { diff --git a/test/quick.test b/test/quick.test index 431b829..974b02d 100644 --- a/test/quick.test +++ b/test/quick.test @@ -6,151 +6,10 @@ #*********************************************************************** # This file runs all tests. 
# -# $Id: quick.test,v 1.95 2009/03/16 14:48:19 danielk1977 Exp $ - -proc lshift {lvar} { - upvar $lvar l - set ret [lindex $l 0] - set l [lrange $l 1 end] - return $ret -} -while {[set arg [lshift argv]] != ""} { - switch -- $arg { - -sharedpagercache { - sqlite3_enable_shared_cache 1 - } - -soak { - set SOAKTEST 1 - } - -start { - set STARTAT "[lshift argv]*" - } - default { - set argv [linsert $argv 0 $arg] - break - } - } -} set testdir [file dirname $argv0] -source $testdir/tester.tcl -rename finish_test really_finish_test -proc finish_test {} { - catch {db close} - show_memstats -} -set ISQUICK 1 +source $testdir/permutations.test -set EXCLUDE { - all.test - async.test - async2.test - async3.test - backup_ioerr.test - corrupt.test - corruptC.test - crash.test - crash2.test - crash3.test - crash4.test - crash5.test - crash6.test - crash7.test - delete3.test - e_fts3.test - fts3.test - fts3rnd.test - fkey_malloc.test - fuzz.test - fuzz3.test - fuzz_malloc.test - in2.test - loadext.test - memleak.test - misc7.test - misuse.test - mutex2.test - notify2.test - onefile.test - permutations.test - quick.test - savepoint4.test - savepoint6.test - select9.test - soak.test - speed1.test - speed1p.test - speed2.test - speed3.test - speed4.test - speed4p.test - sqllimits1.test - tkt2686.test - thread001.test - thread002.test - thread003.test - thread004.test - thread005.test - trans2.test - vacuum3.test +run_test_suite quick - incrvacuum_ioerr.test - autovacuum_crash.test - btree8.test - shared_err.test - vtab_err.test - veryquick.test - mallocAll.test -} - -if {[sqlite3 -has-codec]} { - # lappend EXCLUDE \ - # conflict.test -} - - -# Files to include in the test. If this list is empty then everything -# that is not in the EXCLUDE list is run. -# -set INCLUDE { -} - -# If the QUICKTEST_INCLUDE environment variable is set, then interpret -# it as a list of test files. Always run these files, even if they -# begin with "malloc*" or "ioerr*" or are part of the EXCLUDE list -# defined above. -# -set QUICKTEST_INCLUDE {} -catch { set QUICKTEST_INCLUDE $env(QUICKTEST_INCLUDE) } - -foreach testfile [lsort -dictionary [glob $testdir/*.test]] { - set tail [file tail $testfile] - if { [lsearch $QUICKTEST_INCLUDE $tail]<0 } { - # If this is "veryquick.test", do not run any of the malloc or - # IO error simulations. - if {[info exists ISVERYQUICK] && ( - [string match *malloc* $testfile] || [string match *ioerr* $testfile] - ) } { - continue - } - if {[lsearch -exact $EXCLUDE $tail]>=0} continue - } - if {[llength $INCLUDE]>0 && [lsearch -exact $INCLUDE $tail]<0} continue - if {[info exists STARTAT] && [string match $STARTAT $tail]} {unset STARTAT} - if {[info exists STARTAT]} continue - source $testfile - catch {db close} - if {$sqlite_open_file_count>0} { - puts "$tail did not close all files: $sqlite_open_file_count" - incr nErr - lappend ::failList $tail - set sqlite_open_file_count 0 - } -} -#set argv quick -#source $testdir/permutations.test -#set argv "" -source $testdir/misuse.test - -set sqlite_open_file_count 0 -really_finish_test +finish_test diff --git a/test/rdonly.test b/test/rdonly.test index a975cef..bf19597 100644 --- a/test/rdonly.test +++ b/test/rdonly.test @@ -18,6 +18,10 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +# Do not use a codec for tests in this file, as the database file is +# manipulated directly using tcl scripts (using the [hexio_write] command). +# +do_not_use_codec # Create a database. 
# @@ -29,7 +33,7 @@ do_test rdonly-1.1 { } } {1} -# Changes the write version from 1 to 2. Verify that the database +# Changes the write version from 1 to 3. Verify that the database # can be read but not written. # do_test rdonly-1.2 { @@ -37,7 +41,7 @@ do_test rdonly-1.2 { hexio_get_int [hexio_read test.db 18 1] } 1 do_test rdonly-1.3 { - hexio_write test.db 18 02 + hexio_write test.db 18 03 sqlite3 db test.db execsql { SELECT * FROM t1; @@ -67,8 +71,10 @@ do_test rdonly-1.5 { # write-version is reloaded). This way, SQLite does not discover that # the database is read-only until after it is locked. # +set ro_version 02 +ifcapable wal { set ro_version 03 } do_test rdonly-1.6 { - hexio_write test.db 18 02 ; # write-version + hexio_write test.db 18 $ro_version ; # write-version hexio_write test.db 24 11223344 ; # change-counter catchsql { INSERT INTO t1 VALUES(2); diff --git a/test/rollback.test b/test/rollback.test index 93d1246..d462fc4 100644 --- a/test/rollback.test +++ b/test/rollback.test @@ -79,11 +79,9 @@ do_test rollback-1.9 { sqlite3_finalize $STMT } {SQLITE_OK} -set permutation "" -catch {set permutation $::permutations_test_prefix} if {$tcl_platform(platform) == "unix" - && $permutation ne "onefile" - && $permutation ne "inmemory_journal" + && [permutation] ne "onefile" + && [permutation] ne "inmemory_journal" } { do_test rollback-2.1 { execsql { @@ -135,7 +133,7 @@ if {$tcl_platform(platform) == "unix" SELECT distinct tbl_name FROM sqlite_master; } db2 } {t1 t3} - if {[lsearch {exclusive persistent_journal no_journal} $permutation]<0} { + if {[lsearch {exclusive persistent_journal no_journal} [permutation]]<0} { do_test rollback-2.3 { file exists testA.db-journal } 0 diff --git a/test/rowhash.test b/test/rowhash.test index 0d260e9..4a553cd 100644 --- a/test/rowhash.test +++ b/test/rowhash.test @@ -12,7 +12,9 @@ # This file implements regression tests for SQLite library. The # focus of this file is the code in rowhash.c. # -# $Id: rowhash.test,v 1.5 2009/05/02 12:02:02 drh Exp $ +# NB: The rowhash.c module is no longer part of the source tree. But +# we might as well keep this test. 
+# set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -45,6 +47,7 @@ do_keyset_test rowhash-2.2 {0 1 2 3} do_keyset_test rowhash-2.3 {62 125 188} if {[working_64bit_int]} { expr srand(1) + unset -nocomplain i L for {set i 4} {$i < 10} {incr i} { for {set j 0} {$j < 5000} {incr j} { lappend L [expr int(rand()*1000000000)] diff --git a/test/rtree.test b/test/rtree.test index 11f5ab9..5603b05 100644 --- a/test/rtree.test +++ b/test/rtree.test @@ -15,7 +15,7 @@ rename finish_test rtree_finish_test proc finish_test {} {} set RTREE_EXCLUDE { } -if {[info exists ISQUICK] && $ISQUICK} { +if {[info exists G(isquick)] && $G(isquick)} { set RTREE_EXCLUDE rtree3.test } @@ -28,8 +28,7 @@ foreach testfile [lsort -dictionary [glob -nocomplain $rtreedir/*.test]] { catch {db close} if {$sqlite_open_file_count>0} { puts "$tail did not close all files: $sqlite_open_file_count" - incr nErr - lappend ::failList $tail + fail_test $tail set sqlite_open_file_count 0 } } diff --git a/test/savepoint.test b/test/savepoint.test index 71037d6..29f64f6 100644 --- a/test/savepoint.test +++ b/test/savepoint.test @@ -13,7 +13,8 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl - +source $testdir/lock_common.tcl +source $testdir/malloc_common.tcl #---------------------------------------------------------------------- # The following tests - savepoint-1.* - test that the SAVEPOINT, RELEASE @@ -21,6 +22,7 @@ source $testdir/tester.tcl # flag is correctly set and unset as a result. # do_test savepoint-1.1 { + wal_set_journal_mode execsql { SAVEPOINT sp1; RELEASE sp1; @@ -93,6 +95,7 @@ do_test savepoint-1.5 { do_test savepoint-1.6 { execsql COMMIT } {} +wal_check_journal_mode savepoint-1.7 #------------------------------------------------------------------------ # These tests - savepoint-2.* - test rollbacks and releases of savepoints @@ -175,36 +178,42 @@ do_test savepoint-2.11 { } execsql { SELECT * FROM t1 } } {} +wal_check_journal_mode savepoint-2.12 #------------------------------------------------------------------------ # This block of tests - savepoint-3.* - test that when a transaction # savepoint is rolled back, locks are not released from database files. # And that when a transaction savepoint is released, they are released. +# +# These tests do not work in WAL mode. WAL mode does not take RESERVED +# locks on the database file. 
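# [Editorial sketch; not part of the original patch.]  Why the guard that
# follows is needed: in WAL mode a writer is protected by locks on the
# shared-memory (-shm) file rather than by a RESERVED lock on the database
# file itself, so the "PRAGMA lock_status" expectations below only hold for
# rollback journals.  The guard pattern reduces to this (test name assumed):
if {[wal_is_wal_mode]==0} {
  do_test savepoint-3.ex {
    execsql { BEGIN; INSERT INTO t1 VALUES(1, 2, 3); PRAGMA lock_status }
  } {main reserved temp closed}
  execsql { ROLLBACK }
}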
# -do_test savepoint-3.1 { - execsql { SAVEPOINT "transaction" } - execsql { PRAGMA lock_status } -} {main unlocked temp closed} - -do_test savepoint-3.2 { - execsql { INSERT INTO t1 VALUES(1, 2, 3) } - execsql { PRAGMA lock_status } -} {main reserved temp closed} - -do_test savepoint-3.3 { - execsql { ROLLBACK TO "transaction" } - execsql { PRAGMA lock_status } -} {main reserved temp closed} - -do_test savepoint-3.4 { - execsql { INSERT INTO t1 VALUES(1, 2, 3) } - execsql { PRAGMA lock_status } -} {main reserved temp closed} - -do_test savepoint-3.5 { - execsql { RELEASE "transaction" } - execsql { PRAGMA lock_status } -} {main unlocked temp closed} +if {[wal_is_wal_mode]==0} { + do_test savepoint-3.1 { + execsql { SAVEPOINT "transaction" } + execsql { PRAGMA lock_status } + } {main unlocked temp closed} + + do_test savepoint-3.2 { + execsql { INSERT INTO t1 VALUES(1, 2, 3) } + execsql { PRAGMA lock_status } + } {main reserved temp closed} + + do_test savepoint-3.3 { + execsql { ROLLBACK TO "transaction" } + execsql { PRAGMA lock_status } + } {main reserved temp closed} + + do_test savepoint-3.4 { + execsql { INSERT INTO t1 VALUES(1, 2, 3) } + execsql { PRAGMA lock_status } + } {main reserved temp closed} + + do_test savepoint-3.5 { + execsql { RELEASE "transaction" } + execsql { PRAGMA lock_status } + } {main unlocked temp closed} +} #------------------------------------------------------------------------ # Test that savepoints that include schema modifications are handled @@ -264,6 +273,7 @@ do_test savepoint-4.7 { do_test savepoint-4.8 { execsql COMMIT } {} +wal_check_journal_mode savepoint-4.9 #------------------------------------------------------------------------ # Test some logic errors to do with the savepoint feature. @@ -312,6 +322,18 @@ ifcapable incrblob { execsql {release abc} } {} + # Rollback mode: + # + # Open a savepoint transaction and insert a row into the database. Then, + # using a second database handle, open a read-only transaction on the + # database file. Check that the savepoint transaction cannot be committed + # until after the read-only transaction has been closed. + # + # WAL mode: + # + # As above, except that the savepoint transaction can be successfully + # committed before the read-only transaction has been closed. 
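# [Editorial note and sketch; not part of the original patch.]  The reason
# for the difference described above: committing in rollback mode needs an
# EXCLUSIVE lock on the database file, which the second handle's SHARED
# (read) lock blocks, whereas a WAL commit only appends to the -wal file and
# never has to exclude readers.  Stand-alone illustration (the connection
# and table names here are assumptions, not taken from the test below):
sqlite3 dbA test.db
sqlite3 dbB test.db
dbA eval { CREATE TABLE IF NOT EXISTS demo(x) }
dbA eval { SAVEPOINT sp; INSERT INTO demo VALUES(1) }
dbB eval { BEGIN; SELECT count(*) FROM demo }
set rc [catch { dbA eval { RELEASE sp } } msg]
# rollback mode: rc==1 and msg=="database is locked"; WAL mode: rc==0
dbB close
dbA close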
+ # do_test savepoint-5.4.1 { execsql { SAVEPOINT main; @@ -320,17 +342,28 @@ ifcapable incrblob { } {} do_test savepoint-5.4.2 { sqlite3 db2 test.db - execsql { BEGIN ; SELECT * FROM blobs } db2 - catchsql { RELEASE main } - } {1 {database is locked}} - do_test savepoint-5.4.3 { - db2 close - catchsql { RELEASE main } - } {0 {}} - do_test savepoint-5.4.4 { + execsql { BEGIN ; SELECT count(*) FROM blobs } db2 + } {1} + if {[wal_is_wal_mode]} { + do_test savepoint-5.4.3 { catchsql "RELEASE main" } {0 {}} + do_test savepoint-5.4.4 { db2 close } {} + } else { + do_test savepoint-5.4.3 { + catchsql { RELEASE main } + } {1 {database is locked}} + do_test savepoint-5.4.4 { + db2 close + catchsql { RELEASE main } + } {0 {}} + } + do_test savepoint-5.4.5 { execsql { SELECT x FROM blobs WHERE rowid = 2 } } {{another blob}} + do_test savepoint-5.4.6 { + execsql { SELECT count(*) FROM blobs } + } {2} } +wal_check_journal_mode savepoint-5.5 #------------------------------------------------------------------------- # The following tests, savepoint-6.*, test an incr-vacuum inside of a @@ -342,8 +375,9 @@ ifcapable {autovacuum && pragma} { sqlite3 db test.db do_test savepoint-6.1 { - execsql { - PRAGMA auto_vacuum = incremental; + execsql { PRAGMA auto_vacuum = incremental } + wal_set_journal_mode + execsql { CREATE TABLE t1(a, b, c); CREATE INDEX i1 ON t1(a, b); BEGIN; @@ -376,6 +410,8 @@ ifcapable {autovacuum && pragma} { } {} integrity_check savepoint-6.4 + + wal_check_journal_mode savepoint-6.5 } #------------------------------------------------------------------------- @@ -387,8 +423,9 @@ file delete -force test.db sqlite3 db test.db do_test savepoint-7.1 { + execsql { PRAGMA auto_vacuum = incremental } + wal_set_journal_mode execsql { - PRAGMA auto_vacuum = incremental; PRAGMA cache_size = 10; BEGIN; CREATE TABLE t1(a PRIMARY KEY, b); @@ -449,13 +486,15 @@ do_test savepoint-7.3.2 { } execsql { PRAGMA integrity_check } } {ok} +wal_check_journal_mode savepoint-7.3.3 do_test savepoint-7.4.1 { db close file delete -force test.db sqlite3 db test.db + execsql { PRAGMA auto_vacuum = incremental } + wal_set_journal_mode execsql { - PRAGMA auto_vacuum = incremental; CREATE TABLE t1(a, b, PRIMARY KEY(a, b)); INSERT INTO t1 VALUES(randstr(1000,1000), randstr(1000,1000)); BEGIN; @@ -497,6 +536,7 @@ do_test savepoint-7.5.2 { DROP TABLE t5; } } {} +wal_check_journal_mode savepoint-7.5.3 # Test oddly named and quoted savepoints. 
# @@ -598,120 +638,122 @@ do_test savepoint-10.1.3 { set templockstate [lindex [db eval {PRAGMA lock_status}] 3] -do_test savepoint-10.2.1 { - file delete -force test3.db - file delete -force test2.db - execsql { - ATTACH 'test2.db' AS aux1; - ATTACH 'test3.db' AS aux2; - DROP TABLE t1; - CREATE TABLE main.t1(x, y); - CREATE TABLE aux1.t2(x, y); - CREATE TABLE aux2.t3(x, y); - SELECT name FROM sqlite_master - UNION ALL - SELECT name FROM aux1.sqlite_master - UNION ALL - SELECT name FROM aux2.sqlite_master; - } -} {t1 t2 t3} -do_test savepoint-10.2.2 { - execsql { PRAGMA lock_status } -} [list main unlocked temp $templockstate aux1 unlocked aux2 unlocked] - -do_test savepoint-10.2.3 { - execsql { - SAVEPOINT one; - INSERT INTO t1 VALUES(1, 2); - PRAGMA lock_status; - } -} [list main reserved temp $templockstate aux1 unlocked aux2 unlocked] -do_test savepoint-10.2.4 { - execsql { - INSERT INTO t3 VALUES(3, 4); - PRAGMA lock_status; - } -} [list main reserved temp $templockstate aux1 unlocked aux2 reserved] -do_test savepoint-10.2.5 { - execsql { - SAVEPOINT two; - INSERT INTO t2 VALUES(5, 6); - PRAGMA lock_status; - } -} [list main reserved temp $templockstate aux1 reserved aux2 reserved] -do_test savepoint-10.2.6 { - execsql { SELECT * FROM t2 } -} {5 6} -do_test savepoint-10.2.7 { - execsql { ROLLBACK TO two } - execsql { SELECT * FROM t2 } -} {} -do_test savepoint-10.2.8 { - execsql { PRAGMA lock_status } -} [list main reserved temp $templockstate aux1 reserved aux2 reserved] -do_test savepoint-10.2.9 { - execsql { SELECT 'a', * FROM t1 UNION ALL SELECT 'b', * FROM t3 } -} {a 1 2 b 3 4} -do_test savepoint-10.2.9 { - execsql { - INSERT INTO t2 VALUES(5, 6); - RELEASE one; - } - execsql { - SELECT * FROM t1; - SELECT * FROM t2; - SELECT * FROM t3; - } -} {1 2 5 6 3 4} -do_test savepoint-10.2.9 { - execsql { PRAGMA lock_status } -} [list main unlocked temp $templockstate aux1 unlocked aux2 unlocked] - -do_test savepoint-10.2.10 { - execsql { - SAVEPOINT one; - INSERT INTO t1 VALUES('a', 'b'); +if {[wal_is_wal_mode]==0} { + do_test savepoint-10.2.1 { + file delete -force test3.db + file delete -force test2.db + execsql { + ATTACH 'test2.db' AS aux1; + ATTACH 'test3.db' AS aux2; + DROP TABLE t1; + CREATE TABLE main.t1(x, y); + CREATE TABLE aux1.t2(x, y); + CREATE TABLE aux2.t3(x, y); + SELECT name FROM sqlite_master + UNION ALL + SELECT name FROM aux1.sqlite_master + UNION ALL + SELECT name FROM aux2.sqlite_master; + } + } {t1 t2 t3} + do_test savepoint-10.2.2 { + execsql { PRAGMA lock_status } + } [list main unlocked temp $templockstate aux1 unlocked aux2 unlocked] + + do_test savepoint-10.2.3 { + execsql { + SAVEPOINT one; + INSERT INTO t1 VALUES(1, 2); + PRAGMA lock_status; + } + } [list main reserved temp $templockstate aux1 unlocked aux2 unlocked] + do_test savepoint-10.2.4 { + execsql { + INSERT INTO t3 VALUES(3, 4); + PRAGMA lock_status; + } + } [list main reserved temp $templockstate aux1 unlocked aux2 reserved] + do_test savepoint-10.2.5 { + execsql { SAVEPOINT two; - INSERT INTO t2 VALUES('c', 'd'); - SAVEPOINT three; - INSERT INTO t3 VALUES('e', 'f'); - } - execsql { - SELECT * FROM t1; - SELECT * FROM t2; - SELECT * FROM t3; - } -} {1 2 a b 5 6 c d 3 4 e f} -do_test savepoint-10.2.11 { - execsql { ROLLBACK TO two } - execsql { - SELECT * FROM t1; - SELECT * FROM t2; - SELECT * FROM t3; - } -} {1 2 a b 5 6 3 4} -do_test savepoint-10.2.12 { - execsql { - INSERT INTO t3 VALUES('g', 'h'); - ROLLBACK TO two; - } - execsql { - SELECT * FROM t1; - SELECT * FROM t2; - SELECT * FROM t3; - } 
-} {1 2 a b 5 6 3 4} -do_test savepoint-10.2.13 { - execsql { ROLLBACK } - execsql { - SELECT * FROM t1; - SELECT * FROM t2; - SELECT * FROM t3; - } -} {1 2 5 6 3 4} -do_test savepoint-10.2.14 { - execsql { PRAGMA lock_status } -} [list main unlocked temp $templockstate aux1 unlocked aux2 unlocked] + INSERT INTO t2 VALUES(5, 6); + PRAGMA lock_status; + } + } [list main reserved temp $templockstate aux1 reserved aux2 reserved] + do_test savepoint-10.2.6 { + execsql { SELECT * FROM t2 } + } {5 6} + do_test savepoint-10.2.7 { + execsql { ROLLBACK TO two } + execsql { SELECT * FROM t2 } + } {} + do_test savepoint-10.2.8 { + execsql { PRAGMA lock_status } + } [list main reserved temp $templockstate aux1 reserved aux2 reserved] + do_test savepoint-10.2.9 { + execsql { SELECT 'a', * FROM t1 UNION ALL SELECT 'b', * FROM t3 } + } {a 1 2 b 3 4} + do_test savepoint-10.2.9 { + execsql { + INSERT INTO t2 VALUES(5, 6); + RELEASE one; + } + execsql { + SELECT * FROM t1; + SELECT * FROM t2; + SELECT * FROM t3; + } + } {1 2 5 6 3 4} + do_test savepoint-10.2.9 { + execsql { PRAGMA lock_status } + } [list main unlocked temp $templockstate aux1 unlocked aux2 unlocked] + + do_test savepoint-10.2.10 { + execsql { + SAVEPOINT one; + INSERT INTO t1 VALUES('a', 'b'); + SAVEPOINT two; + INSERT INTO t2 VALUES('c', 'd'); + SAVEPOINT three; + INSERT INTO t3 VALUES('e', 'f'); + } + execsql { + SELECT * FROM t1; + SELECT * FROM t2; + SELECT * FROM t3; + } + } {1 2 a b 5 6 c d 3 4 e f} + do_test savepoint-10.2.11 { + execsql { ROLLBACK TO two } + execsql { + SELECT * FROM t1; + SELECT * FROM t2; + SELECT * FROM t3; + } + } {1 2 a b 5 6 3 4} + do_test savepoint-10.2.12 { + execsql { + INSERT INTO t3 VALUES('g', 'h'); + ROLLBACK TO two; + } + execsql { + SELECT * FROM t1; + SELECT * FROM t2; + SELECT * FROM t3; + } + } {1 2 a b 5 6 3 4} + do_test savepoint-10.2.13 { + execsql { ROLLBACK } + execsql { + SELECT * FROM t1; + SELECT * FROM t2; + SELECT * FROM t3; + } + } {1 2 5 6 3 4} + do_test savepoint-10.2.14 { + execsql { PRAGMA lock_status } + } [list main unlocked temp $templockstate aux1 unlocked aux2 unlocked] +} #------------------------------------------------------------------------- # The following tests - savepoint-11.* - test the interaction of @@ -722,8 +764,9 @@ do_test savepoint-11.1 { db close file delete -force test.db sqlite3 db test.db + execsql { PRAGMA auto_vacuum = full; } + wal_set_journal_mode execsql { - PRAGMA auto_vacuum = full; CREATE TABLE t1(a, b, UNIQUE(a, b)); INSERT INTO t1 VALUES(1, randstr(1000,1000)); INSERT INTO t1 VALUES(2, randstr(1000,1000)); @@ -751,10 +794,10 @@ do_test savepoint-11.6 { integrity_check savepoint-11.7 do_test savepoint-11.8 { execsql { ROLLBACK } + execsql { PRAGMA wal_checkpoint } file size test.db } {8192} - do_test savepoint-11.9 { execsql { DROP TABLE IF EXISTS t1; @@ -782,6 +825,7 @@ do_test savepoint-11.11 { do_test savepoint-11.12 { execsql {SELECT * FROM t2} } {1 2 3 4} +wal_check_journal_mode savepoint-11.13 #------------------------------------------------------------------------- # The following tests - savepoint-12.* - test the interaction of @@ -815,50 +859,164 @@ do_test savepoint-12.3 { do_test savepoint-12.4 { execsql { SAVEPOINT one } } {} +wal_check_journal_mode savepoint-12.5 #------------------------------------------------------------------------- # The following tests - savepoint-13.* - test the interaction of # savepoints and "journal_mode = off". 
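# [Editorial note and sketch; not part of the original patch.]  With
# "PRAGMA journal_mode = off" no rollback journal is written, so ROLLBACK
# and ROLLBACK TO cannot restore earlier content; that is why savepoint-13.4
# below expects every inserted row, including the "rolled back" ones, to
# survive.  Condensed illustration (the scratch file name is an assumption):
sqlite3 dboff demo_off.db
dboff eval {
  PRAGMA journal_mode = off;
  CREATE TABLE t(x);
  BEGIN;
    INSERT INTO t VALUES(1);
    SAVEPOINT s1;
      INSERT INTO t VALUES(2);
    ROLLBACK TO s1;
  ROLLBACK;
}
dboff eval { SELECT x FROM t }   ;# both rows remain, mirroring savepoint-13.4
dboff close
file delete -force demo_off.db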
# -do_test savepoint-13.1 { - db close - catch {file delete -force test.db} - sqlite3 db test.db - execsql { - BEGIN; - CREATE TABLE t1(a PRIMARY KEY, b); - INSERT INTO t1 VALUES(1, 2); - COMMIT; - PRAGMA journal_mode = off; - } -} {off} -do_test savepoint-13.2 { - execsql { - BEGIN; - INSERT INTO t1 VALUES(3, 4); - INSERT INTO t1 SELECT a+4,b+4 FROM t1; - COMMIT; - } -} {} -do_test savepoint-13.3 { - execsql { - BEGIN; - INSERT INTO t1 VALUES(9, 10); - SAVEPOINT s1; - INSERT INTO t1 VALUES(11, 12); - COMMIT; - } -} {} -do_test savepoint-13.4 { - execsql { - BEGIN; - INSERT INTO t1 VALUES(13, 14); - SAVEPOINT s1; - INSERT INTO t1 VALUES(15, 16); - ROLLBACK TO s1; - ROLLBACK; - SELECT * FROM t1; - } -} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16} +if {[wal_is_wal_mode]==0} { + do_test savepoint-13.1 { + db close + catch {file delete -force test.db} + sqlite3 db test.db + execsql { + BEGIN; + CREATE TABLE t1(a PRIMARY KEY, b); + INSERT INTO t1 VALUES(1, 2); + COMMIT; + PRAGMA journal_mode = off; + } + } {off} + do_test savepoint-13.2 { + execsql { + BEGIN; + INSERT INTO t1 VALUES(3, 4); + INSERT INTO t1 SELECT a+4,b+4 FROM t1; + COMMIT; + } + } {} + do_test savepoint-13.3 { + execsql { + BEGIN; + INSERT INTO t1 VALUES(9, 10); + SAVEPOINT s1; + INSERT INTO t1 VALUES(11, 12); + COMMIT; + } + } {} + do_test savepoint-13.4 { + execsql { + BEGIN; + INSERT INTO t1 VALUES(13, 14); + SAVEPOINT s1; + INSERT INTO t1 VALUES(15, 16); + ROLLBACK TO s1; + ROLLBACK; + SELECT * FROM t1; + } + } {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16} +} + +db close +file delete test.db +do_multiclient_test tn { + do_test savepoint-14.$tn.1 { + sql1 { + CREATE TABLE foo(x); + INSERT INTO foo VALUES(1); + INSERT INTO foo VALUES(2); + } + sql2 { + BEGIN; + SELECT * FROM foo; + } + } {1 2} + do_test savepoint-14.$tn.2 { + sql1 { + SAVEPOINT one; + INSERT INTO foo VALUES(1); + } + csql1 { RELEASE one } + } {1 {database is locked}} + do_test savepoint-14.$tn.3 { + sql1 { ROLLBACK TO one } + sql2 { COMMIT } + sql1 { RELEASE one } + } {} + + do_test savepoint-14.$tn.4 { + sql2 { + BEGIN; + SELECT * FROM foo; + } + } {1 2} + do_test savepoint-14.$tn.5 { + sql1 { + SAVEPOINT one; + INSERT INTO foo VALUES(1); + } + csql1 { RELEASE one } + } {1 {database is locked}} + do_test savepoint-14.$tn.6 { + sql2 { COMMIT } + sql1 { + ROLLBACK TO one; + INSERT INTO foo VALUES(3); + INSERT INTO foo VALUES(4); + INSERT INTO foo VALUES(5); + RELEASE one; + } + } {} + do_test savepoint-14.$tn.7 { + sql2 { CREATE INDEX fooidx ON foo(x); } + sql3 { PRAGMA integrity_check } + } {ok} +} + +do_multiclient_test tn { + do_test savepoint-15.$tn.1 { + sql1 { + CREATE TABLE foo(x); + INSERT INTO foo VALUES(1); + INSERT INTO foo VALUES(2); + } + sql2 { BEGIN; SELECT * FROM foo; } + } {1 2} + do_test savepoint-15.$tn.2 { + sql1 { + PRAGMA locking_mode = EXCLUSIVE; + BEGIN; + INSERT INTO foo VALUES(3); + } + csql1 { COMMIT } + } {1 {database is locked}} + do_test savepoint-15.$tn.3 { + sql1 { ROLLBACK } + sql2 { COMMIT } + sql1 { + INSERT INTO foo VALUES(3); + PRAGMA locking_mode = NORMAL; + INSERT INTO foo VALUES(4); + } + sql2 { CREATE INDEX fooidx ON foo(x); } + sql3 { PRAGMA integrity_check } + } {ok} +} + +do_multiclient_test tn { + do_test savepoint-16.$tn.1 { + sql1 { + CREATE TABLE foo(x); + INSERT INTO foo VALUES(1); + INSERT INTO foo VALUES(2); + } + } {} + do_test savepoint-16.$tn.2 { + + db eval {SELECT * FROM foo} { + sql1 { INSERT INTO foo VALUES(3) } + sql2 { SELECT * FROM foo } + sql1 { INSERT INTO foo VALUES(4) } + break + } + + sql2 { CREATE INDEX 
fooidx ON foo(x); } + sql3 { PRAGMA integrity_check } + } {ok} + do_test savepoint-16.$tn.3 { + sql1 { SELECT * FROM foo } + } {1 2 3 4} +} finish_test diff --git a/test/savepoint2.test b/test/savepoint2.test index 10765a3..be1bdbd 100644 --- a/test/savepoint2.test +++ b/test/savepoint2.test @@ -14,6 +14,7 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl + # Tests in this file are quite similar to those run by trans.test and # avtrans.test. # @@ -23,6 +24,7 @@ proc signature {} { } do_test savepoint2-1 { + wal_set_journal_mode execsql { PRAGMA cache_size=10; BEGIN; @@ -42,6 +44,7 @@ do_test savepoint2-1 { SELECT count(*) FROM t3; } } {1024} +wal_check_journal_mode savepoint2-1.1 unset -nocomplain ::sig unset -nocomplain SQL @@ -140,6 +143,9 @@ for {set ii 2} {$ii < ($iterations+2)} {incr ii} { sqlite3_get_autocommit db } {1} integrity_check savepoint2-$ii.6.1 + + # Check that the connection is still running in WAL mode. + wal_check_journal_mode savepoint2-$ii.7 } unset -nocomplain ::sig diff --git a/test/savepoint6.test b/test/savepoint6.test index 33758b8..60fde4c 100644 --- a/test/savepoint6.test +++ b/test/savepoint6.test @@ -26,6 +26,10 @@ set DATABASE_SCHEMA { CREATE INDEX i2 ON t1(y); } +if {0==[info exists ::G(savepoint6_iterations)]} { + set ::G(savepoint6_iterations) 1000 +} + #-------------------------------------------------------------------------- # In memory database state. # @@ -222,10 +226,11 @@ foreach zSetup [list { set testname normal sqlite3 db test.db } { + if {[wal_is_wal_mode]} continue set testname tempdb sqlite3 db "" } { - if {[catch {set ::permutations_test_prefix} z] == 0 && $z eq "journaltest"} { + if {[permutation] eq "journaltest"} { continue } set testname nosync @@ -241,10 +246,12 @@ foreach zSetup [list { unset -nocomplain ::aEntry catch { db close } - file delete -force test.db + file delete -force test.db test.db-wal test.db-journal eval $zSetup sql $DATABASE_SCHEMA + wal_set_journal_mode + do_test savepoint6-$testname.setup { savepoint one insert_rows [random_integers 100 1000] @@ -252,7 +259,7 @@ foreach zSetup [list { checkdb } {ok} - for {set i 0} {$i < 1000} {incr i} { + for {set i 0} {$i < $::G(savepoint6_iterations)} {incr i} { do_test savepoint6-$testname.$i.1 { savepoint_op checkdb @@ -264,6 +271,8 @@ foreach zSetup [list { checkdb } {ok} } + + wal_check_journal_mode savepoint6-$testname.walok } unset -nocomplain ::lSavepoint diff --git a/test/schema3.test b/test/schema3.test new file mode 100644 index 0000000..ba7d745 --- /dev/null +++ b/test/schema3.test @@ -0,0 +1,97 @@ +# 2010 Jun 28 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/malloc_common.tcl +source $testdir/lock_common.tcl + +# This block tests that if one client modifies the database schema, a +# second client updates its internal cache of the database schema before +# executing any queries. Specifically, it does not return a "no such column" +# or "no such table" error if the table or column in question does exist +# but was added after the second client loaded its cache of the database +# schema. 
+# +# Types of schema modifications: +# +# 1. Adding a database table. +# 2. Adding a database view. +# 3. Adding a database index. +# 4. Adding a database trigger. +# 5. Adding a column to an existing table (ALTER TABLE). +# +do_multiclient_test tn { + + # Have connections [db1] and [db2] load the current database schema. + # + sql1 { SELECT * FROM sqlite_master } + sql2 { SELECT * FROM sqlite_master } + + foreach {tn2 c1 c2} { + 1 { CREATE TABLE t1(a, b) } { SELECT * FROM t1 } + 2 { CREATE TABLE t2(a, b) } { UPDATE t2 SET a = b } + 3 { CREATE TABLE t3(a, b) } { DELETE FROM t3 } + 4 { CREATE TABLE t4(a, b) } { INSERT INTO t4 VALUES(1, 2) } + 5 { CREATE TABLE t5(a, b) } { DROP TABLE t5 } + 6 { CREATE TABLE t6(a, b) } { CREATE INDEX i1 ON t6(a) } + + 7 { ALTER TABLE t1 ADD COLUMN c } { SELECT a, b, c FROM t1 } + 8 { ALTER TABLE t2 ADD COLUMN c } { UPDATE t2 SET a = c } + 9 { ALTER TABLE t2 ADD COLUMN d } { UPDATE t2 SET d = c } + 10 { ALTER TABLE t3 ADD COLUMN c } { DELETE FROM t3 WHERE c>10 } + 11 { ALTER TABLE t4 ADD COLUMN c } { INSERT INTO t4(a,b,c) VALUES(1,2,3) } + 12 { ALTER TABLE t6 ADD COLUMN c } { CREATE INDEX i2 ON t6(c) } + 13 { ALTER TABLE t6 ADD COLUMN d } { + CREATE TRIGGER tr1 AFTER UPDATE OF d ON t6 BEGIN + SELECT 1, 2, 3; + END; + } + + 14 { CREATE INDEX i3 ON t1(a) } { DROP INDEX i3 } + 15 { CREATE INDEX i4 ON t2(a) } { + SELECT * FROM t2 INDEXED BY i4 ORDER BY a + } + + 16 { CREATE TRIGGER tr2 AFTER INSERT ON t3 BEGIN SELECT 1 ; END } { + DROP TRIGGER tr2 + } + + 17 { CREATE VIEW v1 AS SELECT * FROM t1 } { SELECT a,b,c FROM v1 } + 18 { ALTER TABLE t1 ADD COLUMN d } { SELECT a,b,c,d FROM v1 } + + 19 { CREATE TABLE t7(a, b) } { + DROP TABLE IF EXISTS t7; CREATE TABLE t7(c, d); + } + 20 { CREATE INDEX i5 ON t7(c, d) } { + DROP INDEX IF EXISTS i5; CREATE INDEX i5 ON t7(c) + } + 21 { CREATE TRIGGER tr3 BEFORE DELETE ON t7 BEGIN SELECT 1, 2, 3 ; END } { + DROP TRIGGER IF EXISTS tr3; + CREATE TRIGGER tr3 AFTER INSERT ON t7 BEGIN SELECT 1, 2, 3 ; END + } + + 22 { CREATE TABLE t8(a, b) } { + CREATE TRIGGER tr4 AFTER UPDATE OF a ON t8 BEGIN + SELECT 1, 2, 3; + END; + } + } { + do_test schema3-1.$tn.$tn2 { + sql1 $c1 + sql2 $c2 + } {} + } +} + +finish_test diff --git a/test/select2.test b/test/select2.test index 25a552d..35f8dd5 100644 --- a/test/select2.test +++ b/test/select2.test @@ -153,12 +153,12 @@ do_test select2-4.1 { do_test select2-4.2 { execsql { INSERT INTO bb VALUES(0); - SELECT * FROM aa, bb WHERE b; + SELECT * FROM aa CROSS JOIN bb WHERE b; } } {1 2 1 4 3 2 3 4} do_test select2-4.3 { execsql { - SELECT * FROM aa, bb WHERE NOT b; + SELECT * FROM aa CROSS JOIN bb WHERE NOT b; } } {1 0 3 0} do_test select2-4.4 { diff --git a/test/select9.test b/test/select9.test index eeadf13..085dee0 100644 --- a/test/select9.test +++ b/test/select9.test @@ -25,8 +25,6 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl -#set ISQUICK 1 - #------------------------------------------------------------------------- # test_compound_select TESTNAME SELECT RESULT # @@ -62,7 +60,7 @@ proc test_compound_select {testname sql result} { set iLimitIncr 1 set iOffsetIncr 1 - if {[info exists ::ISQUICK] && $::ISQUICK && $nRow>=5} { + if {[info exists ::G(isquick)] && $::G(isquick) && $nRow>=5} { set iOffsetIncr [expr $nRow / 5] set iLimitIncr [expr $nRow / 5] } diff --git a/test/selectC.test b/test/selectC.test index 7ae2690..45250ef 100644 --- a/test/selectC.test +++ b/test/selectC.test @@ -166,4 +166,48 @@ ifcapable trigger { } {1 {no such column: new.x}} } +# Check that ticket 
[883034dcb5] is fixed. +# +do_test selectC-3.1 { + execsql { + CREATE TABLE person ( + org_id TEXT NOT NULL, + nickname TEXT NOT NULL, + license TEXT, + CONSTRAINT person_pk PRIMARY KEY (org_id, nickname), + CONSTRAINT person_license_uk UNIQUE (license) + ); + INSERT INTO person VALUES('meyers', 'jack', '2GAT123'); + INSERT INTO person VALUES('meyers', 'hill', 'V345FMP'); + INSERT INTO person VALUES('meyers', 'jim', '2GAT138'); + INSERT INTO person VALUES('smith', 'maggy', ''); + INSERT INTO person VALUES('smith', 'jose', 'JJZ109'); + INSERT INTO person VALUES('smith', 'jack', 'THX138'); + INSERT INTO person VALUES('lakeside', 'dave', '953OKG'); + INSERT INTO person VALUES('lakeside', 'amy', NULL); + INSERT INTO person VALUES('lake-apts', 'tom', NULL); + INSERT INTO person VALUES('acorn', 'hideo', 'CQB421'); + + SELECT + org_id, + count((NOT (org_id IS NULL)) AND (NOT (nickname IS NULL))) + FROM person + WHERE (CASE WHEN license != '' THEN 1 ELSE 0 END) + GROUP BY 1; + } +} {acorn 1 lakeside 1 meyers 3 smith 2} +do_test selectC-3.2 { + execsql { + CREATE TABLE t2(a PRIMARY KEY, b); + INSERT INTO t2 VALUES('abc', 'xxx'); + INSERT INTO t2 VALUES('def', 'yyy'); + SELECT a, max(b || a) FROM t2 WHERE (b||b||b)!='value' GROUP BY a; + } +} {abc xxxabc def yyydef} +do_test selectC-3.3 { + execsql { + SELECT b, max(a || b) FROM t2 WHERE (b||b||b)!='value' GROUP BY a; + } +} {xxx abcxxx yyy defyyy} + finish_test diff --git a/test/shared3.test b/test/shared3.test index d9d5fa8..8f9eae9 100644 --- a/test/shared3.test +++ b/test/shared3.test @@ -90,12 +90,12 @@ do_test shared3-2.8 { execsql { INSERT INTO t1 VALUES(10, randomblob(10000)) } db1 - sqlite3 db3 $alternative_name # If the pager-cache is really still limited to 10 pages, then the INSERT # statement above should have caused the pager to grab an exclusive lock # on the database file so that the cache could be spilled. # + catch { sqlite3 db3 $alternative_name } catchsql {select count(*) from sqlite_master} db3 } {1 {database is locked}} diff --git a/test/soak.test b/test/soak.test index 6bfd40a..c457dec 100644 --- a/test/soak.test +++ b/test/soak.test @@ -65,14 +65,14 @@ set SOAKTESTS { corruptC.test } -set ISQUICK 1 +set G(isquick) 1 set soak_starttime [clock seconds] set soak_finishtime [expr {$soak_starttime + $TIMEOUT}] # Loop until the timeout is reached or an error occurs. # -for {set iRun 0} {[clock seconds] < $soak_finishtime && $nErr==0} {incr iRun} { +for {set iRun 0} {[clock seconds] < $soak_finishtime} {incr iRun} { set iIdx [expr {$iRun % [llength $SOAKTESTS]}] source [file join $testdir [lindex $SOAKTESTS $iIdx]] @@ -80,11 +80,11 @@ for {set iRun 0} {[clock seconds] < $soak_finishtime && $nErr==0} {incr iRun} { if {$sqlite_open_file_count>0} { puts "$tail did not close all files: $sqlite_open_file_count" - incr nErr - lappend ::failList $tail + fail_test $tail set sqlite_open_file_count 0 } + if {[set_test_counter errors]>0} break } really_finish_test diff --git a/test/softheap1.test b/test/softheap1.test index 5a06095..6855553 100644 --- a/test/softheap1.test +++ b/test/softheap1.test @@ -45,6 +45,5 @@ do_test softheap1-1.1 { } } {ok} -sqlite3_soft_heap_limit $soft_limit - +sqlite3_soft_heap_limit $cmdlinearg(soft-heap-limit) finish_test diff --git a/test/stat.test b/test/stat.test new file mode 100644 index 0000000..177da0a --- /dev/null +++ b/test/stat.test @@ -0,0 +1,154 @@ +# 2010 July 09 +# +# The author disclaims copyright to this source code. 
In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the SELECT statement. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !vtab { + finish_test + return +} + + +set ::asc 1 +proc a_string {n} { string range [string repeat [incr ::asc]. $n] 1 $n } +db func a_string a_string + +register_dbstat_vtab db +do_execsql_test stat-0.0 { + CREATE VIRTUAL TABLE temp.stat USING dbstat; + SELECT * FROM stat; +} {} +do_execsql_test stat-0.1 { + PRAGMA journal_mode = WAL; + PRAGMA journal_mode = delete; + SELECT * FROM stat; +} {wal delete sqlite_master / 1 leaf 0 0 916 0} + +do_test stat-1.0 { + execsql { + CREATE TABLE t1(a, b); + CREATE INDEX i1 ON t1(b); + INSERT INTO t1(rowid, a, b) VALUES(2, 2, 3); + INSERT INTO t1(rowid, a, b) VALUES(3, 4, 5); + } +} {} +do_test stat-1.1 { + execsql { + SELECT * FROM stat WHERE name = 't1'; + } +} {t1 / 2 leaf 2 10 998 5} +do_test stat-1.2 { + execsql { + SELECT * FROM stat WHERE name = 'i1'; + } +} {i1 / 3 leaf 2 10 1000 5} +do_test stat-1.3 { + execsql { + SELECT * FROM stat WHERE name = 'sqlite_master'; + } +} {sqlite_master / 1 leaf 2 77 831 40} +do_test stat-1.4 { + execsql { + DROP TABLE t1; + } +} {} + +do_execsql_test stat-2.1 { + CREATE TABLE t3(a PRIMARY KEY, b); + INSERT INTO t3(rowid, a, b) VALUES(2, a_string(111), a_string(222)); + INSERT INTO t3 SELECT a_string(110+rowid), a_string(221+rowid) FROM t3; + INSERT INTO t3 SELECT a_string(110+rowid), a_string(221+rowid) FROM t3; + INSERT INTO t3 SELECT a_string(110+rowid), a_string(221+rowid) FROM t3; + INSERT INTO t3 SELECT a_string(110+rowid), a_string(221+rowid) FROM t3; + INSERT INTO t3 SELECT a_string(110+rowid), a_string(221+rowid) FROM t3; + SELECT * FROM stat WHERE name != 'sqlite_master'; +} [list \ + sqlite_autoindex_t3_1 / 3 internal 3 368 623 125 \ + sqlite_autoindex_t3_1 /000/ 8 leaf 8 946 46 123 \ + sqlite_autoindex_t3_1 /001/ 9 leaf 8 988 2 131 \ + sqlite_autoindex_t3_1 /002/ 15 leaf 7 857 137 132 \ + sqlite_autoindex_t3_1 /003/ 20 leaf 6 739 257 129 \ + t3 / 2 internal 15 0 907 0 \ + t3 /000/ 4 leaf 2 678 328 340 \ + t3 /001/ 5 leaf 2 682 324 342 \ + t3 /002/ 6 leaf 2 682 324 342 \ + t3 /003/ 7 leaf 2 690 316 346 \ + t3 /004/ 10 leaf 2 682 324 342 \ + t3 /005/ 11 leaf 2 690 316 346 \ + t3 /006/ 12 leaf 2 698 308 350 \ + t3 /007/ 13 leaf 2 706 300 354 \ + t3 /008/ 14 leaf 2 682 324 342 \ + t3 /009/ 16 leaf 2 690 316 346 \ + t3 /00a/ 17 leaf 2 698 308 350 \ + t3 /00b/ 18 leaf 2 706 300 354 \ + t3 /00c/ 19 leaf 2 714 292 358 \ + t3 /00d/ 21 leaf 2 722 284 362 \ + t3 /00e/ 22 leaf 2 730 276 366 \ + t3 /00f/ 23 leaf 2 738 268 370 \ +] +do_execsql_test stat-2.2 { DROP TABLE t3 } {} + +do_execsql_test stat-3.1 { + CREATE TABLE t4(x); + CREATE INDEX i4 ON t4(x); + INSERT INTO t4(rowid, x) VALUES(2, a_string(7777)); + SELECT * FROM stat WHERE name != 'sqlite_master'; +} [list \ + i4 / 3 leaf 1 103 905 7782 \ + i4 /000+000000 9 overflow 0 1020 0 0 \ + i4 /000+000001 10 overflow 0 1020 0 0 \ + i4 /000+000002 11 overflow 0 1020 0 0 \ + i4 /000+000003 12 overflow 0 1020 0 0 \ + i4 /000+000004 13 overflow 0 1020 0 0 \ + i4 /000+000005 14 overflow 0 1020 0 0 \ + i4 /000+000006 15 overflow 0 1020 0 0 \ + i4 /000+000007 16 overflow 0 539 481 0 \ + t4 / 
2 leaf 1 640 367 7780 \ + t4 /000+000000 22 overflow 0 1020 0 0 \ + t4 /000+000001 23 overflow 0 1020 0 0 \ + t4 /000+000002 21 overflow 0 1020 0 0 \ + t4 /000+000003 20 overflow 0 1020 0 0 \ + t4 /000+000004 19 overflow 0 1020 0 0 \ + t4 /000+000005 18 overflow 0 1020 0 0 \ + t4 /000+000006 17 overflow 0 1020 0 0 \ +] + +do_execsql_test stat-4.1 { + CREATE TABLE t5(x); + CREATE INDEX i5 ON t5(x); + SELECT * FROM stat WHERE name = 't5' OR name = 'i5'; +} [list \ + i5 / 5 leaf 0 0 1016 0 \ + t5 / 4 leaf 0 0 1016 0 \ +] + +db close +file delete -force test.db +sqlite3 db test.db +register_dbstat_vtab db +breakpoint +do_execsql_test stat-5.1 { + CREATE VIRTUAL TABLE temp.stat USING dbstat; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(zeroblob(1513)); + INSERT INTO t1 VALUES(zeroblob(1514)); + SELECT * FROM stat WHERE name = 't1'; +} [list \ + t1 / 2 leaf 2 993 5 1517 \ + t1 /000+000000 3 overflow 0 1020 0 0 \ + t1 /001+000000 4 overflow 0 1020 0 0 \ +] + +finish_test diff --git a/test/stmt.test b/test/stmt.test index 45e9ac5..482a7d4 100644 --- a/test/stmt.test +++ b/test/stmt.test @@ -50,12 +50,16 @@ do_test stmt-1.5 { execsql COMMIT set sqlite_open_file_count } {1} -do_test stmt-1.6 { +do_test stmt-1.6.1 { execsql { BEGIN; INSERT INTO t1 SELECT a+2, b+2 FROM t1; } set sqlite_open_file_count +} {2} +do_test stmt-1.6.2 { + execsql { INSERT INTO t1 SELECT a+4, b+4 FROM t1 } + set sqlite_open_file_count } {3} do_test stmt-1.7 { execsql COMMIT @@ -73,13 +77,20 @@ proc filecount {testname sql expected} { }] $expected] } -filecount stmt-2.1 { INSERT INTO t1 VALUES(5, 5) } 2 -filecount stmt-2.2 { REPLACE INTO t1 VALUES(5, 5) } 2 -filecount stmt-2.3 { INSERT INTO t1 SELECT 5, 5 } 3 +filecount stmt-2.1 { INSERT INTO t1 VALUES(9, 9) } 2 +filecount stmt-2.2 { REPLACE INTO t1 VALUES(9, 9) } 2 +filecount stmt-2.3 { INSERT INTO t1 SELECT 9, 9 } 2 +filecount stmt-2.4 { + INSERT INTO t1 SELECT 9, 9; + INSERT INTO t1 SELECT 10, 10; +} 3 -do_test stmt-2.4 { +do_test stmt-2.5 { execsql { CREATE INDEX i1 ON t1(b) } } {} -filecount stmt-2.5 { REPLACE INTO t1 VALUES(5, 5) } 3 +filecount stmt-2.6 { + REPLACE INTO t1 VALUES(5, 5); + REPLACE INTO t1 VALUES(5, 5); +} 3 finish_test diff --git a/test/table.test b/test/table.test index 173a62a..5fa9116 100644 --- a/test/table.test +++ b/test/table.test @@ -604,6 +604,7 @@ do_test table-13.1 { } } {} set i 0 +unset -nocomplain date time seconds foreach {date time seconds} { 1976-07-04 12:00:00 205329600 1994-04-16 14:00:00 766504800 diff --git a/test/tclsqlite.test b/test/tclsqlite.test index 6bae7f2..8db04eb 100644 --- a/test/tclsqlite.test +++ b/test/tclsqlite.test @@ -25,7 +25,7 @@ source $testdir/tester.tcl if {[sqlite3 -has-codec]} { set r "sqlite_orig HANDLE FILENAME ?-key CODEC-KEY?" } else { - set r "sqlite3 HANDLE FILENAME ?-vfs VFSNAME? ?-readonly BOOLEAN? ?-create BOOLEAN? ?-nomutex BOOLEAN? ?-fullmutex BOOLEAN?" + set r "sqlite_orig HANDLE FILENAME ?-vfs VFSNAME? ?-readonly BOOLEAN? ?-create BOOLEAN? ?-nomutex BOOLEAN? ?-fullmutex BOOLEAN?" 
} do_test tcl-1.1 { set v [catch {sqlite3 bogus} msg] @@ -35,7 +35,7 @@ do_test tcl-1.1 { do_test tcl-1.2 { set v [catch {db bogus} msg] lappend v $msg -} {1 {bad option "bogus": must be authorizer, backup, busy, cache, changes, close, collate, collation_needed, commit_hook, complete, copy, enable_load_extension, errorcode, eval, exists, function, incrblob, interrupt, last_insert_rowid, nullvalue, onecolumn, profile, progress, rekey, restore, rollback_hook, status, timeout, total_changes, trace, transaction, unlock_notify, update_hook, or version}} +} {1 {bad option "bogus": must be authorizer, backup, busy, cache, changes, close, collate, collation_needed, commit_hook, complete, copy, enable_load_extension, errorcode, eval, exists, function, incrblob, interrupt, last_insert_rowid, nullvalue, onecolumn, profile, progress, rekey, restore, rollback_hook, status, timeout, total_changes, trace, transaction, unlock_notify, update_hook, version, or wal_hook}} do_test tcl-1.2.1 { set v [catch {db cache bogus} msg] lappend v $msg diff --git a/test/tempdb.test b/test/tempdb.test index a8915cd..ef0c906 100644 --- a/test/tempdb.test +++ b/test/tempdb.test @@ -58,10 +58,7 @@ do_test tempdb-2.1 { # to be in memory. These variables are used to calculate the expected # number of open files in the test cases below. # - set jrnl_in_memory [expr { - [info exists ::permutations_test_prefix] && - $::permutations_test_prefix eq "inmemory_journal" - }] + set jrnl_in_memory [expr {[permutation] eq "inmemory_journal"}] set subj_in_memory [expr {$jrnl_in_memory || $TEMP_STORE == 3}] db close @@ -74,6 +71,7 @@ do_test tempdb-2.2 { BEGIN; INSERT INTO t1 VALUES(1, 2, 3); INSERT INTO t1 VALUES(4, 5, 6); + INSERT INTO t2 VALUES(7, 8, 9); INSERT INTO t2 SELECT * FROM t1; } catchsql { INSERT INTO t1 SELECT * FROM t2 } diff --git a/test/tester.tcl b/test/tester.tcl index 8fe877e..d921955 100644 --- a/test/tester.tcl +++ b/test/tester.tcl @@ -13,136 +13,225 @@ # # $Id: tester.tcl,v 1.143 2009/04/09 01:23:49 drh Exp $ +#------------------------------------------------------------------------- +# The commands provided by the code in this file to help with creating +# test cases are as follows: # -# What for user input before continuing. This gives an opportunity -# to connect profiling tools to the process. +# Commands to manipulate the db and the file-system at a high level: +# +# copy_file FROM TO +# drop_all_table ?DB? +# forcedelete FILENAME +# +# Test the capability of the SQLite version built into the interpreter to +# determine if a specific test can be run: +# +# ifcapable EXPR +# +# Calulate checksums based on database contents: +# +# dbcksum DB DBNAME +# allcksum ?DB? +# cksum ?DB? +# +# Commands to execute/explain SQL statements: +# +# stepsql DB SQL +# execsql2 SQL +# explain_no_trace SQL +# explain SQL ?DB? +# catchsql SQL ?DB? +# execsql SQL ?DB? +# +# Commands to run test cases: +# +# do_ioerr_test TESTNAME ARGS... +# crashsql ARGS... +# integrity_check TESTNAME ?DB? +# do_test TESTNAME SCRIPT EXPECTED +# do_execsql_test TESTNAME SQL EXPECTED +# do_catchsql_test TESTNAME SQL EXPECTED +# +# Commands providing a lower level interface to the global test counters: +# +# set_test_counter COUNTER ?VALUE? +# omit_test TESTNAME REASON +# fail_test TESTNAME +# incr_ntest +# +# Command run at the end of each test file: +# +# finish_test +# +# Commands to help create test files that run with the "WAL" and other +# permutations (see file permutations.test): +# +# wal_is_wal_mode +# wal_set_journal_mode ?DB? 
+# wal_check_journal_mode TESTNAME?DB? +# permutation # -for {set i 0} {$i<[llength $argv]} {incr i} { - if {[regexp {^-+pause$} [lindex $argv $i] all value]} { - puts -nonewline "Press RETURN to begin..." - flush stdout - gets stdin - set argv [lreplace $argv $i $i] - } -} +# Set the precision of FP arithmatic used by the interpreter. And +# configure SQLite to take database file locks on the page that begins +# 64KB into the database file instead of the one 1GB in. This means +# the code that handles that special case can be tested without creating +# very large database files. +# set tcl_precision 15 sqlite3_test_control_pending_byte 0x0010000 -# -# Check the command-line arguments for a default soft-heap-limit. -# Store this default value in the global variable ::soft_limit and -# update the soft-heap-limit each time this script is run. In that -# way if an individual test file changes the soft-heap-limit, it -# will be reset at the start of the next test file. + +# If the pager codec is available, create a wrapper for the [sqlite3] +# command that appends "-key {xyzzy}" to the command line. i.e. this: # -if {![info exists soft_limit]} { - set soft_limit 0 - for {set i 0} {$i<[llength $argv]} {incr i} { - if {[regexp {^--soft-heap-limit=(.+)$} [lindex $argv $i] all value]} { - if {$value!="off"} { - set soft_limit $value - } - set argv [lreplace $argv $i $i] - } - } -} -sqlite3_soft_heap_limit $soft_limit - -# -# Check the command-line arguments to set the memory debugger -# backtrace depth. +# sqlite3 db test.db # -# See the sqlite3_memdebug_backtrace() function in mem2.c or -# test_malloc.c for additional information. +# becomes # -for {set i 0} {$i<[llength $argv]} {incr i} { - if {[lindex $argv $i] eq "--malloctrace"} { - set argv [lreplace $argv $i $i] - sqlite3_memdebug_backtrace 10 - sqlite3_memdebug_log start - set tester_do_malloctrace 1 - } -} -for {set i 0} {$i<[llength $argv]} {incr i} { - if {[regexp {^--backtrace=(\d+)$} [lindex $argv $i] all value]} { - sqlite3_memdebug_backtrace $value - set argv [lreplace $argv $i $i] - } -} - - -proc ostrace_call {zCall nClick zFile i32 i64} { - set s "INSERT INTO ostrace VALUES('$zCall', $nClick, '$zFile', $i32, $i64);" - puts $::ostrace_fd $s -} - -for {set i 0} {$i<[llength $argv]} {incr i} { - if {[lindex $argv $i] eq "--ossummary" || [lindex $argv $i] eq "--ostrace"} { - sqlite3_instvfs create -default ostrace - set tester_do_ostrace 1 - set ostrace_fd [open ostrace.sql w] - puts $ostrace_fd "BEGIN;" - if {[lindex $argv $i] eq "--ostrace"} { - set s "CREATE TABLE ostrace" - append s "(method TEXT, clicks INT, file TEXT, i32 INT, i64 INT);" - puts $ostrace_fd $s - sqlite3_instvfs configure ostrace ostrace_call - sqlite3_instvfs configure ostrace ostrace_call - } - set argv [lreplace $argv $i $i] - } - if {[lindex $argv $i] eq "--binarylog"} { - set tester_do_binarylog 1 - set argv [lreplace $argv $i $i] - } -} - -# -# Check the command-line arguments to set the maximum number of -# errors tolerated before halting. 
+# sqlite3 db test.db -key {xyzzy} # -if {![info exists maxErr]} { - set maxErr 1000 -} -for {set i 0} {$i<[llength $argv]} {incr i} { - if {[regexp {^--maxerror=(\d+)$} [lindex $argv $i] all maxErr]} { - set argv [lreplace $argv $i $i] - } -} -#puts "Max error = $maxErr" - - -# Use the pager codec if it is available -# -if {[sqlite3 -has-codec] && [info command sqlite_orig]==""} { +if {[info command sqlite_orig]==""} { rename sqlite3 sqlite_orig proc sqlite3 {args} { - if {[llength $args]==2 && [string index [lindex $args 0] 0]!="-"} { - lappend args -key {xyzzy} + if {[llength $args]>=2 && [string index [lindex $args 0] 0]!="-"} { + # This command is opening a new database connection. + # + if {[info exists ::G(perm:sqlite3_args)]} { + set args [concat $args $::G(perm:sqlite3_args)] + } + if {[sqlite_orig -has-codec] && ![info exists ::do_not_use_codec]} { + lappend args -key {xyzzy} + } + + set res [uplevel 1 sqlite_orig $args] + if {[info exists ::G(perm:presql)]} { + [lindex $args 0] eval $::G(perm:presql) + } + set res + } else { + # This command is not opening a new database connection. Pass the + # arguments through to the C implemenation as the are. + # + uplevel 1 sqlite_orig $args } - uplevel 1 sqlite_orig $args } } +proc execpresql {handle args} { + trace remove execution $handle enter [list execpresql $handle] + if {[info exists ::G(perm:presql)]} { + $handle eval $::G(perm:presql) + } +} -# Create a test database +# This command should be called after loading tester.tcl from within +# all test scripts that are incompatible with encryption codecs. # -if {![info exists nTest]} { +proc do_not_use_codec {} { + set ::do_not_use_codec 1 + reset_db +} + +# The following block only runs the first time this file is sourced. It +# does not run in slave interpreters (since the ::cmdlinearg array is +# populated before the test script is run in slave interpreters). +# +if {[info exists cmdlinearg]==0} { + + # Parse any options specified in the $argv array. This script accepts the + # following options: + # + # --pause + # --soft-heap-limit=NN + # --maxerror=NN + # --malloctrace=N + # --backtrace=N + # --binarylog=N + # --soak=N + # + set cmdlinearg(soft-heap-limit) 0 + set cmdlinearg(maxerror) 1000 + set cmdlinearg(malloctrace) 0 + set cmdlinearg(backtrace) 10 + set cmdlinearg(binarylog) 0 + set cmdlinearg(soak) 0 + + set leftover [list] + foreach a $argv { + switch -regexp -- $a { + {^-+pause$} { + # Wait for user input before continuing. This is to give the user an + # opportunity to connect profiling tools to the process. + puts -nonewline "Press RETURN to begin..." + flush stdout + gets stdin + } + {^-+soft-heap-limit=.+$} { + foreach {dummy cmdlinearg(soft-heap-limit)} [split $a =] break + } + {^-+maxerror=.+$} { + foreach {dummy cmdlinearg(maxerror)} [split $a =] break + } + {^-+malloctrace=.+$} { + foreach {dummy cmdlinearg(malloctrace)} [split $a =] break + if {$cmdlinearg(malloctrace)} { + sqlite3_memdebug_log start + } + } + {^-+backtrace=.+$} { + foreach {dummy cmdlinearg(backtrace)} [split $a =] break + sqlite3_memdebug_backtrace $value + } + {^-+binarylog=.+$} { + foreach {dummy cmdlinearg(binarylog)} [split $a =] break + } + {^-+soak=.+$} { + foreach {dummy cmdlinearg(soak)} [split $a =] break + set ::G(issoak) $cmdlinearg(soak) + } + default { + lappend leftover $a + } + } + } + set argv $leftover + + # Install the malloc layer used to inject OOM errors. And the 'automatic' + # extensions. This only needs to be done once for the process. 
+ # sqlite3_shutdown install_malloc_faultsim 1 sqlite3_initialize autoinstall_test_functions - if {[info exists tester_do_binarylog]} { - sqlite3_instvfs binarylog -default binarylog ostrace.bin - sqlite3_instvfs marker binarylog "$argv0 $argv" + + # If the --binarylog option was specified, create the logging VFS. This + # call installs the new VFS as the default for all SQLite connections. + # + if {$cmdlinearg(binarylog)} { + vfslog new binarylog {} vfslog.bin + } + + # Set the backtrace depth, if malloc tracing is enabled. + # + if {$cmdlinearg(malloctrace)} { + sqlite3_memdebug_backtrace $cmdlinearg(backtrace) } } +# Update the soft-heap-limit each time this script is run. In that +# way if an individual test file changes the soft-heap-limit, it +# will be reset at the start of the next test file. +# +sqlite3_soft_heap_limit $cmdlinearg(soft-heap-limit) + +# Create a test database +# proc reset_db {} { catch {db close} file delete -force test.db file delete -force test.db-journal + file delete -force test.db-wal sqlite3 db ./test.db set ::DB [sqlite3_connection_pointer db] if {[info exists ::SETUP_SQL]} { @@ -153,71 +242,102 @@ reset_db # Abort early if this script has been run before. # -if {[info exists nTest]} return +if {[info exists TC(count)]} return -# Set the test counters to zero +# Initialize the test counters and set up commands to access them. +# Or, if this is a slave interpreter, set up aliases to write the +# counters in the parent interpreter. # -set nErr 0 -set nTest 0 -set skip_test 0 -set failList {} -set omitList {} -if {![info exists speedTest]} { - set speedTest 0 +if {0==[info exists ::SLAVE]} { + set TC(errors) 0 + set TC(count) 0 + set TC(fail_list) [list] + set TC(omit_list) [list] + + proc set_test_counter {counter args} { + if {[llength $args]} { + set ::TC($counter) [lindex $args 0] + } + set ::TC($counter) + } } # Record the fact that a sequence of tests were omitted. # proc omit_test {name reason} { - global omitList + set omitList [set_test_counter omit_list] lappend omitList [list $name $reason] + set_test_counter omit_list $omitList } +# Record the fact that a test failed. +# +proc fail_test {name} { + set f [set_test_counter fail_list] + lappend f $name + set_test_counter fail_list $f + set_test_counter errors [expr [set_test_counter errors] + 1] + + set nFail [set_test_counter errors] + if {$nFail>=$::cmdlinearg(maxerror)} { + puts "*** Giving up..." + finalize_testing + } +} + +# Increment the number of tests run +# +proc incr_ntest {} { + set_test_counter count [expr [set_test_counter count] + 1] +} + + # Invoke the do_test procedure to run a single test # proc do_test {name cmd expected} { - global argv nErr nTest skip_test maxErr + + global argv cmdlinearg + sqlite3_memdebug_settitle $name - if {[info exists ::tester_do_binarylog]} { - sqlite3_instvfs marker binarylog "Start of $name" + +# if {[llength $argv]==0} { +# set go 1 +# } else { +# set go 0 +# foreach pattern $argv { +# if {[string match $pattern $name]} { +# set go 1 +# break +# } +# } +# } + + if {[info exists ::G(perm:prefix)]} { + set name "$::G(perm:prefix)$name" } - if {$skip_test} { - set skip_test 0 - return - } - if {[llength $argv]==0} { - set go 1 - } else { - set go 0 - foreach pattern $argv { - if {[string match $pattern $name]} { - set go 1 - break - } - } - } - if {!$go} return - incr nTest + + incr_ntest puts -nonewline $name... 
flush stdout if {[catch {uplevel #0 "$cmd;\n"} result]} { puts "\nError: $result" - incr nErr - lappend ::failList $name - if {$nErr>$maxErr} {puts "*** Giving up..."; finalize_testing} + fail_test $name } elseif {[string compare $result $expected]} { puts "\nExpected: \[$expected\]\n Got: \[$result\]" - incr nErr - lappend ::failList $name - if {$nErr>=$maxErr} {puts "*** Giving up..."; finalize_testing} + fail_test $name } else { puts " Ok" } flush stdout - if {[info exists ::tester_do_binarylog]} { - sqlite3_instvfs marker binarylog "End of $name" - } } + +proc do_execsql_test {testname sql result} { + uplevel do_test $testname [list "execsql {$sql}"] [list $result] +} +proc do_catchsql_test {testname sql result} { + uplevel do_test $testname [list "catchsql {$sql}"] [list $result] +} + # Run an SQL script. # Return the number of microseconds per statement. @@ -255,6 +375,10 @@ proc speed_trial_tcl {name numstmt units script} { proc speed_trial_init {name} { global total_time set total_time 0 + sqlite3 versdb :memory: + set vers [versdb one {SELECT sqlite_source_id()}] + versdb close + puts "SQLite $vers" } proc speed_trial_summary {name} { global total_time @@ -264,10 +388,15 @@ proc speed_trial_summary {name} { # Run this routine last # proc finish_test {} { - finalize_testing + catch {db close} + catch {db2 close} + catch {db3 close} + if {0==[info exists ::SLAVE]} { finalize_testing } } proc finalize_testing {} { - global nTest nErr sqlite_open_file_count omitList + global sqlite_open_file_count + + set omitList [set_test_counter omit_list] catch {db close} catch {db2 close} @@ -278,18 +407,14 @@ proc finalize_testing {} { # sqlite3_clear_tsd_memdebug db close sqlite3_reset_auto_extension - set heaplimit [sqlite3_soft_heap_limit] - if {$heaplimit!=$::soft_limit} { - puts "soft-heap-limit changed by this script\ - from $::soft_limit to $heaplimit" - } elseif {$heaplimit!="" && $heaplimit>0} { - puts "soft-heap-limit set to $heaplimit" - } + sqlite3_soft_heap_limit 0 - incr nTest + set nTest [incr_ntest] + set nErr [set_test_counter errors] + puts "$nErr errors out of $nTest tests" if {$nErr>0} { - puts "Failures on these tests: $::failList" + puts "Failures on these tests: [set_test_counter fail_list]" } run_thread_tests 1 if {[llength $omitList]>0} { @@ -309,27 +434,13 @@ proc finalize_testing {} { puts "in your TCL build." puts "******************************************************************" } - if {[info exists ::tester_do_binarylog]} { - sqlite3_instvfs destroy binarylog + if {$::cmdlinearg(binarylog)} { + vfslog finalize binarylog } if {$sqlite_open_file_count} { puts "$sqlite_open_file_count files were left open" incr nErr } - if {[info exists ::tester_do_ostrace]} { - puts "Writing ostrace.sql..." - set fd $::ostrace_fd - - puts -nonewline $fd "CREATE TABLE ossummary" - puts $fd "(method TEXT, clicks INTEGER, count INTEGER);" - foreach row [sqlite3_instvfs report ostrace] { - foreach {method count clicks} $row break - puts $fd "INSERT INTO ossummary VALUES('$method', $clicks, $count);" - } - puts $fd "COMMIT;" - close $fd - sqlite3_instvfs destroy ostrace - } if {[sqlite3_memory_used]>0} { puts "Unfreed memory: [sqlite3_memory_used] bytes" incr nErr @@ -349,7 +460,7 @@ proc finalize_testing {} { if {[info commands sqlite3_memdebug_malloc_count] ne ""} { puts "Number of malloc() : [sqlite3_memdebug_malloc_count] calls" } - if {[info exists ::tester_do_malloctrace]} { + if {$::cmdlinearg(malloctrace)} { puts "Writing mallocs.sql..." 
memdebug_log_sql sqlite3_memdebug_log stop @@ -543,9 +654,6 @@ proc ifcapable {expr code {else ""} {elsecode ""}} { # crashsql -delay CRASHDELAY -file CRASHFILE ?-blocksize BLOCKSIZE? $sql # proc crashsql {args} { - if {$::tcl_platform(platform)!="unix"} { - error "crashsql should only be used on unix" - } set blocksize "" set crashdelay 1 @@ -573,7 +681,10 @@ proc crashsql {args} { error "Compulsory option -file missing" } - set cfile [file join [pwd] $crashfile] + # $crashfile gets compared to the native filename in + # cfSync(), which can be different then what TCL uses by + # default, so here we force it to the "nativename" format. + set cfile [string map {\\ \\\\} [file nativename [file join [pwd] $crashfile]]] set f [open crash.tcl w] puts $f "sqlite3_crash_enable 1" @@ -602,10 +713,20 @@ proc crashsql {args} { puts $f "}" } close $f - set r [catch { exec [info nameofexec] crash.tcl >@stdout } msg] + + # Windows/ActiveState TCL returns a slightly different + # error message. We map that to the expected message + # so that we don't have to change all of the test + # cases. + if {$::tcl_platform(platform)=="windows"} { + if {$msg=="child killed: unknown signal"} { + set msg "child process exited abnormally" + } + } + lappend r $msg } @@ -748,6 +869,7 @@ proc do_ioerr_test {testname args} { # 1. We never hit the IO error and the SQL returned OK # 2. An IO error was hit and the SQL failed # + #puts "s=$s r=$r q=$q" expr { ($s && !$r && !$q) || (!$s && $r && $q) } } {1} @@ -982,6 +1104,127 @@ proc drop_all_tables {{db db}} { } } +#------------------------------------------------------------------------- +# If a test script is executed with global variable $::G(perm:name) set to +# "wal", then the tests are run in WAL mode. Otherwise, they should be run +# in rollback mode. The following Tcl procs are used to make this less +# intrusive: +# +# wal_set_journal_mode ?DB? +# +# If running a WAL test, execute "PRAGMA journal_mode = wal" using +# connection handle DB. Otherwise, this command is a no-op. +# +# wal_check_journal_mode TESTNAME ?DB? +# +# If running a WAL test, execute a tests case that fails if the main +# database for connection handle DB is not currently a WAL database. +# Otherwise (if not running a WAL permutation) this is a no-op. +# +# wal_is_wal_mode +# +# Returns true if this test should be run in WAL mode. False otherwise. +# +proc wal_is_wal_mode {} { + expr {[permutation] eq "wal"} +} +proc wal_set_journal_mode {{db db}} { + if { [wal_is_wal_mode] } { + $db eval "PRAGMA journal_mode = WAL" + } +} +proc wal_check_journal_mode {testname {db db}} { + if { [wal_is_wal_mode] } { + $db eval { SELECT * FROM sqlite_master } + do_test $testname [list $db eval "PRAGMA main.journal_mode"] {wal} + } +} + +proc permutation {} { + set perm "" + catch {set perm $::G(perm:name)} + set perm +} + +#------------------------------------------------------------------------- +# +proc slave_test_script {script} { + + # Create the interpreter used to run the test script. + interp create tinterp + + # Populate some global variables that tester.tcl expects to see. + foreach {var value} [list \ + ::argv0 $::argv0 \ + ::argv {} \ + ::SLAVE 1 \ + ] { + interp eval tinterp [list set $var $value] + } + + # The alias used to access the global test counters. + tinterp alias set_test_counter set_test_counter + + # Set up the ::cmdlinearg array in the slave. + interp eval tinterp [list array set ::cmdlinearg [array get ::cmdlinearg]] + + # Set up the ::G array in the slave. 
+ interp eval tinterp [list array set ::G [array get ::G]] + + # Load the various test interfaces implemented in C. + load_testfixture_extensions tinterp + + # Run the test script. + interp eval tinterp $script + + # Check if the interpreter call [run_thread_tests] + if { [interp eval tinterp {info exists ::run_thread_tests_called}] } { + set ::run_thread_tests_called 1 + } + + # Delete the interpreter used to run the test script. + interp delete tinterp +} + +proc slave_test_file {zFile} { + set tail [file tail $zFile] + + # Remember the value of the shared-cache setting. So that it is possible + # to check afterwards that it was not modified by the test script. + # + ifcapable shared_cache { set scs [sqlite3_enable_shared_cache] } + + # Run the test script in a slave interpreter. + # + unset -nocomplain ::run_thread_tests_called + reset_prng_state + set ::sqlite_open_file_count 0 + set time [time { slave_test_script [list source $zFile] }] + set ms [expr [lindex $time 0] / 1000] + + # Test that all files opened by the test script were closed. Omit this + # if the test script has "thread" in its name. The open file counter + # is not thread-safe. + # + if {[info exists ::run_thread_tests_called]==0} { + do_test ${tail}-closeallfiles { expr {$::sqlite_open_file_count>0} } {0} + } + set ::sqlite_open_file_count 0 + + # Test that the global "shared-cache" setting was not altered by + # the test script. + # + ifcapable shared_cache { + set res [expr {[sqlite3_enable_shared_cache] == $scs}] + do_test ${tail}-sharedcachesetting [list set {} $res] 1 + } + + # Add some info to the output. + # + puts "Time: $tail $ms ms" + show_memstats +} + # If the library is compiled with the SQLITE_DEFAULT_AUTOVACUUM macro set # to non-zero, then set the global variable $AUTOVACUUM to 1. diff --git a/test/thread2.test b/test/thread2.test index 2ec75a2..253b3d6 100644 --- a/test/thread2.test +++ b/test/thread2.test @@ -29,10 +29,6 @@ if {[llength [info command thread_step]]==0 || [sqlite3 -has-codec]} { finish_test return } -if {![info exists threadsOverrideEachOthersLocks]} { - finish_test - return -} # Create some data to work with # @@ -121,118 +117,6 @@ do_test thread2-2.9 { thread_halt A thread_halt B -# Save the original (correct) value of threadsOverrideEachOthersLocks -# so that it can be restored. If this value is left set incorrectly, lots -# of things will go wrong in future tests. -# -set orig_threadOverride $threadsOverrideEachOthersLocks - -# Pretend we are on a system (like RedHat9) were threads do not -# override each others locks. -# -set threadsOverrideEachOthersLocks 0 - -# Verify that we can move database connections between threads as -# long as no locks are held. 
-# -do_test thread2-3.1 { - thread_create A test.db - set DB [thread_db_get A] - thread_halt A -} {} -do_test thread2-3.2 { - set STMT [sqlite3_prepare $DB {SELECT a FROM t1 LIMIT 1} -1 TAIL] - sqlite3_step $STMT -} SQLITE_ROW -do_test thread2-3.3 { - sqlite3_column_int $STMT 0 -} 1 -do_test thread2-3.4 { - sqlite3_finalize $STMT -} SQLITE_OK -do_test thread2-3.5 { - set STMT [sqlite3_prepare $DB {SELECT max(a) FROM t1} -1 TAIL] - sqlite3_step $STMT -} SQLITE_ROW -do_test thread2-3.6 { - sqlite3_column_int $STMT 0 -} 8 -do_test thread2-3.7 { - sqlite3_finalize $STMT -} SQLITE_OK -do_test thread2-3.8 { - sqlite3_close $DB -} {SQLITE_OK} - -do_test thread2-3.10 { - thread_create A test.db - thread_compile A {SELECT a FROM t1 LIMIT 1} - thread_step A - thread_finalize A - set DB [thread_db_get A] - thread_halt A -} {} -do_test thread2-3.11 { - set STMT [sqlite3_prepare $DB {SELECT a FROM t1 LIMIT 1} -1 TAIL] - sqlite3_step $STMT -} SQLITE_ROW -do_test thread2-3.12 { - sqlite3_column_int $STMT 0 -} 1 -do_test thread2-3.13 { - sqlite3_finalize $STMT -} SQLITE_OK -do_test thread2-3.14 { - sqlite3_close $DB -} SQLITE_OK - -do_test thread2-3.20 { - thread_create A test.db - thread_compile A {SELECT a FROM t1 LIMIT 3} - thread_step A - set STMT [thread_stmt_get A] - set DB [thread_db_get A] - sqlite3_step $STMT -} SQLITE_ROW -do_test thread2-3.22 { - sqlite3_column_int $STMT 0 -} 2 -do_test thread2-3.23 { - # The unlock fails here. But because we never check the return - # code from sqlite3OsUnlock (because we cannot do anything about it - # if it fails) we do not realize that an error has occurred. - breakpoint - sqlite3_finalize $STMT -} SQLITE_OK -do_test thread2-3.25 { - thread_db_put A $DB - thread_halt A -} {} - -do_test thread2-3.30 { - thread_create A test.db - thread_compile A {BEGIN} - thread_step A - thread_finalize A - thread_compile A {SELECT a FROM t1 LIMIT 1} - thread_step A - thread_finalize A - set DB [thread_db_get A] - set STMT [sqlite3_prepare $DB {INSERT INTO t1 VALUES(99,'error')} -1 TAIL] - sqlite3_step $STMT -} SQLITE_ERROR -do_test thread2-3.32 { - sqlite3_finalize $STMT -} SQLITE_MISUSE -do_test thread2-3.33 { - thread_db_put A $DB - thread_halt A -} {} - -# VERY important to set the override flag back to its true value. -# -set threadsOverrideEachOthersLocks $orig_threadOverride - # Also important to halt the worker threads, which are using spin # locks and eating away CPU cycles. # diff --git a/test/thread_common.tcl b/test/thread_common.tcl index bbd9389..9b7a95d 100644 --- a/test/thread_common.tcl +++ b/test/thread_common.tcl @@ -80,7 +80,7 @@ set thread_procs { } proc thread_spawn {varname args} { - sqlthread spawn $varname [join $args ;] + sqlthread spawn $varname [join $args {;}] } # Return true if this build can run the multi-threaded tests. 
@@ -96,16 +96,10 @@ proc run_thread_tests {{print_warning 0}} { set zProblem "Linked against a non-threadsafe Tcl build" } if {[info exists zProblem]} { - if {$print_warning} { - if {[info exists ::run_thread_tests_failed]} { - puts "WARNING: Multi-threaded tests skipped: $zProblem" - } - } else { - puts "Skipping thread tests: $zProblem" - set ::run_thread_tests_failed 1 - } + puts "WARNING: Multi-threaded tests skipped: $zProblem" return 0 } + set ::run_thread_tests_called 1 return 1; } diff --git a/test/tkt-02a8e81d44.test b/test/tkt-02a8e81d44.test new file mode 100644 index 0000000..4a48fb0 --- /dev/null +++ b/test/tkt-02a8e81d44.test @@ -0,0 +1,31 @@ +# 2010 April 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to verify that ticket [02a8e81d44] has been +# fixed. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt-02a838-1.1 { + execsql { + CREATE TABLE t1(a); + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(4); + INSERT INTO t1 VALUES(5); + SELECT * FROM (SELECT a FROM t1 LIMIT 1) UNION ALL SELECT 3; + } +} {1 3} + +finish_test diff --git a/test/tkt-26ff0c2d1e.test b/test/tkt-26ff0c2d1e.test new file mode 100644 index 0000000..83a4f3d --- /dev/null +++ b/test/tkt-26ff0c2d1e.test @@ -0,0 +1,33 @@ +# 2010 May 12 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script testing a bug found in the OP_Variable optimizer +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test bug-20100512-1 { + set DB [sqlite3_connection_pointer db] + set SQL {SELECT case when 1 then 99 else ? end + ?} + set STMT [sqlite3_prepare_v2 $DB $SQL -1 TAIL] + set TAIL +} {} +do_test bug-20100512-2 { + sqlite3_bind_parameter_count $STMT +} 2 +do_test bug-20100512-3 { + sqlite3_bind_int $STMT 1 123 + sqlite3_bind_int $STMT 2 456 + sqlite3_step $STMT + sqlite3_column_int $STMT 0 +} {555} +sqlite3_finalize $STMT diff --git a/test/tkt-80e031a00f.test b/test/tkt-80e031a00f.test new file mode 100644 index 0000000..95372ab --- /dev/null +++ b/test/tkt-80e031a00f.test @@ -0,0 +1,206 @@ +# 2010 July 14 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. Specifically, +# it tests that ticket [80e031a00f45dca877ed92b225209cfa09280f4f] has been +# resolved. That ticket is about IN and NOT IN operators with empty-set +# right-hand sides. Such expressions should always return TRUE or FALSE +# even if the left-hand side is NULL. 
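The ticket tests that follow lean heavily on do_execsql_test, a tester.tcl convenience wrapper whose definition is not included in the hunks shown in this excerpt. As a rough sketch only (the real helper may differ in detail), it wraps do_test so that a case can be written as a name, an SQL script, and an expected result list:

    # Hypothetical approximation of the tester.tcl helper: run the SQL
    # through execsql and compare the flattened result against the
    # expected list.
    proc do_execsql_test {testname sql {result {}}} {
      uplevel [list do_test $testname [list execsql $sql] [list {*}$result]]
    }

Read this way, "do_execsql_test tkt-80e031a00f.1 {SELECT 1 IN ()} 0" is shorthand for a do_test whose body is "execsql {SELECT 1 IN ()}" and whose expected value is 0.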
+# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/lock_common.tcl +source $testdir/malloc_common.tcl + +# EVIDENCE-OF: R-58875-56087 The IN and NOT IN operators take a single +# scalar operand on the left and a vector operand on the right formed by +# an explicit list of zero or more scalars or by a single subquery. +# +# EVIDENCE-OF: R-52275-55503 When the right operand is an empty set, the +# result of IN is false and the result of NOT IN is true, regardless of +# the left operand and even if the left operand is NULL. +# +# EVIDENCE-OF: R-13595-45863 Note that SQLite allows the parenthesized +# list of scalar values on the right-hand side of an IN or NOT IN +# operator to be an empty list but most other SQL database database +# engines and the SQL92 standard require the list to contain at least +# one element. +# +do_execsql_test tkt-80e031a00f.1 {SELECT 1 IN ()} 0 +do_execsql_test tkt-80e031a00f.1b {SELECT 1 IN (2)} 0 +do_execsql_test tkt-80e031a00f.1c {SELECT 1 IN (2,3,4,5,6,7,8,9)} 0 +do_execsql_test tkt-80e031a00f.2 {SELECT 1 NOT IN ()} 1 +do_execsql_test tkt-80e031a00f.2b {SELECT 1 NOT IN (2)} 1 +do_execsql_test tkt-80e031a00f.2c {SELECT 1 NOT IN (2,3,4,5,6,7,8,9)} 1 +do_execsql_test tkt-80e031a00f.3 {SELECT null IN ()} 0 +do_execsql_test tkt-80e031a00f.4 {SELECT null NOT IN ()} 1 +do_execsql_test tkt-80e031a00f.5 { + CREATE TABLE t1(x); + SELECT 1 IN t1; +} 0 +do_execsql_test tkt-80e031a00f.6 {SELECT 1 NOT IN t1} 1 +do_execsql_test tkt-80e031a00f.7 {SELECT null IN t1} 0 +do_execsql_test tkt-80e031a00f.8 {SELECT null NOT IN t1} 1 +do_execsql_test tkt-80e031a00f.9 { + CREATE TABLE t2(y INTEGER PRIMARY KEY); + SELECT 1 IN t2; +} 0 +do_execsql_test tkt-80e031a00f.10 {SELECT 1 NOT IN t2} 1 +do_execsql_test tkt-80e031a00f.11 {SELECT null IN t2} 0 +do_execsql_test tkt-80e031a00f.12 {SELECT null NOT IN t2} 1 +do_execsql_test tkt-80e031a00f.13 { + CREATE TABLE t3(z INT UNIQUE); + SELECT 1 IN t3; +} 0 +do_execsql_test tkt-80e031a00f.14 {SELECT 1 NOT IN t3} 1 +do_execsql_test tkt-80e031a00f.15 {SELECT null IN t3} 0 +do_execsql_test tkt-80e031a00f.16 {SELECT null NOT IN t3} 1 +do_execsql_test tkt-80e031a00f.17 {SELECT 1 IN (SELECT x+y FROM t1, t2)} 0 +do_execsql_test tkt-80e031a00f.18 {SELECT 1 NOT IN (SELECT x+y FROM t1,t2)} 1 +do_execsql_test tkt-80e031a00f.19 {SELECT null IN (SELECT x+y FROM t1,t2)} 0 +do_execsql_test tkt-80e031a00f.20 {SELECT null NOT IN (SELECT x+y FROM t1,t2)} 1 +do_execsql_test tkt-80e031a00f.21 {SELECT 1.23 IN ()} 0 +do_execsql_test tkt-80e031a00f.22 {SELECT 1.23 NOT IN ()} 1 +do_execsql_test tkt-80e031a00f.23 {SELECT 1.23 IN t1} 0 +do_execsql_test tkt-80e031a00f.24 {SELECT 1.23 NOT IN t1} 1 +do_execsql_test tkt-80e031a00f.25 {SELECT 'hello' IN ()} 0 +do_execsql_test tkt-80e031a00f.26 {SELECT 'hello' NOT IN ()} 1 +do_execsql_test tkt-80e031a00f.27 {SELECT 'hello' IN t1} 0 +do_execsql_test tkt-80e031a00f.28 {SELECT 'hello' NOT IN t1} 1 +do_execsql_test tkt-80e031a00f.29 {SELECT x'303132' IN ()} 0 +do_execsql_test tkt-80e031a00f.30 {SELECT x'303132' NOT IN ()} 1 +do_execsql_test tkt-80e031a00f.31 {SELECT x'303132' IN t1} 0 +do_execsql_test tkt-80e031a00f.32 {SELECT x'303132' NOT IN t1} 1 + +# EVIDENCE-OF: R-50221-42915 The result of an IN or NOT IN operator is +# determined by the following matrix: Left operand is NULL Right operand +# contains NULL Right operand is an empty set Left operand found within +# right operand Result of IN operator Result of NOT IN operator no no no +# no false true does not matter no yes no false true no 
does not matter +# no yes true false no yes no no NULL NULL yes does not matter no does +# not matter NULL NULL +# +# Row 1: +do_execsql_test tkt-80e031a00f.100 {SELECT 1 IN (2,3,4)} 0 +do_execsql_test tkt-80e031a00f.101 {SELECT 1 NOT IN (2,3,4)} 1 +do_execsql_test tkt-80e031a00f.102 {SELECT 'a' IN ('b','c','d')} 0 +do_execsql_test tkt-80e031a00f.103 {SELECT 'a' NOT IN ('b','c','d')} 1 +do_test tkt-80e031a00f.104 { + db eval { + CREATE TABLE t4(a UNIQUE); + CREATE TABLE t5(b INTEGER PRIMARY KEY); + CREATE TABLE t6(c); + INSERT INTO t4 VALUES(2); + INSERT INTO t4 VALUES(3); + INSERT INTO t4 VALUES(4); + INSERT INTO t5 SELECT * FROM t4; + INSERT INTO t6 SELECT * FROM t4; + CREATE TABLE t4n(a UNIQUE); + CREATE TABLE t6n(c); + INSERT INTO t4n SELECT * FROM t4; + INSERT INTO t4n VALUES(null); + INSERT INTO t6n SELECT * FROM t4n; + CREATE TABLE t7(a UNIQUE); + CREATE TABLE t8(c); + INSERT INTO t7 VALUES('b'); + INSERT INTO t7 VALUES('c'); + INSERT INTO t7 VALUES('d'); + INSERT INTO t8 SELECT * FROM t7; + CREATE TABLE t7n(a UNIQUE); + CREATE TABLE t8n(c); + INSERT INTO t7n SELECT * FROM t7; + INSERT INTO t7n VALUES(null); + INSERT INTO t8n SELECT * FROM t7n; + } + execsql {SELECT 1 IN t4} +} 0 +do_execsql_test tkt-80e031a00f.105 {SELECT 1 NOT IN t4} 1 +do_execsql_test tkt-80e031a00f.106 {SELECT 1 IN t5} 0 +do_execsql_test tkt-80e031a00f.107 {SELECT 1 NOT IN t5} 1 +do_execsql_test tkt-80e031a00f.108 {SELECT 1 IN t6} 0 +do_execsql_test tkt-80e031a00f.109 {SELECT 1 NOT IN t6} 1 +do_execsql_test tkt-80e031a00f.110 {SELECT 'a' IN t7} 0 +do_execsql_test tkt-80e031a00f.111 {SELECT 'a' NOT IN t7} 1 +do_execsql_test tkt-80e031a00f.112 {SELECT 'a' IN t8} 0 +do_execsql_test tkt-80e031a00f.113 {SELECT 'a' NOT IN t8} 1 +# +# Row 2 is tested by cases 1-32 above. +# Row 3: +do_execsql_test tkt-80e031a00f.300 {SELECT 2 IN (2,3,4,null)} 1 +do_execsql_test tkt-80e031a00f.301 {SELECT 3 NOT IN (2,3,4,null)} 0 +do_execsql_test tkt-80e031a00f.302 {SELECT 4 IN (2,3,4)} 1 +do_execsql_test tkt-80e031a00f.303 {SELECT 2 NOT IN (2,3,4)} 0 +do_execsql_test tkt-80e031a00f.304 {SELECT 'b' IN ('b','c','d')} 1 +do_execsql_test tkt-80e031a00f.305 {SELECT 'c' NOT IN ('b','c','d')} 0 +do_execsql_test tkt-80e031a00f.306 {SELECT 'd' IN ('b','c',null,'d')} 1 +do_execsql_test tkt-80e031a00f.307 {SELECT 'b' NOT IN (null,'b','c','d')} 0 +do_execsql_test tkt-80e031a00f.308 {SELECT 2 IN t4} 1 +do_execsql_test tkt-80e031a00f.309 {SELECT 3 NOT IN t4} 0 +do_execsql_test tkt-80e031a00f.310 {SELECT 4 IN t4n} 1 +do_execsql_test tkt-80e031a00f.311 {SELECT 2 NOT IN t4n} 0 +do_execsql_test tkt-80e031a00f.312 {SELECT 2 IN t5} 1 +do_execsql_test tkt-80e031a00f.313 {SELECT 3 NOT IN t5} 0 +do_execsql_test tkt-80e031a00f.314 {SELECT 2 IN t6} 1 +do_execsql_test tkt-80e031a00f.315 {SELECT 3 NOT IN t6} 0 +do_execsql_test tkt-80e031a00f.316 {SELECT 4 IN t6n} 1 +do_execsql_test tkt-80e031a00f.317 {SELECT 2 NOT IN t6n} 0 +do_execsql_test tkt-80e031a00f.318 {SELECT 'b' IN t7} 1 +do_execsql_test tkt-80e031a00f.319 {SELECT 'c' NOT IN t7} 0 +do_execsql_test tkt-80e031a00f.320 {SELECT 'c' IN t7n} 1 +do_execsql_test tkt-80e031a00f.321 {SELECT 'd' NOT IN t7n} 0 +do_execsql_test tkt-80e031a00f.322 {SELECT 'b' IN t8} 1 +do_execsql_test tkt-80e031a00f.323 {SELECT 'c' NOT IN t8} 0 +do_execsql_test tkt-80e031a00f.324 {SELECT 'c' IN t8n} 1 +do_execsql_test tkt-80e031a00f.325 {SELECT 'd' NOT IN t8n} 0 +# +# Row 4: +do_execsql_test tkt-80e031a00f.400 {SELECT 1 IN (2,3,4,null)} {{}} +do_execsql_test tkt-80e031a00f.401 {SELECT 1 NOT IN (2,3,4,null)} {{}} +do_execsql_test 
tkt-80e031a00f.402 {SELECT 'a' IN ('b','c',null,'d')} {{}} +do_execsql_test tkt-80e031a00f.403 {SELECT 'a' NOT IN (null,'b','c','d')} {{}} +do_execsql_test tkt-80e031a00f.404 {SELECT 1 IN t4n} {{}} +do_execsql_test tkt-80e031a00f.405 {SELECT 5 NOT IN t4n} {{}} +do_execsql_test tkt-80e031a00f.406 {SELECT 6 IN t6n} {{}} +do_execsql_test tkt-80e031a00f.407 {SELECT 7 NOT IN t6n} {{}} +do_execsql_test tkt-80e031a00f.408 {SELECT 'a' IN t7n} {{}} +do_execsql_test tkt-80e031a00f.409 {SELECT 'e' NOT IN t7n} {{}} +do_execsql_test tkt-80e031a00f.410 {SELECT 'f' IN t8n} {{}} +do_execsql_test tkt-80e031a00f.411 {SELECT 'g' NOT IN t8n} {{}} +# +# Row 5: +do_execsql_test tkt-80e031a00f.500 {SELECT null IN (2,3,4,null)} {{}} +do_execsql_test tkt-80e031a00f.501 {SELECT null NOT IN (2,3,4,null)} {{}} +do_execsql_test tkt-80e031a00f.502 {SELECT null IN (2,3,4)} {{}} +do_execsql_test tkt-80e031a00f.503 {SELECT null NOT IN (2,3,4)} {{}} +do_execsql_test tkt-80e031a00f.504 {SELECT null IN ('b','c','d')} {{}} +do_execsql_test tkt-80e031a00f.505 {SELECT null NOT IN ('b','c','d')} {{}} +do_execsql_test tkt-80e031a00f.506 {SELECT null IN ('b','c',null,'d')} {{}} +do_execsql_test tkt-80e031a00f.507 {SELECT null NOT IN (null,'b','c','d')} {{}} +do_execsql_test tkt-80e031a00f.508 {SELECT null IN t4} {{}} +do_execsql_test tkt-80e031a00f.509 {SELECT null NOT IN t4} {{}} +do_execsql_test tkt-80e031a00f.510 {SELECT null IN t4n} {{}} +do_execsql_test tkt-80e031a00f.511 {SELECT null NOT IN t4n} {{}} +do_execsql_test tkt-80e031a00f.512 {SELECT null IN t5} {{}} +do_execsql_test tkt-80e031a00f.513 {SELECT null NOT IN t5} {{}} +do_execsql_test tkt-80e031a00f.514 {SELECT null IN t6} {{}} +do_execsql_test tkt-80e031a00f.515 {SELECT null NOT IN t6} {{}} +do_execsql_test tkt-80e031a00f.516 {SELECT null IN t6n} {{}} +do_execsql_test tkt-80e031a00f.517 {SELECT null NOT IN t6n} {{}} +do_execsql_test tkt-80e031a00f.518 {SELECT null IN t7} {{}} +do_execsql_test tkt-80e031a00f.519 {SELECT null NOT IN t7} {{}} +do_execsql_test tkt-80e031a00f.520 {SELECT null IN t7n} {{}} +do_execsql_test tkt-80e031a00f.521 {SELECT null NOT IN t7n} {{}} +do_execsql_test tkt-80e031a00f.522 {SELECT null IN t8} {{}} +do_execsql_test tkt-80e031a00f.523 {SELECT null NOT IN t8} {{}} +do_execsql_test tkt-80e031a00f.524 {SELECT null IN t8n} {{}} +do_execsql_test tkt-80e031a00f.525 {SELECT null NOT IN t8n} {{}} + +finish_test diff --git a/test/tkt-9d68c883.test b/test/tkt-9d68c883.test new file mode 100644 index 0000000..18dc6cc --- /dev/null +++ b/test/tkt-9d68c883.test @@ -0,0 +1,53 @@ +# 2010 April 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file tests that bug 9d68c883132c8e9ffcd5b0c148c990807b5df1b7 +# is fixed. 
+# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt-9d68c88-1.1 { + execsql { + PRAGMA page_size = 1024; + PRAGMA auto_vacuum = 2; + CREATE TABLE t3(x); + CREATE TABLE t4(x); + CREATE TABLE t5(x); + INSERT INTO t5 VALUES(randomblob(1500)); + CREATE TABLE t7(x); + CREATE TABLE t8(x); + } +} {} + + +for {set i 0} {$i < 100} {incr i} { + db close + sqlite3_simulate_device -sectorsize 8192 + sqlite3 db test.db -vfs devsym + + do_test tkt-9d68c88-2.$i { + execsql { + BEGIN; + DELETE FROM t5; + INSERT INTO t8 VALUES('hello world'); + } + + sqlite3_memdebug_fail $i -repeat 0 + catchsql { DROP TABLE t7; } + sqlite3_memdebug_fail -1 + + catchsql { ROLLBACK } + execsql { PRAGMA integrity_check } + } {ok} +} + +finish_test diff --git a/test/tkt-cbd054fa6b.test b/test/tkt-cbd054fa6b.test new file mode 100644 index 0000000..6e7455b --- /dev/null +++ b/test/tkt-cbd054fa6b.test @@ -0,0 +1,87 @@ +# 2010 March 25 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file implements tests to verify that ticket [cbd054fa6b] has been +# fixed. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !stat2 { + finish_test + return +} + +do_test tkt-cbd05-1.1 { + db eval { + CREATE TABLE t1(a INTEGER PRIMARY KEY, b TEXT UNIQUE NOT NULL); + CREATE INDEX t1_x ON t1(b); + INSERT INTO t1 VALUES (NULL, ''); + INSERT INTO t1 VALUES (NULL, 'A'); + INSERT INTO t1 VALUES (NULL, 'B'); + INSERT INTO t1 VALUES (NULL, 'C'); + INSERT INTO t1 VALUES (NULL, 'D'); + INSERT INTO t1 VALUES (NULL, 'E'); + INSERT INTO t1 VALUES (NULL, 'F'); + INSERT INTO t1 VALUES (NULL, 'G'); + INSERT INTO t1 VALUES (NULL, 'H'); + INSERT INTO t1 VALUES (NULL, 'I'); + SELECT count(*) FROM t1; + } +} {10} +do_test tkt-cbd05-1.2 { + db eval { + ANALYZE; + } +} {} +do_test tkt-cbd05-1.3 { + execsql { + SELECT tbl,idx,group_concat(sample,' ') + FROM sqlite_stat2 + WHERE idx = 't1_x' + GROUP BY tbl,idx + } +} {t1 t1_x { A B C D E F G H I}} + +do_test tkt-cbd05-2.1 { + db eval { + DROP TABLE t1; + CREATE TABLE t1(a INTEGER PRIMARY KEY, b BLOB UNIQUE NOT NULL); + CREATE INDEX t1_x ON t1(b); + INSERT INTO t1 VALUES(NULL, X''); + INSERT INTO t1 VALUES(NULL, X'41'); + INSERT INTO t1 VALUES(NULL, X'42'); + INSERT INTO t1 VALUES(NULL, X'43'); + INSERT INTO t1 VALUES(NULL, X'44'); + INSERT INTO t1 VALUES(NULL, X'45'); + INSERT INTO t1 VALUES(NULL, X'46'); + INSERT INTO t1 VALUES(NULL, X'47'); + INSERT INTO t1 VALUES(NULL, X'48'); + INSERT INTO t1 VALUES(NULL, X'49'); + SELECT count(*) FROM t1; + } +} {10} +do_test tkt-cbd05-2.2 { + db eval { + ANALYZE; + } +} {} +do_test tkt-cbd05-2.3 { + execsql { + SELECT tbl,idx,group_concat(sample,' ') + FROM sqlite_stat2 + WHERE idx = 't1_x' + GROUP BY tbl,idx + } +} {t1 t1_x { A B C D E F G H I}} + +finish_test diff --git a/test/tkt-d11f09d36e.test b/test/tkt-d11f09d36e.test new file mode 100644 index 0000000..7065770 --- /dev/null +++ b/test/tkt-d11f09d36e.test @@ -0,0 +1,62 @@ +# 2010 June 26 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# +# Test that the bug reported by ticket d11f09d36e7cb0821e01f4 has +# been fixed. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +set a_string_counter 1 +proc a_string {n} { + global a_string_counter + incr a_string_counter + string range [string repeat "${a_string_counter}." $n] 1 $n +} +db func a_string a_string + +do_test tkt-d11f09d36e.1 { + execsql { + PRAGMA synchronous = NORMAL; + PRAGMA cache_size = 10; + CREATE TABLE t1(x, y, UNIQUE(x, y)); + BEGIN; + } + for {set i 0} {$i < 10000} {incr i} { + execsql { INSERT INTO t1 VALUES($i, $i) } + } + execsql COMMIT +} {} +do_test tkt-d11f09d36e.2 { + execsql { + BEGIN; + UPDATE t1 set x = x+10000; + ROLLBACK; + } +} {} +do_test tkt-d11f09d36e.3 { + execsql { PRAGMA integrity_check } +} {ok} +do_test tkt-d11f09d36e.4 { + execsql { + SAVEPOINT tr; + UPDATE t1 set x = x+10000; + ROLLBACK TO tr; + RELEASE tr; + } +} {} +do_test tkt-d11f09d36e.5 { + execsql { PRAGMA integrity_check } +} {ok} + +finish_test + diff --git a/test/tkt-f973c7ac31.test b/test/tkt-f973c7ac31.test new file mode 100644 index 0000000..882e86a --- /dev/null +++ b/test/tkt-f973c7ac31.test @@ -0,0 +1,87 @@ +# 2010 June 09 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt-f973c7ac3-1.0 { + execsql { + CREATE TABLE t(c1 INTEGER, c2 INTEGER); + INSERT INTO t VALUES(5, 5); + INSERT INTO t VALUES(5, 4); + } +} {} + +foreach {tn sql} { + 1 "" + 2 "CREATE INDEX i1 ON t(c1, c2)" +} { + + execsql $sql + + do_test tkt-f973c7ac3-1.$tn.1 { + execsql { + SELECT * FROM t WHERE c1 = 5 AND c2>0 AND c2<='2' ORDER BY c2 DESC + } + } {} + do_test tkt-f973c7ac3-1.$tn.2 { + execsql { + SELECT * FROM t WHERE c1 = 5 AND c2>0 AND c2<=5 ORDER BY c2 DESC + } + } {5 5 5 4} + do_test tkt-f973c7ac3-1.$tn.3 { + execsql { + SELECT * FROM t WHERE c1 = 5 AND c2>0 AND c2<='5' ORDER BY c2 DESC + } + } {5 5 5 4} + do_test tkt-f973c7ac3-1.$tn.4 { + execsql { + SELECT * FROM t WHERE c1 = 5 AND c2>'0' AND c2<=5 ORDER BY c2 DESC + } + } {5 5 5 4} + do_test tkt-f973c7ac3-1.$tn.5 { + execsql { + SELECT * FROM t WHERE c1 = 5 AND c2>'0' AND c2<='5' ORDER BY c2 DESC + } + } {5 5 5 4} + + do_test tkt-f973c7ac3-1.$tn.6 { + execsql { + SELECT * FROM t WHERE c1 = 5 AND c2>0 AND c2<='2' ORDER BY c2 ASC + } + } {} + do_test tkt-f973c7ac3-1.$tn.7 { + execsql { + SELECT * FROM t WHERE c1 = 5 AND c2>0 AND c2<=5 ORDER BY c2 ASC + } + } {5 4 5 5} + do_test tkt-f973c7ac3-1.$tn.8 { + execsql { + SELECT * FROM t WHERE c1 = 5 AND c2>0 AND c2<='5' ORDER BY c2 ASC + } + } {5 4 5 5} + do_test tkt-f973c7ac3-1.$tn.9 { + execsql { + SELECT * FROM t WHERE c1 = 5 AND c2>'0' AND c2<=5 ORDER BY c2 ASC + } + } {5 4 5 5} + do_test tkt-f973c7ac3-1.$tn.10 { + execsql { + SELECT * FROM t WHERE c1 = 5 AND c2>'0' AND c2<='5' ORDER BY c2 ASC + } + } {5 4 5 5} +} + + +finish_test + diff --git a/test/tkt-fc62af4523.test b/test/tkt-fc62af4523.test new file mode 100644 index 0000000..ed1497b --- /dev/null +++ b/test/tkt-fc62af4523.test @@ -0,0 +1,84 @@ +# 2010 June 16 +# +# The author disclaims copyright to this source code. 
In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. Specifically, +# it tests that ticket [fc62af4523] has been resolved. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/lock_common.tcl +source $testdir/malloc_common.tcl + +do_test tkt-fc62af4523.1 { + execsql { + PRAGMA cache_size = 10; + PRAGMA journal_mode = persist; + CREATE TABLE t1(a UNIQUE, b UNIQUE); + INSERT INTO t1 SELECT randomblob(200), randomblob(300); + INSERT INTO t1 SELECT randomblob(200), randomblob(300) FROM t1; -- 2 + INSERT INTO t1 SELECT randomblob(200), randomblob(300) FROM t1; -- 4 + INSERT INTO t1 SELECT randomblob(200), randomblob(300) FROM t1; -- 8 + INSERT INTO t1 SELECT randomblob(200), randomblob(300) FROM t1; -- 16 + INSERT INTO t1 SELECT randomblob(200), randomblob(300) FROM t1; -- 32 + INSERT INTO t1 SELECT randomblob(200), randomblob(300) FROM t1; -- 64 + } + execsql { + PRAGMA integrity_check; + SELECT count(*) FROM t1; + } +} {ok 64} + +# Launch an external process. Have it write (but not commit) a large +# transaction to the database. +# +set ::chan [launch_testfixture] +proc buddy {code} { testfixture $::chan $code } +do_test tkt-fc62af4523.2 { + testfixture $::chan { + sqlite3 db test.db + db eval { + PRAGMA cache_size = 10; + BEGIN; + UPDATE t1 SET b = randomblob(400); + UPDATE t1 SET a = randomblob(201); + } + } + file exists test.db-journal +} {1} + +# Now do "PRAGMA journal_mode = DELETE" in this process. At one point +# this was causing SQLite to delete the journal file from the file-system, +# even though the external process is currently using it. +# +do_test tkt-fc62af4523.3 { execsql { PRAGMA journal_mode = DELETE } } {delete} +do_test tkt-fc62af4523.4 { file exists test.db-journal } {1} + +# Cause the external process to crash. Since it has already written +# uncommitted data into the database file, the next reader will have +# to do a hot-journal rollback to recover the database. +# +# Or, if this test is run in a version with the bug present, the journal +# file has already been deleted. In this case we are left with a corrupt +# database file and no hot-journal to fix it with. +# +do_test tkt-fc62af4523.5 { + testfixture $::chan sqlite_abort +} {ERROR: Child process hung up} +after 200 +do_test tkt-fc62af4523.6 { + execsql { + PRAGMA integrity_check; + SELECT count(*) FROM t1; + } +} {ok 64} + +catch { close $::chan } +finish_test diff --git a/test/tkt3472.test b/test/tkt3472.test deleted file mode 100644 index 5e4b537..0000000 --- a/test/tkt3472.test +++ /dev/null @@ -1,39 +0,0 @@ -# 2008 November 11 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. 
-# -#*********************************************************************** -# -# $Id: tkt3472.test,v 1.4 2008/12/03 22:32:45 drh Exp $ - -set testdir [file dirname $argv0] -source $testdir/tester.tcl - -if {![info exists threadsOverrideEachOthersLocks]} { - finish_test - return -} - -set ::correctvalue $threadsOverrideEachOthersLocks -puts "threadsOverrideEachOthersLocks = $::correctvalue" - -do_test tkt3472-1.1 { - db close - set threadsOverrideEachOthersLocks -1 - sqlite3 db test.db - set threadsOverrideEachOthersLocks -} $::correctvalue - -do_test tkt3472-1.2 { - db close - set threadsOverrideEachOthersLocks -1 - sqlite3 db test.db -readonly 1 - set threadsOverrideEachOthersLocks -} $::correctvalue - -finish_test diff --git a/test/trans.test b/test/trans.test index 1965941..bbaedc5 100644 --- a/test/trans.test +++ b/test/trans.test @@ -19,6 +19,7 @@ source $testdir/tester.tcl # Create several tables to work with. # +wal_set_journal_mode do_test trans-1.0 { execsql { CREATE TABLE one(a int PRIMARY KEY, b text); @@ -46,6 +47,7 @@ do_test trans-1.10 { execsql {SELECT b FROM two ORDER BY a} altdb } {I V X} integrity_check trans-1.11 +wal_check_journal_mode trans-1.12 # Basic transactions # @@ -82,6 +84,7 @@ do_test trans-2.10 { } } {1 2 3 1 5 10} integrity_check trans-2.11 +wal_check_journal_mode trans-2.12 # Check the locking behavior # @@ -162,6 +165,7 @@ do_test trans-3.14 { lappend v $msg } {0 {1 2 3 4}} integrity_check trans-3.15 +wal_check_journal_mode trans-3.16 do_test trans-4.1 { set v [catch {execsql { @@ -228,6 +232,8 @@ do_test trans-4.11 { lappend v $msg } {0 {1 2 3 4}} integrity_check trans-4.12 +wal_check_journal_mode trans-4.13 +wal_check_journal_mode trans-4.14 altdb do_test trans-4.98 { altdb close execsql { @@ -775,6 +781,7 @@ do_test trans-7.14 { execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master} } $checksum2 integrity_check trans-7.15 +wal_check_journal_mode trans-7.16 # Arrange for another process to begin modifying the database but abort # and die in the middle of the modification. Then have this process read @@ -824,7 +831,7 @@ do_test trans-8.5 { execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master} } $checksum2 integrity_check trans-8.6 - +wal_check_journal_mode trans-8.7 # In the following sequence of tests, compute the MD5 sum of the content # of a table, make lots of modifications to that table, then do a rollback. @@ -854,6 +861,7 @@ do_test trans-9.1 { SELECT count(*) FROM t3; } } {1024} +wal_check_journal_mode trans-9.1.1 # The following procedure computes a "signature" for table "t3". If # T3 changes in any way, the signature should change. @@ -875,12 +883,11 @@ proc signature {} { # is in use, only 20 iterations. Otherwise the test pcache runs out # of page slots and SQLite reports "out of memory". 
# -if {[info exists ISQUICK] || ( - $TEMP_STORE==3 && [catch {set ::permutations_test_prefix} val]==0 && - [regexp {^pcache[[:digit:]]*$} $val] +if {[info exists G(isquick)] || ( + $TEMP_STORE==3 && [regexp {^pcache[[:digit:]]*$} [permutation]] ) } { set limit 20 -} elseif {[info exists SOAKTEST]} { +} elseif {[info exists G(issoak)]} { set limit 100 } else { set limit 40 @@ -925,6 +932,7 @@ for {set i 2} {$i<=$limit} {incr i} { INSERT INTO t3 SELECT randstr(10,400) FROM t3 WHERE random()%10==0; } } {} + catch flush_async_queue if {$tcl_platform(platform)=="unix"} { do_test trans-9.$i.4-$cnt { expr {$sqlite_sync_count>0} @@ -940,6 +948,8 @@ for {set i 2} {$i<=$limit} {incr i} { } } } + + wal_check_journal_mode trans-9.$i.6-$cnt set ::pager_old_format 0 } diff --git a/test/trigger7.test b/test/trigger7.test index 2c79fa0..8fde200 100644 --- a/test/trigger7.test +++ b/test/trigger7.test @@ -112,10 +112,8 @@ do_test trigger7-99.1 { UPDATE sqlite_master SET sql='nonsense'; } db close - sqlite3 db test.db - catchsql { - DROP TRIGGER t2r5 - } + catch { sqlite3 db test.db } + catchsql { DROP TRIGGER t2r5 } } {1 {malformed database schema (t2r12) - near "nonsense": syntax error}} finish_test diff --git a/test/triggerA.test b/test/triggerA.test index 1a0055f..9f93db5 100644 --- a/test/triggerA.test +++ b/test/triggerA.test @@ -77,7 +77,7 @@ do_test triggerA-1.5 { do_test triggerA-1.6 { db eval { CREATE VIEW v5 AS SELECT x, b FROM t1, t2 WHERE y=c; - SELECT * FROM v5; + SELECT * FROM v5 ORDER BY x DESC; } } {10 1003 9 904 8 805 7 705 6 603 5 504 4 404 3 305 2 203 1 103} diff --git a/test/triggerC.test b/test/triggerC.test index c1967be..879594c 100644 --- a/test/triggerC.test +++ b/test/triggerC.test @@ -855,5 +855,67 @@ do_test triggerC-10.3 { } } {5 2 3 35 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 32 33 22 34 35 36 37 38 39 40} +#------------------------------------------------------------------------- +# Test that bug [371bab5d65] has been fixed. BEFORE INSERT and INSTEAD OF +# INSERT triggers with the DEFAULT VALUES INSERT syntax. 
+# +do_test triggerC-11.0 { + catchsql { DROP TABLE log } + execsql { CREATE TABLE log(a, b) } +} {} + +foreach {testno tbl defaults} { + 1 "CREATE TABLE t1(a, b)" {{} {}} + 2 "CREATE TABLE t1(a DEFAULT 1, b DEFAULT 'abc')" {1 abc} + 3 "CREATE TABLE t1(a, b DEFAULT 4.5)" {{} 4.5} +} { + do_test triggerC-11.$testno.1 { + catchsql { DROP TABLE t1 } + execsql { DELETE FROM log } + execsql $tbl + execsql { + CREATE TRIGGER tt1 BEFORE INSERT ON t1 BEGIN + INSERT INTO log VALUES(new.a, new.b); + END; + INSERT INTO t1 DEFAULT VALUES; + SELECT * FROM log; + } + } $defaults + + do_test triggerC-11.$testno.2 { + execsql { DELETE FROM log } + execsql { + CREATE TRIGGER tt2 AFTER INSERT ON t1 BEGIN + INSERT INTO log VALUES(new.a, new.b); + END; + INSERT INTO t1 DEFAULT VALUES; + SELECT * FROM log; + } + } [concat $defaults $defaults] + + do_test triggerC-11.$testno.3 { + execsql { DROP TRIGGER tt1 } + execsql { DELETE FROM log } + execsql { + INSERT INTO t1 DEFAULT VALUES; + SELECT * FROM log; + } + } $defaults +} +do_test triggerC-11.4 { + catchsql { DROP TABLE t2 } + execsql { + DELETE FROM log; + CREATE TABLE t2(a, b); + CREATE VIEW v2 AS SELECT * FROM t2; + CREATE TRIGGER tv2 INSTEAD OF INSERT ON v2 BEGIN + INSERT INTO log VALUES(new.a, new.b); + END; + INSERT INTO v2 DEFAULT VALUES; + SELECT a, b, a IS NULL, b IS NULL FROM log; + } +} {{} {} 1 1} + + finish_test diff --git a/test/vacuum.test b/test/vacuum.test index 4b1cd5a..256730b 100644 --- a/test/vacuum.test +++ b/test/vacuum.test @@ -54,6 +54,19 @@ do_test vacuum-1.1 { set ::cksum [cksum] expr {$::cksum!=""} } {1} + +# Create bogus application-defined functions for functions used +# internally by VACUUM, to ensure that VACUUM falls back +# to the built-in functions. +# +proc failing_app_func {args} {error "bad function"} +do_test vacuum-1.1b { + db func substr failing_app_func + db func like failing_app_func + db func quote failing_app_func + catchsql {SELECT substr(name,1,3) FROM sqlite_master} +} {1 {bad function}} + do_test vacuum-1.2 { execsql { VACUUM; diff --git a/test/veryquick.test b/test/veryquick.test index f212341..ca82b22 100644 --- a/test/veryquick.test +++ b/test/veryquick.test @@ -11,5 +11,9 @@ # $Id: veryquick.test,v 1.9 2008/07/12 14:52:21 drh Exp $ set testdir [file dirname $argv0] -set ISVERYQUICK 1 -source $testdir/quick.test +source $testdir/permutations.test + +run_test_suite veryquick + +finish_test + diff --git a/test/wal.test b/test/wal.test new file mode 100644 index 0000000..59097dc --- /dev/null +++ b/test/wal.test @@ -0,0 +1,1446 @@ +# 2010 April 13 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the operation of the library in +# "PRAGMA journal_mode=WAL" mode. 
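Many of the expected values in wal.test are computed with wal_file_size, a helper sourced from wal_common.tcl, which is not reproduced in this excerpt. Assuming the 3.7.0 WAL on-disk format (a 32-byte log header followed by a 24-byte frame header in front of each page written), it presumably reduces to:

    # Hypothetical sketch of the wal_common.tcl helper: size of a WAL file
    # holding nFrame frames of pgsz-byte pages (32-byte file header plus
    # 24 bytes of frame header per frame).
    proc wal_file_size {nFrame pgsz} {
      expr {32 + ($pgsz + 24) * $nFrame}
    }

Under that assumption, [wal_file_size 2 1024] is 32 + 2*1048 = 2128 bytes, which is the value that test wal-1.2 ("there are now two pages in the log") compares the size of test.db-wal against.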
+# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/lock_common.tcl +source $testdir/malloc_common.tcl +source $testdir/wal_common.tcl + +ifcapable !wal {finish_test ; return } + +proc reopen_db {} { + catch { db close } + file delete -force test.db test.db-wal test.db-wal-summary + sqlite3_wal db test.db +} + +set ::blobcnt 0 +proc blob {nByte} { + incr ::blobcnt + return [string range [string repeat "${::blobcnt}x" $nByte] 1 $nByte] +} + +proc sqlite3_wal {args} { + eval sqlite3 $args + [lindex $args 0] eval { PRAGMA auto_vacuum = 0 } + [lindex $args 0] eval { PRAGMA page_size = 1024 } + [lindex $args 0] eval { PRAGMA journal_mode = wal } + [lindex $args 0] eval { PRAGMA synchronous = normal } + [lindex $args 0] function blob blob +} + +proc log_deleted {logfile} { + return [expr [file exists $logfile]==0] +} + +# +# These are 'warm-body' tests used while developing the WAL code. They +# serve to prove that a few really simple cases work: +# +# wal-1.*: Read and write the database. +# wal-2.*: Test MVCC with one reader, one writer. +# wal-3.*: Test transaction rollback. +# wal-4.*: Test savepoint/statement rollback. +# wal-5.*: Test the temp database. +# wal-6.*: Test creating databases with different page sizes. +# +# + +do_test wal-0.1 { + execsql { PRAGMA auto_vacuum = 0 } + execsql { PRAGMA synchronous = normal } + execsql { PRAGMA journal_mode = wal } +} {wal} +do_test wal-0.2 { + file size test.db +} {1024} + +do_test wal-1.0 { + execsql { + BEGIN; + CREATE TABLE t1(a, b); + } + list [file exists test.db-journal] \ + [file exists test.db-wal] \ + [file size test.db] +} {0 1 1024} +do_test wal-1.1 { + execsql COMMIT + list [file exists test.db-journal] [file exists test.db-wal] +} {0 1} +do_test wal-1.2 { + # There are now two pages in the log. + file size test.db-wal +} [wal_file_size 2 1024] + +do_test wal-1.3 { + execsql { SELECT * FROM sqlite_master } +} {table t1 t1 2 {CREATE TABLE t1(a, b)}} + +do_test wal-1.4 { + execsql { INSERT INTO t1 VALUES(1, 2) } + execsql { INSERT INTO t1 VALUES(3, 4) } + execsql { INSERT INTO t1 VALUES(5, 6) } + execsql { INSERT INTO t1 VALUES(7, 8) } + execsql { INSERT INTO t1 VALUES(9, 10) } +} {} + +do_test wal-1.5 { + execsql { SELECT * FROM t1 } +} {1 2 3 4 5 6 7 8 9 10} + +do_test wal-2.1 { + sqlite3_wal db2 ./test.db + execsql { BEGIN; SELECT * FROM t1 } db2 +} {1 2 3 4 5 6 7 8 9 10} + +do_test wal-2.2 { + execsql { INSERT INTO t1 VALUES(11, 12) } + execsql { SELECT * FROM t1 } +} {1 2 3 4 5 6 7 8 9 10 11 12} + +do_test wal-2.3 { + execsql { SELECT * FROM t1 } db2 +} {1 2 3 4 5 6 7 8 9 10} + +do_test wal-2.4 { + execsql { INSERT INTO t1 VALUES(13, 14) } + execsql { SELECT * FROM t1 } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14} + +do_test wal-2.5 { + execsql { SELECT * FROM t1 } db2 +} {1 2 3 4 5 6 7 8 9 10} + +do_test wal-2.6 { + execsql { COMMIT; SELECT * FROM t1 } db2 +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14} + +do_test wal-3.1 { + execsql { BEGIN; DELETE FROM t1 } + execsql { SELECT * FROM t1 } +} {} +do_test wal-3.2 { + execsql { SELECT * FROM t1 } db2 +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14} +do_test wal-3.3 { + execsql { ROLLBACK } + execsql { SELECT * FROM t1 } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14} +db2 close + +#------------------------------------------------------------------------- +# The following tests, wal-4.*, test that savepoints work with WAL +# databases. 
+# +do_test wal-4.1 { + execsql { + DELETE FROM t1; + BEGIN; + INSERT INTO t1 VALUES('a', 'b'); + SAVEPOINT sp; + INSERT INTO t1 VALUES('c', 'd'); + SELECT * FROM t1; + } +} {a b c d} +do_test wal-4.2 { + execsql { + ROLLBACK TO sp; + SELECT * FROM t1; + } +} {a b} +do_test wal-4.3 { + execsql { + COMMIT; + SELECT * FROM t1; + } +} {a b} + +do_test wal-4.4.1 { + db close + sqlite3 db test.db + db func blob blob + list [execsql { SELECT * FROM t1 }] [file size test.db-wal] +} {{a b} 0} +do_test wal-4.4.2 { + execsql { PRAGMA cache_size = 10 } + execsql { + CREATE TABLE t2(a, b); + INSERT INTO t2 VALUES(blob(400), blob(400)); + SAVEPOINT tr; + INSERT INTO t2 SELECT blob(400), blob(400) FROM t2; /* 2 */ + INSERT INTO t2 SELECT blob(400), blob(400) FROM t2; /* 4 */ + INSERT INTO t2 SELECT blob(400), blob(400) FROM t2; /* 8 */ + INSERT INTO t2 SELECT blob(400), blob(400) FROM t2; /* 16 */ + INSERT INTO t2 SELECT blob(400), blob(400) FROM t2; /* 32 */ + INSERT INTO t1 SELECT blob(400), blob(400) FROM t1; /* 2 */ + INSERT INTO t1 SELECT blob(400), blob(400) FROM t1; /* 4 */ + INSERT INTO t1 SELECT blob(400), blob(400) FROM t1; /* 8 */ + INSERT INTO t1 SELECT blob(400), blob(400) FROM t1; /* 16 */ + INSERT INTO t1 SELECT blob(400), blob(400) FROM t1; /* 32 */ + SELECT count(*) FROM t2; + } +} {32} +do_test wal-4.4.3 { + execsql { ROLLBACK TO tr } +} {} +do_test wal-4.4.4 { + set logsize [file size test.db-wal] + execsql { + INSERT INTO t1 VALUES('x', 'y'); + RELEASE tr; + } + expr { $logsize == [file size test.db-wal] } +} {1} +do_test wal-4.4.5 { + execsql { SELECT count(*) FROM t2 } +} {1} +do_test wal-4.4.6 { + file copy -force test.db test2.db + file copy -force test.db-wal test2.db-wal + sqlite3 db2 test2.db + execsql { SELECT count(*) FROM t2 ; SELECT count(*) FROM t1 } db2 +} {1 2} +do_test wal-4.4.7 { + execsql { PRAGMA integrity_check } db2 +} {ok} +db2 close + +do_test wal-4.5.1 { + reopen_db + db func blob blob + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES('a', 'b'); + } + sqlite3 db test.db + db func blob blob + list [execsql { SELECT * FROM t1 }] [file size test.db-wal] +} {{a b} 0} +do_test wal-4.5.2 { + execsql { PRAGMA cache_size = 10 } + execsql { + CREATE TABLE t2(a, b); + BEGIN; + INSERT INTO t2 VALUES(blob(400), blob(400)); + SAVEPOINT tr; + INSERT INTO t2 SELECT blob(400), blob(400) FROM t2; /* 2 */ + INSERT INTO t2 SELECT blob(400), blob(400) FROM t2; /* 4 */ + INSERT INTO t2 SELECT blob(400), blob(400) FROM t2; /* 8 */ + INSERT INTO t2 SELECT blob(400), blob(400) FROM t2; /* 16 */ + INSERT INTO t2 SELECT blob(400), blob(400) FROM t2; /* 32 */ + INSERT INTO t1 SELECT blob(400), blob(400) FROM t1; /* 2 */ + INSERT INTO t1 SELECT blob(400), blob(400) FROM t1; /* 4 */ + INSERT INTO t1 SELECT blob(400), blob(400) FROM t1; /* 8 */ + INSERT INTO t1 SELECT blob(400), blob(400) FROM t1; /* 16 */ + INSERT INTO t1 SELECT blob(400), blob(400) FROM t1; /* 32 */ + SELECT count(*) FROM t2; + } +} {32} +do_test wal-4.5.3 { + execsql { ROLLBACK TO tr } +} {} +do_test wal-4.5.4 { + set logsize [file size test.db-wal] + execsql { + INSERT INTO t1 VALUES('x', 'y'); + RELEASE tr; + COMMIT; + } + expr { $logsize == [file size test.db-wal] } +} {1} +do_test wal-4.5.5 { + execsql { SELECT count(*) FROM t2 ; SELECT count(*) FROM t1 } +} {1 2} +do_test wal-4.5.6 { + file copy -force test.db test2.db + file copy -force test.db-wal test2.db-wal + sqlite3 db2 test2.db + execsql { SELECT count(*) FROM t2 ; SELECT count(*) FROM t1 } db2 +} {1 2} +do_test 
wal-4.5.7 { + execsql { PRAGMA integrity_check } db2 +} {ok} +db2 close + +do_test wal-4.6.1 { + execsql { + DELETE FROM t2; + PRAGMA wal_checkpoint; + BEGIN; + INSERT INTO t2 VALUES('w', 'x'); + SAVEPOINT save; + INSERT INTO t2 VALUES('y', 'z'); + ROLLBACK TO save; + COMMIT; + SELECT * FROM t2; + } +} {w x} + + +reopen_db +do_test wal-5.1 { + execsql { + CREATE TEMP TABLE t2(a, b); + INSERT INTO t2 VALUES(1, 2); + } +} {} +do_test wal-5.2 { + execsql { + BEGIN; + INSERT INTO t2 VALUES(3, 4); + SELECT * FROM t2; + } +} {1 2 3 4} +do_test wal-5.3 { + execsql { + ROLLBACK; + SELECT * FROM t2; + } +} {1 2} +do_test wal-5.4 { + execsql { + CREATE TEMP TABLE t3(x UNIQUE); + BEGIN; + INSERT INTO t2 VALUES(3, 4); + INSERT INTO t3 VALUES('abc'); + } + catchsql { INSERT INTO t3 VALUES('abc') } +} {1 {column x is not unique}} +do_test wal-5.5 { + execsql { + COMMIT; + SELECT * FROM t2; + } +} {1 2 3 4} +db close + +foreach sector {512 4096} { + sqlite3_simulate_device -sectorsize $sector + foreach pgsz {512 1024 2048 4096} { + file delete -force test.db test.db-wal + do_test wal-6.$sector.$pgsz.1 { + sqlite3 db test.db -vfs devsym + execsql " + PRAGMA page_size = $pgsz; + PRAGMA auto_vacuum = 0; + PRAGMA journal_mode = wal; + " + execsql " + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + " + db close + file size test.db + } [expr $pgsz*2] + + do_test wal-6.$sector.$pgsz.2 { + log_deleted test.db-wal + } {1} + } +} + +do_test wal-7.1 { + file delete -force test.db test.db-wal + sqlite3_wal db test.db + execsql { + PRAGMA page_size = 1024; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + } + list [file size test.db] [file size test.db-wal] +} [list 1024 [wal_file_size 3 1024]] +do_test wal-7.2 { + execsql { PRAGMA wal_checkpoint } + list [file size test.db] [file size test.db-wal] +} [list 2048 [wal_file_size 3 1024]] + +# Execute some transactions in auto-vacuum mode to test database file +# truncation. +# +do_test wal-8.1 { + reopen_db + catch { db close } + file delete -force test.db test.db-wal + + sqlite3 db test.db + db function blob blob + execsql { + PRAGMA auto_vacuum = 1; + PRAGMA journal_mode = wal; + PRAGMA auto_vacuum; + } +} {wal 1} +do_test wal-8.2 { + execsql { + PRAGMA page_size = 1024; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(blob(900)); + INSERT INTO t1 VALUES(blob(900)); + INSERT INTO t1 SELECT blob(900) FROM t1; /* 4 */ + INSERT INTO t1 SELECT blob(900) FROM t1; /* 8 */ + INSERT INTO t1 SELECT blob(900) FROM t1; /* 16 */ + INSERT INTO t1 SELECT blob(900) FROM t1; /* 32 */ + INSERT INTO t1 SELECT blob(900) FROM t1; /* 64 */ + PRAGMA wal_checkpoint; + } + file size test.db +} [expr 68*1024] +do_test wal-8.3 { + execsql { + DELETE FROM t1 WHERE rowid<54; + PRAGMA wal_checkpoint; + } + file size test.db +} [expr 14*1024] + +# Run some "warm-body" tests to ensure that log-summary files with more +# than 256 entries (log summaries that contain index blocks) work Ok. 
+# +do_test wal-9.1 { + reopen_db + execsql { + CREATE TABLE t1(x PRIMARY KEY); + INSERT INTO t1 VALUES(blob(900)); + INSERT INTO t1 VALUES(blob(900)); + INSERT INTO t1 SELECT blob(900) FROM t1; /* 4 */ + INSERT INTO t1 SELECT blob(900) FROM t1; /* 8 */ + INSERT INTO t1 SELECT blob(900) FROM t1; /* 16 */ + INSERT INTO t1 SELECT blob(900) FROM t1; /* 32 */ + INSERT INTO t1 SELECT blob(900) FROM t1; /* 64 */ + INSERT INTO t1 SELECT blob(900) FROM t1; /* 128 */ + INSERT INTO t1 SELECT blob(900) FROM t1; /* 256 */ + } + file size test.db +} 1024 +do_test wal-9.2 { + sqlite3_wal db2 test.db + execsql {PRAGMA integrity_check } db2 +} {ok} + +do_test wal-9.3 { + file delete -force test2.db test2.db-wal + file copy test.db test2.db + file copy test.db-wal test2.db-wal + sqlite3_wal db3 test2.db + execsql {PRAGMA integrity_check } db3 +} {ok} +db3 close + +do_test wal-9.4 { + execsql { PRAGMA wal_checkpoint } + db2 close + sqlite3_wal db2 test.db + execsql {PRAGMA integrity_check } db2 +} {ok} + +foreach handle {db db2 db3} { catch { $handle close } } +unset handle + +#------------------------------------------------------------------------- +# The following block of tests - wal-10.* - test that the WAL locking +# scheme works in simple cases. This block of tests is run twice. Once +# using multiple connections in the address space of the current process, +# and once with all connections except one running in external processes. +# +do_multiclient_test tn { + + # Initialize the database schema and contents. + # + do_test wal-10.$tn.1 { + execsql { + PRAGMA journal_mode = wal; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + SELECT * FROM t1; + } + } {wal 1 2} + + # Open a transaction and write to the database using [db]. Check that [db2] + # is still able to read the snapshot before the transaction was opened. + # + do_test wal-10.$tn.2 { + execsql { BEGIN; INSERT INTO t1 VALUES(3, 4); } + sql2 {SELECT * FROM t1} + } {1 2} + + # Have [db] commit the transaction. Check that [db2] is now seeing the + # new, updated snapshot. + # + do_test wal-10.$tn.3 { + execsql { COMMIT } + sql2 {SELECT * FROM t1} + } {1 2 3 4} + + # Have [db2] open a read transaction. Then write to the db via [db]. Check + # that [db2] is still seeing the original snapshot. Then read with [db3]. + # [db3] should see the newly committed data. + # + do_test wal-10.$tn.4 { + sql2 { BEGIN ; SELECT * FROM t1} + } {1 2 3 4} + do_test wal-10.$tn.5 { + execsql { INSERT INTO t1 VALUES(5, 6); } + sql2 {SELECT * FROM t1} + } {1 2 3 4} + do_test wal-10.$tn.6 { + sql3 {SELECT * FROM t1} + } {1 2 3 4 5 6} + do_test wal-10.$tn.7 { + sql2 COMMIT + } {} + + # Have [db2] open a write transaction. Then attempt to write to the + # database via [db]. This should fail (writer lock cannot be obtained). + # + # Then open a read-transaction with [db]. Commit the [db2] transaction + # to disk. Verify that [db] still cannot write to the database (because + # it is reading an old snapshot). + # + # Close the current [db] transaction. Open a new one. [db] can now write + # to the database (as it is not locked and [db] is reading the latest + # snapshot). 
+ # + do_test wal-10.$tn.7 { + sql2 { BEGIN; INSERT INTO t1 VALUES(7, 8) ; } + catchsql { INSERT INTO t1 VALUES(9, 10) } + } {1 {database is locked}} + do_test wal-10.$tn.8 { + execsql { BEGIN ; SELECT * FROM t1 } + } {1 2 3 4 5 6} + do_test wal-10.$tn.9 { + sql2 COMMIT + catchsql { INSERT INTO t1 VALUES(9, 10) } + } {1 {database is locked}} + do_test wal-10.$tn.10 { + execsql { COMMIT } + execsql { BEGIN } + execsql { INSERT INTO t1 VALUES(9, 10) } + execsql { COMMIT } + execsql { SELECT * FROM t1 } + } {1 2 3 4 5 6 7 8 9 10} + + # Open a read transaction with [db2]. Check that this prevents [db] from + # checkpointing the database. But not from writing to it. + # + do_test wal-10.$tn.11 { + sql2 { BEGIN; SELECT * FROM t1 } + } {1 2 3 4 5 6 7 8 9 10} + do_test wal-10.$tn.12 { + catchsql { PRAGMA wal_checkpoint } + } {0 {}} ;# Reader no longer block checkpoints + do_test wal-10.$tn.13 { + execsql { INSERT INTO t1 VALUES(11, 12) } + sql2 {SELECT * FROM t1} + } {1 2 3 4 5 6 7 8 9 10} + + # Writers do not block checkpoints any more either. + # + do_test wal-10.$tn.14 { + catchsql { PRAGMA wal_checkpoint } + } {0 {}} + + # The following series of test cases used to verify another blocking + # case in WAL - a case which no longer blocks. + # + do_test wal-10.$tn.15 { + sql2 { COMMIT; BEGIN; SELECT * FROM t1; } + } {1 2 3 4 5 6 7 8 9 10 11 12} + do_test wal-10.$tn.16 { + catchsql { PRAGMA wal_checkpoint } + } {0 {}} + do_test wal-10.$tn.17 { + execsql { PRAGMA wal_checkpoint } + } {} + do_test wal-10.$tn.18 { + sql3 { BEGIN; SELECT * FROM t1 } + } {1 2 3 4 5 6 7 8 9 10 11 12} + do_test wal-10.$tn.19 { + catchsql { INSERT INTO t1 VALUES(13, 14) } + } {0 {}} + do_test wal-10.$tn.20 { + execsql { SELECT * FROM t1 } + } {1 2 3 4 5 6 7 8 9 10 11 12 13 14} + do_test wal-10.$tn.21 { + sql3 COMMIT + sql2 COMMIT + } {} + do_test wal-10.$tn.22 { + execsql { SELECT * FROM t1 } + } {1 2 3 4 5 6 7 8 9 10 11 12 13 14} + + # Another series of tests that used to demonstrate blocking behavior + # but which now work. + # + do_test wal-10.$tn.23 { + execsql { PRAGMA wal_checkpoint } + } {} + do_test wal-10.$tn.24 { + sql2 { BEGIN; SELECT * FROM t1; } + } {1 2 3 4 5 6 7 8 9 10 11 12 13 14} + do_test wal-10.$tn.25 { + execsql { PRAGMA wal_checkpoint } + } {} + do_test wal-10.$tn.26 { + catchsql { INSERT INTO t1 VALUES(15, 16) } + } {0 {}} + do_test wal-10.$tn.27 { + sql3 { INSERT INTO t1 VALUES(17, 18) } + } {} + do_test wal-10.$tn.28 { + code3 { + set ::STMT [sqlite3_prepare db3 "SELECT * FROM t1" -1 TAIL] + sqlite3_step $::STMT + } + execsql { SELECT * FROM t1 } + } {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18} + do_test wal-10.$tn.29 { + execsql { INSERT INTO t1 VALUES(19, 20) } + catchsql { PRAGMA wal_checkpoint } + } {0 {}} + do_test wal-10.$tn.30 { + code3 { sqlite3_finalize $::STMT } + execsql { PRAGMA wal_checkpoint } + } {} + + # At one point, if a reader failed to upgrade to a writer because it + # was reading an old snapshot, the write-locks were not being released. + # Test that this bug has been fixed. + # + do_test wal-10.$tn.31 { + sql2 COMMIT + execsql { BEGIN ; SELECT * FROM t1 } + sql2 { INSERT INTO t1 VALUES(21, 22) } + catchsql { INSERT INTO t1 VALUES(23, 24) } + } {1 {database is locked}} + do_test wal-10.$tn.32 { + # This statement would fail when the bug was present. 
+ sql2 { INSERT INTO t1 VALUES(23, 24) } + } {} + do_test wal-10.$tn.33 { + execsql { SELECT * FROM t1 ; COMMIT } + } {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20} + do_test wal-10.$tn.34 { + execsql { SELECT * FROM t1 } + } {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24} + + # Test that if a checkpointer cannot obtain the required locks, it + # releases all locks before returning a busy error. + # + do_test wal-10.$tn.35 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES('a', 'b'); + INSERT INTO t1 VALUES('c', 'd'); + } + sql2 { + BEGIN; + SELECT * FROM t1; + } + } {a b c d} + do_test wal-10.$tn.36 { + catchsql { PRAGMA wal_checkpoint } + } {0 {}} + do_test wal-10.$tn.36 { + sql3 { INSERT INTO t1 VALUES('e', 'f') } + sql2 { SELECT * FROM t1 } + } {a b c d} + do_test wal-10.$tn.37 { + sql2 COMMIT + execsql { PRAGMA wal_checkpoint } + } {} +} + +#------------------------------------------------------------------------- +# This block of tests, wal-11.*, test that nothing goes terribly wrong +# if frames must be written to the log file before a transaction is +# committed (in order to free up memory). +# +do_test wal-11.1 { + reopen_db + execsql { + PRAGMA cache_size = 10; + PRAGMA page_size = 1024; + CREATE TABLE t1(x PRIMARY KEY); + } + list [expr [file size test.db]/1024] [expr [file size test.db-wal]/1044] +} {1 3} +do_test wal-11.2 { + execsql { PRAGMA wal_checkpoint } + list [expr [file size test.db]/1024] [file size test.db-wal] +} [list 3 [wal_file_size 3 1024]] +do_test wal-11.3 { + execsql { INSERT INTO t1 VALUES( blob(900) ) } + list [expr [file size test.db]/1024] [file size test.db-wal] +} [list 3 [wal_file_size 4 1024]] + +do_test wal-11.4 { + execsql { + BEGIN; + INSERT INTO t1 SELECT blob(900) FROM t1; -- 2 + INSERT INTO t1 SELECT blob(900) FROM t1; -- 4 + INSERT INTO t1 SELECT blob(900) FROM t1; -- 8 + INSERT INTO t1 SELECT blob(900) FROM t1; -- 16 + } + list [expr [file size test.db]/1024] [file size test.db-wal] +} [list 3 [wal_file_size 32 1024]] +do_test wal-11.5 { + execsql { + SELECT count(*) FROM t1; + PRAGMA integrity_check; + } +} {16 ok} +do_test wal-11.6 { + execsql COMMIT + list [expr [file size test.db]/1024] [file size test.db-wal] +} [list 3 [wal_file_size 41 1024]] +do_test wal-11.7 { + execsql { + SELECT count(*) FROM t1; + PRAGMA integrity_check; + } +} {16 ok} +do_test wal-11.8 { + execsql { PRAGMA wal_checkpoint } + list [expr [file size test.db]/1024] [file size test.db-wal] +} [list 37 [wal_file_size 41 1024]] +do_test wal-11.9 { + db close + list [expr [file size test.db]/1024] [log_deleted test.db-wal] +} {37 1} +sqlite3_wal db test.db +do_test wal-11.10 { + execsql { + PRAGMA cache_size = 10; + BEGIN; + INSERT INTO t1 SELECT blob(900) FROM t1; -- 32 + SELECT count(*) FROM t1; + } + list [expr [file size test.db]/1024] [file size test.db-wal] +} [list 37 [wal_file_size 37 1024]] +do_test wal-11.11 { + execsql { + SELECT count(*) FROM t1; + ROLLBACK; + SELECT count(*) FROM t1; + } +} {32 16} +do_test wal-11.12 { + list [expr [file size test.db]/1024] [file size test.db-wal] +} [list 37 [wal_file_size 37 1024]] +do_test wal-11.13 { + execsql { + INSERT INTO t1 VALUES( blob(900) ); + SELECT count(*) FROM t1; + PRAGMA integrity_check; + } +} {17 ok} +do_test wal-11.14 { + list [expr [file size test.db]/1024] [file size test.db-wal] +} [list 37 [wal_file_size 37 1024]] + + +#------------------------------------------------------------------------- +# This block of tests, wal-12.*, tests the fix for a problem that +# could occur 
if a log that is a prefix of an older log is written +# into a reused log file. +# +reopen_db +do_test wal-12.1 { + execsql { + PRAGMA page_size = 1024; + CREATE TABLE t1(x, y); + CREATE TABLE t2(x, y); + INSERT INTO t1 VALUES('A', 1); + } + list [expr [file size test.db]/1024] [file size test.db-wal] +} [list 1 [wal_file_size 5 1024]] +do_test wal-12.2 { + db close + sqlite3 db test.db + execsql { + PRAGMA synchronous = normal; + UPDATE t1 SET y = 0 WHERE x = 'A'; + } + list [expr [file size test.db]/1024] [expr [file size test.db-wal]/1044] +} {3 1} +do_test wal-12.3 { + execsql { INSERT INTO t2 VALUES('B', 1) } + list [expr [file size test.db]/1024] [expr [file size test.db-wal]/1044] +} {3 2} +do_test wal-12.4 { + file copy -force test.db test2.db + file copy -force test.db-wal test2.db-wal + sqlite3_wal db2 test2.db + execsql { SELECT * FROM t2 } db2 +} {B 1} +db2 close +do_test wal-12.5 { + execsql { + PRAGMA wal_checkpoint; + UPDATE t2 SET y = 2 WHERE x = 'B'; + PRAGMA wal_checkpoint; + UPDATE t1 SET y = 1 WHERE x = 'A'; + PRAGMA wal_checkpoint; + UPDATE t1 SET y = 0 WHERE x = 'A'; + SELECT * FROM t2; + } +} {B 2} +do_test wal-12.6 { + file copy -force test.db test2.db + file copy -force test.db-wal test2.db-wal + sqlite3_wal db2 test2.db + execsql { SELECT * FROM t2 } db2 +} {B 2} +db2 close +db close + +#------------------------------------------------------------------------- +# Test large log summaries. +# +# In this case "large" usually means a log file that requires a wal-index +# mapping larger than 64KB (the default initial allocation). A 64KB wal-index +# is large enough for a log file that contains approximately 13100 frames. +# So the following tests create logs containing at least this many frames. +# +# wal-13.1.*: This test case creates a very large log file within the +# file-system (around 200MB). The log file does not contain +# any valid frames. Test that the database file can still be +# opened and queried, and that the invalid log file causes no +# problems. +# +# wal-13.2.*: Test that a process may create a large log file and query +# the database (including the log file that it itself created). +# +# wal-13.3.*: Test that if a very large log file is created, and then a +# second connection is opened on the database file, it is possible +# to query the database (and the very large log) using the +# second connection. +# +# wal-13.4.*: Same test as wal-13.3.*. Except in this case the second +# connection is opened by an external process. 
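The wal-10.* block above and the wal-13.* block below are both driven by do_multiclient_test together with the sql1/sql2/sql3 and code1/code2/code3 helpers. Those come from lock_common.tcl, which this patch adds but which is not reproduced in this excerpt. Roughly, do_multiclient_test runs its body twice: once with the second and third connections opened inside the current process, and once with them hosted in separate testfixture processes. Within the body, sqlN evaluates SQL against connection N and codeN evaluates arbitrary Tcl for that connection. A minimal sketch of the in-process leg, purely for illustration and not the real lock_common.tcl code:

    # Simplified, in-process-only approximation of the lock_common.tcl
    # helpers (the real code also handles the external-process case).
    proc sql2 {sql} { db2 eval $sql }
    proc sql3 {sql} { db3 eval $sql }
    proc code3 {script} { uplevel #0 $script }

This is why the code3 steps in wal-10.$tn.28 and wal-10.$tn.30 prepare and finalize their statement against db3 rather than db.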
+# +do_test wal-13.1.1 { + list [file exists test.db] [file exists test.db-wal] +} {1 0} +do_test wal-13.1.2 { + set fd [open test.db-wal w] + seek $fd [expr 200*1024*1024] + puts $fd "" + close $fd + sqlite3 db test.db + execsql { SELECT * FROM t2 } +} {B 2} +do_test wal-13.1.3 { + db close + file exists test.db-wal +} {0} + +do_test wal-13.2.1 { + sqlite3 db test.db + execsql { SELECT count(*) FROM t2 } +} {1} +do_test wal-13.2.2 { + db function blob blob + for {set i 0} {$i < 16} {incr i} { + execsql { INSERT INTO t2 SELECT blob(400), blob(400) FROM t2 } + } + execsql { SELECT count(*) FROM t2 } +} [expr int(pow(2, 16))] +do_test wal-13.2.3 { + expr [file size test.db-wal] > [wal_file_size 33000 1024] +} 1 + +do_multiclient_test tn { + incr tn 2 + + do_test wal-13.$tn.0 { + sql1 { + PRAGMA journal_mode = WAL; + CREATE TABLE t1(x); + INSERT INTO t1 SELECT randomblob(800); + } + sql1 { SELECT count(*) FROM t1 } + } {1} + + for {set ii 1} {$ii<16} {incr ii} { + do_test wal-13.$tn.$ii.a { + sql2 { INSERT INTO t1 SELECT randomblob(800) FROM t1 } + sql2 { SELECT count(*) FROM t1 } + } [expr (1<<$ii)] + do_test wal-13.$tn.$ii.b { + sql1 { SELECT count(*) FROM t1 } + } [expr (1<<$ii)] + do_test wal-13.$tn.$ii.c { + sql1 { SELECT count(*) FROM t1 } + } [expr (1<<$ii)] + do_test wal-13.$tn.$ii.d { + sql1 { PRAGMA integrity_check } + } {ok} + } +} + +#------------------------------------------------------------------------- +# Check a fun corruption case has been fixed. +# +# The problem was that after performing a checkpoint using a connection +# that had an out-of-date pager-cache, the next time the connection was +# used it did not realize the cache was out-of-date and proceeded to +# operate with an inconsistent cache. Leading to corruption. +# +catch { db close } +catch { db2 close } +catch { db3 close } +file delete -force test.db test.db-wal +sqlite3 db test.db +sqlite3 db2 test.db +do_test wal-14 { + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE t1(a PRIMARY KEY, b); + INSERT INTO t1 VALUES(randomblob(10), randomblob(100)); + INSERT INTO t1 SELECT randomblob(10), randomblob(100) FROM t1; + INSERT INTO t1 SELECT randomblob(10), randomblob(100) FROM t1; + INSERT INTO t1 SELECT randomblob(10), randomblob(100) FROM t1; + } + + db2 eval { + INSERT INTO t1 SELECT randomblob(10), randomblob(100); + INSERT INTO t1 SELECT randomblob(10), randomblob(100); + INSERT INTO t1 SELECT randomblob(10), randomblob(100); + INSERT INTO t1 SELECT randomblob(10), randomblob(100); + } + + # After executing the "PRAGMA wal_checkpoint", connection [db] was being + # left with an inconsistent cache. Running the CREATE INDEX statement + # in this state led to database corruption. + catchsql { + PRAGMA wal_checkpoint; + CREATE INDEX i1 on t1(b); + } + + db2 eval { PRAGMA integrity_check } +} {ok} + +catch { db close } +catch { db2 close } + +#------------------------------------------------------------------------- +# The following block of tests - wal-15.* - focus on testing the +# implementation of the sqlite3_wal_checkpoint() interface. 
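The Tcl commands used in the wal-15.* tests (sqlite3_wal_checkpoint, sqlite3_errcode, sqlite3_errmsg) are thin wrappers around the corresponding C functions. As a rough sketch of the application-level calling pattern these tests exercise, with the helper name and error handling invented for illustration:

    /* Sketch: checkpointing a WAL database via the C interface. */
    #include <stdio.h>
    #include "sqlite3.h"

    /* Checkpoint the named database ("main", "temp" or an ATTACH name).
    ** Passing a NULL or empty name asks for all attached databases. */
    static int checkpoint_db(sqlite3 *db, const char *zDb){
      int rc = sqlite3_wal_checkpoint(db, zDb);
      if( rc!=SQLITE_OK ){
        /* e.g. SQLITE_ERROR for an unrecognized database name, or
        ** SQLITE_LOCKED if this connection has an open transaction on
        ** the database being checkpointed (see wal-15.2 and wal-15.3). */
        fprintf(stderr, "checkpoint(%s): %s\n", zDb ? zDb : "<all>",
                sqlite3_errmsg(db));
      }
      return rc;
    }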
+# +file delete -force test.db test.db-wal +sqlite3 db test.db +do_test wal-15.1 { + execsql { + PRAGMA auto_vacuum = 0; + PRAGMA page_size = 1024; + PRAGMA journal_mode = WAL; + } + execsql { + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + } +} {} + +# Test that an error is returned if the database name is not recognized +# +do_test wal-15.2.1 { + sqlite3_wal_checkpoint db aux +} {SQLITE_ERROR} +do_test wal-15.2.2 { + sqlite3_errcode db +} {SQLITE_ERROR} +do_test wal-15.2.3 { + sqlite3_errmsg db +} {unknown database: aux} + +# Test that an error is returned if an attempt is made to checkpoint +# while a transaction is open on the database. +# +do_test wal-15.3.1 { + execsql { + BEGIN; + INSERT INTO t1 VALUES(3, 4); + } + sqlite3_wal_checkpoint db main +} {SQLITE_LOCKED} +do_test wal-15.3.2 { + sqlite3_errcode db +} {SQLITE_LOCKED} +do_test wal-15.3.3 { + sqlite3_errmsg db +} {database table is locked} + +# Earlier versions returned an error if the db could not be +# checkpointed because of locks held by another connection. Check that +# this is no longer the case. +# +sqlite3 db2 test.db +do_test wal-15.4.1 { + execsql { + BEGIN; + SELECT * FROM t1; + } db2 +} {1 2} +do_test wal-15.4.2 { + execsql { COMMIT } + sqlite3_wal_checkpoint db +} {SQLITE_OK} +do_test wal-15.4.3 { + sqlite3_errmsg db +} {not an error} + +# After [db2] drops its lock, [db] may checkpoint the db. +# +do_test wal-15.4.4 { + execsql { COMMIT } db2 + sqlite3_wal_checkpoint db +} {SQLITE_OK} +do_test wal-15.4.5 { + sqlite3_errmsg db +} {not an error} +do_test wal-15.4.6 { + file size test.db +} [expr 1024*2] + +catch { db2 close } +catch { db close } + +#------------------------------------------------------------------------- +# The following block of tests - wal-16.* - tests that if a NULL pointer or +# an empty string is passed as the second argument of the wal_checkpoint() +# API, an attempt is made to checkpoint all attached databases. +# +foreach {tn ckpt_cmd ckpt_res ckpt_main ckpt_aux} { + 1 {sqlite3_wal_checkpoint db} SQLITE_OK 1 1 + 2 {sqlite3_wal_checkpoint db ""} SQLITE_OK 1 1 + 3 {db eval "PRAGMA wal_checkpoint"} {} 1 1 + + 4 {sqlite3_wal_checkpoint db main} SQLITE_OK 1 0 + 5 {sqlite3_wal_checkpoint db aux} SQLITE_OK 0 1 + 6 {sqlite3_wal_checkpoint db temp} SQLITE_OK 0 0 + 7 {db eval "PRAGMA main.wal_checkpoint"} {} 1 0 + 8 {db eval "PRAGMA aux.wal_checkpoint"} {} 0 1 + 9 {db eval "PRAGMA temp.wal_checkpoint"} {} 0 0 +} { + do_test wal-16.$tn.1 { + file delete -force test2.db test2.db-wal test2.db-journal + file delete -force test.db test.db-wal test.db-journal + + sqlite3 db test.db + execsql { + ATTACH 'test2.db' AS aux; + PRAGMA main.auto_vacuum = 0; + PRAGMA aux.auto_vacuum = 0; + PRAGMA main.journal_mode = WAL; + PRAGMA aux.journal_mode = WAL; + PRAGMA synchronous = NORMAL; + } + } {wal wal} + + do_test wal-16.$tn.2 { + execsql { + CREATE TABLE main.t1(a, b, PRIMARY KEY(a, b)); + CREATE TABLE aux.t2(a, b, PRIMARY KEY(a, b)); + + INSERT INTO t2 VALUES(1, randomblob(1000)); + INSERT INTO t2 VALUES(2, randomblob(1000)); + INSERT INTO t1 SELECT * FROM t2; + } + + list [file size test.db] [file size test.db-wal] + } [list [expr 1*1024] [wal_file_size 10 1024]] + do_test wal-16.$tn.3 { + list [file size test2.db] [file size test2.db-wal] + } [list [expr 1*1024] [wal_file_size 16 1024]] + + do_test wal-16.$tn.4 [list eval $ckpt_cmd] $ckpt_res + + do_test wal-16.$tn.5 { + list [file size test.db] [file size test.db-wal] + } [list [expr ($ckpt_main ?
7 : 1)*1024] [wal_file_size 10 1024]] + + do_test wal-16.$tn.6 { + list [file size test2.db] [file size test2.db-wal] + } [list [expr ($ckpt_aux ? 7 : 1)*1024] [wal_file_size 16 1024]] + + catch { db close } +} + +#------------------------------------------------------------------------- +# The following tests - wal-17.* - attempt to verify that the correct +# number of "padding" frames are appended to the log file when a transaction +# is committed in synchronous=FULL mode. +# +# Do this by creating a database that uses 512 byte pages. Then writing +# a transaction that modifies 171 pages. In synchronous=NORMAL mode, this +# produces a log file of: +# +# 32 + (24+512)*171 = 91688 bytes. +# +# Slightly larger than 11*8192 = 90112 bytes. +# +# Run the test using various different sector-sizes. In each case, the +# WAL code should write the 91688 bytes of log file containing the +# transaction, then append as many frames as are required to extend the +# log file so that no part of the next transaction will be written into +# a disk-sector used by the transaction just committed. +# +set old_pending_byte [sqlite3_test_control_pending_byte 0x10000000] +catch { db close } +foreach {tn sectorsize logsize} " + 1 128 [wal_file_size 172 512] + 2 256 [wal_file_size 172 512] + 3 512 [wal_file_size 172 512] + 4 1024 [wal_file_size 172 512] + 5 2048 [wal_file_size 172 512] + 6 4096 [wal_file_size 176 512] + 7 8192 [wal_file_size 184 512] +" { + file delete -force test.db test.db-wal test.db-journal + sqlite3_simulate_device -sectorsize $sectorsize + sqlite3 db test.db -vfs devsym + + do_test wal-17.$tn.1 { + execsql { + PRAGMA auto_vacuum = 0; + PRAGMA page_size = 512; + PRAGMA journal_mode = WAL; + PRAGMA synchronous = FULL; + } + execsql { + BEGIN; + CREATE TABLE t(x); + } + for {set i 0} {$i<166} {incr i} { + execsql { INSERT INTO t VALUES(randomblob(400)) } + } + execsql COMMIT + + file size test.db-wal + } $logsize + + do_test wal-17.$tn.2 { + file size test.db + } 512 + + do_test wal-17.$tn.3 { + db close + file size test.db + } [expr 512*171] +} +sqlite3_test_control_pending_byte $old_pending_byte + +#------------------------------------------------------------------------- +# This test - wal-18.* - verifies that a couple of specific conditions that +# may be encountered while recovering a log file are handled correctly: +# +# wal-18.1.* When the first 32-bits of a frame checksum are correct but +# the second 32-bits are incorrect, and +# +# wal-18.2.* When the page-size field that occurs at the start of a log +# file is a power of 2 greater than 16384 or smaller than 512.
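The expected sizes in these tests come from the [wal_file_size] helper in wal_common.tcl: a 32-byte WAL header plus, for every frame, a 24-byte frame header followed by one page image. The same arithmetic written out as a small C helper, for reference only:

    /* Sketch: size of a WAL file holding nFrame frames of pgsz-byte pages. */
    #include <stdio.h>

    static long wal_file_size(long nFrame, long pgsz){
      return 32 + nFrame*(24 + pgsz);   /* file header + per-frame overhead */
    }

    int main(void){
      /* The wal-17 example: 171 frames of 512 bytes -> 32 + 171*536 = 91688. */
      printf("%ld\n", wal_file_size(171, 512));
      return 0;
    }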
+# +file delete -force test.db test.db-wal test.db-journal +do_test wal-18.0 { + sqlite3 db test.db + execsql { + PRAGMA page_size = 1024; + PRAGMA auto_vacuum = 0; + PRAGMA journal_mode = WAL; + PRAGMA synchronous = OFF; + + CREATE TABLE t1(a, b, UNIQUE(a, b)); + INSERT INTO t1 VALUES(0, 0); + PRAGMA wal_checkpoint; + + INSERT INTO t1 VALUES(1, 2); -- frames 1 and 2 + INSERT INTO t1 VALUES(3, 4); -- frames 3 and 4 + INSERT INTO t1 VALUES(5, 6); -- frames 5 and 6 + } + + file copy -force test.db testX.db + file copy -force test.db-wal testX.db-wal + db close + list [file size testX.db] [file size testX.db-wal] +} [list [expr 3*1024] [wal_file_size 6 1024]] + +unset -nocomplain nFrame result +foreach {nFrame result} { + 0 {0 0} + 1 {0 0} + 2 {0 0 1 2} + 3 {0 0 1 2} + 4 {0 0 1 2 3 4} + 5 {0 0 1 2 3 4} + 6 {0 0 1 2 3 4 5 6} +} { + do_test wal-18.1.$nFrame { + file copy -force testX.db test.db + file copy -force testX.db-wal test.db-wal + + hexio_write test.db-wal [expr 24 + $nFrame*(24+1024) + 20] 00000000 + + sqlite3 db test.db + execsql { + SELECT * FROM t1; + PRAGMA integrity_check; + } + } [concat $result ok] + db close +} + +proc randomblob {pgsz} { + sqlite3 rbdb :memory: + set blob [rbdb one {SELECT randomblob($pgsz)}] + rbdb close + set blob +} + +proc logcksum {ckv1 ckv2 blob} { + upvar $ckv1 c1 + upvar $ckv2 c2 + + set scanpattern I* + if {$::tcl_platform(byteOrder) eq "littleEndian"} { + set scanpattern i* + } + + binary scan $blob $scanpattern values + foreach {v1 v2} $values { + set c1 [expr {($c1 + $v1 + $c2)&0xFFFFFFFF}] + set c2 [expr {($c2 + $v2 + $c1)&0xFFFFFFFF}] + } +} + +file copy -force test.db testX.db +foreach {tn pgsz works} { + 1 128 0 + 2 256 0 + 3 512 1 + 4 1024 1 + 5 2048 1 + 6 4096 1 + 7 8192 1 + 8 16384 1 + 9 32768 1 + 10 65536 0 + 11 1016 0 +} { + + if {$::SQLITE_MAX_PAGE_SIZE < $pgsz} { + set works 0 + } + + for {set pg 1} {$pg <= 3} {incr pg} { + file copy -force testX.db test.db + file delete -force test.db-wal + + # Check that the database now exists and consists of three pages. And + # that there is no associated wal file. + # + do_test wal-18.2.$tn.$pg.1 { file exists test.db-wal } 0 + do_test wal-18.2.$tn.$pg.2 { file exists test.db } 1 + do_test wal-18.2.$tn.$pg.3 { file size test.db } [expr 1024*3] + + do_test wal-18.2.$tn.$pg.4 { + + # Create a wal file that contains a single frame (database page + # number $pg) with the commit flag set. The frame checksum is + # correct, but the contents of the database page are corrupt. + # + # The page-size in the log file header is set to $pgsz. If the + # WAL code considers $pgsz to be a valid SQLite database file page-size, + # the database will be corrupt (because the garbage frame contents + # will be treated as valid content). If $pgsz is invalid (too small + # or too large), the db will not be corrupt as the log file will + # be ignored. 
+ # + set walhdr [binary format IIIIII 931071618 3007000 $pgsz 1234 22 23] + set framebody [randomblob $pgsz] + set framehdr [binary format IIII $pg 5 22 23] + set c1 0 + set c2 0 + logcksum c1 c2 $walhdr + + append walhdr [binary format II $c1 $c2] + logcksum c1 c2 [string range $framehdr 0 7] + logcksum c1 c2 $framebody + set framehdr [binary format IIIIII $pg 5 22 23 $c1 $c2] + + set fd [open test.db-wal w] + fconfigure $fd -encoding binary -translation binary + puts -nonewline $fd $walhdr + puts -nonewline $fd $framehdr + puts -nonewline $fd $framebody + close $fd + + file size test.db-wal + } [wal_file_size 1 $pgsz] + + do_test wal-18.2.$tn.$pg.5 { + sqlite3 db test.db + set rc [catch { db one {PRAGMA integrity_check} } msg] + expr { $rc!=0 || $msg!="ok" } + } $works + + db close + } +} + +#------------------------------------------------------------------------- +# The following test - wal-19.* - fixes a bug that was present during +# development. +# +# When a database connection in WAL mode is closed, it attempts an +# EXCLUSIVE lock on the database file. If the lock is obtained, the +# connection knows that it is the last connection to disconnect from +# the database, so it runs a checkpoint operation. The bug was that +# the connection was not updating its private copy of the wal-index +# header before doing so, meaning that it could checkpoint an old +# snapshot. +# +do_test wal-19.1 { + file delete -force test.db test.db-wal test.db-journal + sqlite3 db test.db + sqlite3 db2 test.db + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + INSERT INTO t1 VALUES(3, 4); + } + execsql { SELECT * FROM t1 } db2 +} {1 2 3 4} +do_test wal-19.2 { + execsql { + INSERT INTO t1 VALUES(5, 6); + SELECT * FROM t1; + } +} {1 2 3 4 5 6} +do_test wal-19.3 { + db close + db2 close + file exists test.db-wal +} {0} +do_test wal-19.4 { + # When the bug was present, the following was returning {1 2 3 4} only, + # as [db2] had an out-of-date copy of the wal-index header when it was + # closed. + # + sqlite3 db test.db + execsql { SELECT * FROM t1 } +} {1 2 3 4 5 6} + +#------------------------------------------------------------------------- +# This test - wal-20.* - uses two connections. One in this process and +# the other in an external process. The procedure is: +# +# 1. Using connection 1, create the database schema. +# +# 2. Using connection 2 (in an external process), add so much +# data to the database without checkpointing that a wal-index +# larger than 64KB is required. +# +# 3. Using connection 1, checkpoint the database. Make sure all +# the data is present and the database is not corrupt. +# +# At one point, SQLite was failing to grow the mapping of the wal-index +# file in step 3 and the checkpoint was corrupting the database file. 
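Step 2 above works because the external process turns automatic checkpointing off, so the entire transaction accumulates in the log. In C the same pairing of sqlite3_wal_autocheckpoint() with an explicit checkpoint looks roughly like the sketch below; it assumes the single-column table t1 created in wal-20.1, and the helper name is invented for the example.

    /* Sketch: grow a large WAL without intermediate checkpoints, then
    ** checkpoint it explicitly once the transaction has committed. */
    #include "sqlite3.h"

    static int bulk_load(sqlite3 *db){
      int i, rc;
      sqlite3_wal_autocheckpoint(db, 0);   /* same as PRAGMA wal_autocheckpoint=0 */
      rc = sqlite3_exec(db, "BEGIN", 0, 0, 0);
      for(i=0; rc==SQLITE_OK && i<14; i++){
        /* Doubles the row count each time, as the test script does. */
        rc = sqlite3_exec(db, "INSERT INTO t1 SELECT randomblob(900) FROM t1", 0, 0, 0);
      }
      if( rc==SQLITE_OK ) rc = sqlite3_exec(db, "COMMIT", 0, 0, 0);
      if( rc==SQLITE_OK ) rc = sqlite3_wal_checkpoint(db, "main");
      return rc;
    }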
+# +do_test wal-20.1 { + catch {db close} + file delete -force test.db test.db-wal test.db-journal + sqlite3 db test.db + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(randomblob(900)); + SELECT count(*) FROM t1; + } +} {wal 1} +do_test wal-20.2 { + set ::buddy [launch_testfixture] + testfixture $::buddy { + sqlite3 db test.db + db transaction { db eval { + PRAGMA wal_autocheckpoint = 0; + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 2 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 4 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 8 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 16 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 32 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 64 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 128 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 256 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 512 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 1024 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 2048 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 4096 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 8192 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 16384 */ + } } + } +} {0} +do_test wal-20.3 { + close $::buddy + execsql { + PRAGMA wal_checkpoint; + SELECT count(*) FROM t1; + } +} {16384} +do_test wal-20.4 { + db close + sqlite3 db test.db + execsql { SELECT count(*) FROM t1 } +} {16384} +integrity_check wal-20.5 + +catch { db2 close } +catch { db close } + +do_test wal-21.1 { + faultsim_delete_and_reopen + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + INSERT INTO t1 VALUES(3, 4); + INSERT INTO t1 VALUES(5, 6); + INSERT INTO t1 VALUES(7, 8); + INSERT INTO t1 VALUES(9, 10); + INSERT INTO t1 VALUES(11, 12); + } +} {wal} +do_test wal-21.2 { + execsql { + PRAGMA cache_size = 10; + PRAGMA wal_checkpoint; + BEGIN; + SAVEPOINT s; + INSERT INTO t1 SELECT randomblob(900), randomblob(900) FROM t1; + ROLLBACK TO s; + COMMIT; + SELECT * FROM t1; + } +} {1 2 3 4 5 6 7 8 9 10 11 12} +do_test wal-21.3 { + execsql { PRAGMA integrity_check } +} {ok} + +finish_test diff --git a/test/wal2.test b/test/wal2.test new file mode 100644 index 0000000..c4d7fa7 --- /dev/null +++ b/test/wal2.test @@ -0,0 +1,1151 @@ +# 2010 May 5 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the operation of the library in +# "PRAGMA journal_mode=WAL" mode. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/lock_common.tcl +source $testdir/malloc_common.tcl +source $testdir/wal_common.tcl + +ifcapable !wal {finish_test ; return } + +proc set_tvfs_hdr {file args} { + + # Set $nHdr to the number of bytes in the wal-index header: + set nHdr 48 + set nInt [expr {$nHdr/4}] + + if {[llength $args]>2} { + error {wrong # args: should be "set_tvfs_hdr fileName ?val1? 
?val2?"} + } + + set blob [tvfs shm $file] + + if {[llength $args]} { + set ia [lindex $args 0] + set ib $ia + if {[llength $args]==2} { + set ib [lindex $args 1] + } + binary scan $blob a[expr $nHdr*2]a* dummy tail + set blob [binary format i${nInt}i${nInt}a* $ia $ib $tail] + tvfs shm $file $blob + } + + binary scan $blob i${nInt} ints + return $ints +} + +proc incr_tvfs_hdr {file idx incrval} { + set ints [set_tvfs_hdr $file] + set v [lindex $ints $idx] + incr v $incrval + lset ints $idx $v + set_tvfs_hdr $file $ints +} + + +#------------------------------------------------------------------------- +# Test case wal2-1.*: +# +# Set up a small database containing a single table. The database is not +# checkpointed during the test - all content resides in the log file. +# +# Two connections are established to the database file - a writer ([db]) +# and a reader ([db2]). For each of the 8 integer fields in the wal-index +# header (6 fields and 2 checksum values), do the following: +# +# 1. Modify the database using the writer. +# +# 2. Attempt to read the database using the reader. Before the reader +# has a chance to snapshot the wal-index header, increment one +# of the the integer fields (so that the reader ends up with a corrupted +# header). +# +# 3. Check that the reader recovers the wal-index and reads the correct +# database content. +# +do_test wal2-1.0 { + proc tvfs_cb {method filename args} { + set ::filename $filename + return SQLITE_OK + } + + testvfs tvfs + tvfs script tvfs_cb + tvfs filter xShmOpen + + sqlite3 db test.db -vfs tvfs + sqlite3 db2 test.db -vfs tvfs + + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE t1(a); + } db2 + execsql { + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + INSERT INTO t1 VALUES(4); + SELECT count(a), sum(a) FROM t1; + } +} {4 10} +do_test wal2-1.1 { + execsql { SELECT count(a), sum(a) FROM t1 } db2 +} {4 10} + +set RECOVER [list \ + {0 1 lock exclusive} {1 7 lock exclusive} \ + {1 7 unlock exclusive} {0 1 unlock exclusive} \ +] +set READ [list \ + {4 1 lock exclusive} {4 1 unlock exclusive} \ + {4 1 lock shared} {4 1 unlock shared} \ +] + +foreach {tn iInsert res wal_index_hdr_mod wal_locks} " + 2 5 {5 15} 0 {$RECOVER $READ} + 3 6 {6 21} 1 {$RECOVER $READ} + 4 7 {7 28} 2 {$RECOVER $READ} + 5 8 {8 36} 3 {$RECOVER $READ} + 6 9 {9 45} 4 {$RECOVER $READ} + 7 10 {10 55} 5 {$RECOVER $READ} + 8 11 {11 66} 6 {$RECOVER $READ} + 9 12 {12 78} 7 {$RECOVER $READ} + 10 13 {13 91} 8 {$RECOVER $READ} + 11 14 {14 105} 9 {$RECOVER $READ} + 12 15 {15 120} -1 {$READ} +" { + + do_test wal2-1.$tn.1 { + execsql { INSERT INTO t1 VALUES($iInsert) } + set ::locks [list] + proc tvfs_cb {method args} { + lappend ::locks [lindex $args 2] + return SQLITE_OK + } + tvfs filter xShmLock + if {$::wal_index_hdr_mod >= 0} { + incr_tvfs_hdr $::filename $::wal_index_hdr_mod 1 + } + execsql { SELECT count(a), sum(a) FROM t1 } db2 + } $res + + do_test wal2-1.$tn.2 { + set ::locks + } $wal_locks +} +db close +db2 close +tvfs delete +file delete -force test.db test.db-wal test.db-journal + +#------------------------------------------------------------------------- +# This test case is very similar to the previous one, except, after +# the reader reads the corrupt wal-index header, but before it has +# a chance to re-read it under the cover of the RECOVER lock, the +# wal-index header is replaced with a valid, but out-of-date, header. 
+# +# Because the header checksum looks Ok, the reader does not run recovery, +# it simply drops back to a READ lock and proceeds. But because the +# header is out-of-date, the reader reads the out-of-date snapshot. +# +# After this, the header is corrupted again and the reader is allowed +# to run recovery. This time, it sees an up-to-date snapshot of the +# database file. +# +set WRITER [list 0 1 lock exclusive] +set LOCKS [list \ + {0 1 lock exclusive} {0 1 unlock exclusive} \ + {4 1 lock exclusive} {4 1 unlock exclusive} \ + {4 1 lock shared} {4 1 unlock shared} \ +] +do_test wal2-2.0 { + + testvfs tvfs + tvfs script tvfs_cb + tvfs filter xShmOpen + proc tvfs_cb {method args} { + set ::filename [lindex $args 0] + return SQLITE_OK + } + + sqlite3 db test.db -vfs tvfs + sqlite3 db2 test.db -vfs tvfs + + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE t1(a); + } db2 + execsql { + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + INSERT INTO t1 VALUES(4); + SELECT count(a), sum(a) FROM t1; + } +} {4 10} +do_test wal2-2.1 { + execsql { SELECT count(a), sum(a) FROM t1 } db2 +} {4 10} + +foreach {tn iInsert res0 res1 wal_index_hdr_mod} { + 2 5 {4 10} {5 15} 0 + 3 6 {5 15} {6 21} 1 + 4 7 {6 21} {7 28} 2 + 5 8 {7 28} {8 36} 3 + 6 9 {8 36} {9 45} 4 + 7 10 {9 45} {10 55} 5 + 8 11 {10 55} {11 66} 6 + 9 12 {11 66} {12 78} 7 +} { + tvfs filter xShmLock + + do_test wal2-2.$tn.1 { + set oldhdr [set_tvfs_hdr $::filename] + execsql { INSERT INTO t1 VALUES($iInsert) } + execsql { SELECT count(a), sum(a) FROM t1 } + } $res1 + + do_test wal2-2.$tn.2 { + set ::locks [list] + proc tvfs_cb {method args} { + set lock [lindex $args 2] + lappend ::locks $lock + if {$lock == $::WRITER} { + set_tvfs_hdr $::filename $::oldhdr + } + return SQLITE_OK + } + + if {$::wal_index_hdr_mod >= 0} { + incr_tvfs_hdr $::filename $::wal_index_hdr_mod 1 + } + execsql { SELECT count(a), sum(a) FROM t1 } db2 + } $res0 + + do_test wal2-2.$tn.3 { + set ::locks + } $LOCKS + + do_test wal2-2.$tn.4 { + set ::locks [list] + proc tvfs_cb {method args} { + set lock [lindex $args 2] + lappend ::locks $lock + return SQLITE_OK + } + + if {$::wal_index_hdr_mod >= 0} { + incr_tvfs_hdr $::filename $::wal_index_hdr_mod 1 + } + execsql { SELECT count(a), sum(a) FROM t1 } db2 + } $res1 +} +db close +db2 close +tvfs delete +file delete -force test.db test.db-wal test.db-journal + + +if 0 { +#------------------------------------------------------------------------- +# This test case - wal2-3.* - tests the response of the library to an +# SQLITE_BUSY when attempting to obtain a READ or RECOVER lock. 
+# +# wal2-3.0 - 2: SQLITE_BUSY when obtaining a READ lock +# wal2-3.3 - 6: SQLITE_BUSY when obtaining a RECOVER lock +# +do_test wal2-3.0 { + proc tvfs_cb {method args} { + if {$method == "xShmLock"} { + if {[info exists ::locked]} { return SQLITE_BUSY } + } + return SQLITE_OK + } + + proc busyhandler x { + if {$x>3} { unset -nocomplain ::locked } + return 0 + } + + testvfs tvfs + tvfs script tvfs_cb + sqlite3 db test.db -vfs tvfs + db busy busyhandler + + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE t1(a); + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + INSERT INTO t1 VALUES(4); + } + + set ::locked 1 + info exists ::locked +} {1} +do_test wal2-3.1 { + execsql { SELECT count(a), sum(a) FROM t1 } +} {4 10} +do_test wal2-3.2 { + info exists ::locked +} {0} + +do_test wal2-3.3 { + proc tvfs_cb {method args} { + if {$method == "xShmLock"} { + if {[info exists ::sabotage]} { + unset -nocomplain ::sabotage + incr_tvfs_hdr [lindex $args 0] 1 1 + } + if {[info exists ::locked] && [lindex $args 2] == "RECOVER"} { + return SQLITE_BUSY + } + } + return SQLITE_OK + } + set ::sabotage 1 + set ::locked 1 + list [info exists ::sabotage] [info exists ::locked] +} {1 1} +do_test wal2-3.4 { + execsql { SELECT count(a), sum(a) FROM t1 } +} {4 10} +do_test wal2-3.5 { + list [info exists ::sabotage] [info exists ::locked] +} {0 0} +db close +tvfs delete +file delete -force test.db test.db-wal test.db-journal + +} + +#------------------------------------------------------------------------- +# Test that a database connection using a VFS that does not support the +# xShmXXX interfaces cannot open a WAL database. +# +do_test wal2-4.1 { + sqlite3 db test.db + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE data(x); + INSERT INTO data VALUES('need xShmOpen to see this'); + PRAGMA wal_checkpoint; + } +} {wal} +do_test wal2-4.2 { + db close + testvfs tvfs -noshm 1 + sqlite3 db test.db -vfs tvfs + catchsql { SELECT * FROM data } +} {1 {unable to open database file}} +do_test wal2-4.3 { + db close + testvfs tvfs + sqlite3 db test.db -vfs tvfs + catchsql { SELECT * FROM data } +} {0 {{need xShmOpen to see this}}} +db close +tvfs delete + +#------------------------------------------------------------------------- +# Test that if a database connection is forced to run recovery before it +# can perform a checkpoint, it does not transition into RECOVER state. +# +# UPDATE: This has now changed. When running a checkpoint, if recovery is +# required the client grabs all exclusive locks (just as it would for a +# recovery performed as a pre-cursor to a normal database transaction). 
+# +set expected_locks [list] +lappend expected_locks {1 1 lock exclusive} ;# Lock checkpoint +lappend expected_locks {0 1 lock exclusive} ;# Lock writer +lappend expected_locks {2 6 lock exclusive} ;# Lock recovery & all aReadMark[] +lappend expected_locks {2 6 unlock exclusive} ;# Unlock recovery & aReadMark[] +lappend expected_locks {0 1 unlock exclusive} ;# Unlock writer +lappend expected_locks {3 1 lock exclusive} ;# Lock aReadMark[0] +lappend expected_locks {3 1 unlock exclusive} ;# Unlock aReadMark[0] +lappend expected_locks {1 1 unlock exclusive} ;# Unlock checkpoint +do_test wal2-5.1 { + proc tvfs_cb {method args} { + set ::shm_file [lindex $args 0] + if {$method == "xShmLock"} { lappend ::locks [lindex $args 2] } + return $::tvfs_cb_return + } + set tvfs_cb_return SQLITE_OK + + testvfs tvfs + tvfs script tvfs_cb + + sqlite3 db test.db -vfs tvfs + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE x(y); + INSERT INTO x VALUES(1); + } + + incr_tvfs_hdr $::shm_file 1 1 + set ::locks [list] + execsql { PRAGMA wal_checkpoint } + set ::locks +} $expected_locks +db close +tvfs delete + +#------------------------------------------------------------------------- +# This block, test cases wal2-6.*, tests the operation of WAL with +# "PRAGMA locking_mode=EXCLUSIVE" set. +# +# wal2-6.1.*: Changing to WAL mode before setting locking_mode=exclusive. +# +# wal2-6.2.*: Changing to WAL mode after setting locking_mode=exclusive. +# +# wal2-6.3.*: Changing back to rollback mode from WAL mode after setting +# locking_mode=exclusive. +# +# wal2-6.4.*: Check that xShmLock calls are omitted in exclusive locking +# mode. +# +# wal2-6.5.*: +# +# wal2-6.6.*: Check that if the xShmLock() to reaquire a WAL read-lock when +# exiting exclusive mode fails (i.e. SQLITE_IOERR), then the +# connection silently remains in exclusive mode. 
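Before the wal2-6.* cases start, it is worth spelling out the application-level sequence they model: a connection that combines journal_mode=WAL with locking_mode=EXCLUSIVE. The sketch below is only an illustration of that sequence (invented helper name, no error handling beyond the return code). As wal2-6.1 and wal2-6.2 show, the two pragmas may be issued in either order, and the exclusive lock is only actually taken by the first transaction that follows.

    /* Sketch: open a WAL database in exclusive locking mode. */
    #include "sqlite3.h"

    static int open_exclusive_wal(const char *zFile, sqlite3 **pDb){
      int rc = sqlite3_open(zFile, pDb);
      if( rc==SQLITE_OK ){
        rc = sqlite3_exec(*pDb,
            "PRAGMA journal_mode=WAL;"
            "PRAGMA locking_mode=EXCLUSIVE;", 0, 0, 0);
      }
      if( rc==SQLITE_OK ){
        /* First database access: from here on this connection holds the
        ** file exclusively until locking_mode is set back to NORMAL and
        ** another transaction is run (see wal2-6.1.6 and wal2-6.2.7). */
        rc = sqlite3_exec(*pDb, "CREATE TABLE IF NOT EXISTS t1(a,b)", 0, 0, 0);
      }
      return rc;
    }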
+# +do_test wal2-6.1.1 { + file delete -force test.db test.db-wal test.db-journal + sqlite3 db test.db + execsql { + Pragma Journal_Mode = Wal; + Pragma Locking_Mode = Exclusive; + } +} {wal exclusive} +do_test wal2-6.1.2 { + execsql { PRAGMA lock_status } +} {main unlocked temp closed} +do_test wal2-6.1.3 { + execsql { + BEGIN; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + COMMIT; + PRAGMA lock_status; + } +} {main exclusive temp closed} +do_test wal2-6.1.4 { + execsql { + PRAGMA locking_mode = normal; + PRAGMA lock_status; + } +} {normal main exclusive temp closed} +do_test wal2-6.1.5 { + execsql { + SELECT * FROM t1; + PRAGMA lock_status; + } +} {1 2 main exclusive temp closed} +do_test wal2-6.1.6 { + execsql { + INSERT INTO t1 VALUES(3, 4); + PRAGMA lock_status; + } +} {main shared temp closed} +db close + +do_test wal2-6.2.1 { + file delete -force test.db test.db-wal test.db-journal + sqlite3 db test.db + execsql { + Pragma Locking_Mode = Exclusive; + Pragma Journal_Mode = Wal; + Pragma Lock_Status; + } +} {exclusive wal main exclusive temp closed} +do_test wal2-6.2.2 { + execsql { + BEGIN; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + COMMIT; + Pragma loCK_STATus; + } +} {main exclusive temp closed} +do_test wal2-6.2.3 { + db close + sqlite3 db test.db + execsql { PRAGMA LOCKING_MODE = EXCLUSIVE } +} {exclusive} +do_test wal2-6.2.4 { + execsql { + SELECT * FROM t1; + pragma lock_status; + } +} {1 2 main shared temp closed} +do_test wal2-6.2.5 { + execsql { + INSERT INTO t1 VALUES(3, 4); + pragma lock_status; + } +} {main exclusive temp closed} +do_test wal2-6.2.6 { + execsql { + PRAGMA locking_mode = NORMAL; + pragma lock_status; + } +} {normal main exclusive temp closed} +do_test wal2-6.2.7 { + execsql { + BEGIN IMMEDIATE; COMMIT; + pragma lock_status; + } +} {main shared temp closed} +do_test wal2-6.2.8 { + execsql { + PRAGMA locking_mode = EXCLUSIVE; + BEGIN IMMEDIATE; COMMIT; + PRAGMA locking_mode = NORMAL; + } + execsql { + SELECT * FROM t1; + pragma lock_status; + } +} {1 2 3 4 main exclusive temp closed} +do_test wal2-6.2.9 { + execsql { + INSERT INTO t1 VALUES(5, 6); + SELECT * FROM t1; + pragma lock_status; + } +} {1 2 3 4 5 6 main shared temp closed} +db close + +do_test wal2-6.3.1 { + file delete -force test.db test.db-wal test.db-journal + sqlite3 db test.db + execsql { + PRAGMA journal_mode = WAL; + PRAGMA locking_mode = exclusive; + BEGIN; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES('Chico'); + INSERT INTO t1 VALUES('Harpo'); + COMMIT; + } + list [file exists test.db-wal] [file exists test.db-journal] +} {1 0} +do_test wal2-6.3.2 { + execsql { PRAGMA journal_mode = DELETE } + file exists test.db-wal +} {0} +do_test wal2-6.3.3 { + execsql { PRAGMA lock_status } +} {main exclusive temp closed} +do_test wal2-6.3.4 { + execsql { + BEGIN; + INSERT INTO t1 VALUES('Groucho'); + } + list [file exists test.db-wal] [file exists test.db-journal] +} {0 1} +do_test wal2-6.3.5 { + execsql { PRAGMA lock_status } +} {main exclusive temp closed} +do_test wal2-6.3.6 { + execsql { COMMIT } + list [file exists test.db-wal] [file exists test.db-journal] +} {0 1} +do_test wal2-6.3.7 { + execsql { PRAGMA lock_status } +} {main exclusive temp closed} +db close + + +# This test - wal2-6.4.* - uses a single database connection and the +# [testvfs] instrumentation to test that xShmLock() is being called +# as expected when a WAL database is used with locking_mode=exclusive. 
+# +do_test wal2-6.4.1 { + file delete -force test.db test.db-wal test.db-journal + proc tvfs_cb {method args} { + set ::shm_file [lindex $args 0] + if {$method == "xShmLock"} { lappend ::locks [lindex $args 2] } + return "SQLITE_OK" + } + testvfs tvfs + tvfs script tvfs_cb + sqlite3 db test.db -vfs tvfs +} {} + +set RECOVERY { + {0 1 lock exclusive} {1 7 lock exclusive} + {1 7 unlock exclusive} {0 1 unlock exclusive} +} +set READMARK0_READ { + {3 1 lock shared} {3 1 unlock shared} +} +set READMARK0_WRITE { + {3 1 lock shared} + {0 1 lock exclusive} {3 1 unlock shared} + {4 1 lock exclusive} {4 1 unlock exclusive} {4 1 lock shared} + {0 1 unlock exclusive} {4 1 unlock shared} +} +set READMARK1_SET { + {4 1 lock exclusive} {4 1 unlock exclusive} +} +set READMARK1_READ { + {4 1 lock shared} {4 1 unlock shared} +} + +foreach {tn sql res expected_locks} { + 2 { + PRAGMA journal_mode = WAL; + BEGIN; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES('Leonard'); + INSERT INTO t1 VALUES('Arthur'); + COMMIT; + } {wal} { + $RECOVERY + $READMARK0_WRITE + } + + 3 { + # This test should do the READMARK1_SET locking to populate the + # aReadMark[1] slot with the current mxFrame value. Followed by + # READMARK1_READ to read the database. + # + SELECT * FROM t1 + } {Leonard Arthur} { + $READMARK1_SET + $READMARK1_READ + } + + 4 { + # aReadMark[1] is already set to mxFrame. So just READMARK1_READ + # this time, not READMARK1_SET. + # + SELECT * FROM t1 ORDER BY x + } {Arthur Leonard} { + $READMARK1_READ + } + + 5 { + PRAGMA locking_mode = exclusive + } {exclusive} { } + + 6 { + INSERT INTO t1 VALUES('Julius Henry'); + SELECT * FROM t1; + } {Leonard Arthur {Julius Henry}} { + $READMARK1_READ + } + + 7 { + INSERT INTO t1 VALUES('Karl'); + SELECT * FROM t1; + } {Leonard Arthur {Julius Henry} Karl} { } + + 8 { + PRAGMA locking_mode = normal + } {normal} { } + + 9 { + SELECT * FROM t1 ORDER BY x + } {Arthur {Julius Henry} Karl Leonard} { } + + 10 { + DELETE FROM t1 + } {} { + $READMARK1_READ + } + + 11 { + SELECT * FROM t1 + } {} { + $READMARK1_SET + $READMARK1_READ + } +} { + + set L [list] + foreach el [subst $expected_locks] { lappend L $el } + + set S "" + foreach sq [split $sql "\n"] { + set sq [string trim $sq] + if {[string match {#*} $sq]==0} {append S "$sq\n"} + } + + set ::locks [list] + do_test wal2-6.4.$tn.1 { execsql $S } $res + do_test wal2-6.4.$tn.2 { set ::locks } $L +} + +db close +tvfs delete + +do_test wal2-6.5.1 { + sqlite3 db test.db + execsql { + PRAGMA journal_mode = wal; + PRAGMA locking_mode = exclusive; + CREATE TABLE t2(a, b); + PRAGMA wal_checkpoint; + INSERT INTO t2 VALUES('I', 'II'); + PRAGMA journal_mode; + } +} {wal exclusive wal} +do_test wal2-6.5.2 { + execsql { + PRAGMA locking_mode = normal; + INSERT INTO t2 VALUES('III', 'IV'); + PRAGMA locking_mode = exclusive; + SELECT * FROM t2; + } +} {normal exclusive I II III IV} +do_test wal2-6.5.3 { + execsql { PRAGMA wal_checkpoint } +} {} +db close + +proc lock_control {method filename handle spec} { + foreach {start n op type} $spec break + if {$op == "lock"} { return SQLITE_IOERR } + return SQLITE_OK +} +do_test wal2-6.6.1 { + testvfs T + T script lock_control + T filter {} + sqlite3 db test.db -vfs T + execsql { PRAGMA locking_mode = exclusive } + execsql { INSERT INTO t2 VALUES('V', 'VI') } +} {} +do_test wal2-6.6.2 { + execsql { PRAGMA locking_mode = normal } + T filter xShmLock + execsql { INSERT INTO t2 VALUES('VII', 'VIII') } +} {} +do_test wal2-6.6.3 { + # At this point the connection should still be in exclusive-mode, even + 
# though it tried to exit exclusive-mode when committing the INSERT + # statement above. To exit exclusive mode, SQLite has to take a read-lock + # on the WAL file using xShmLock(). Since that call failed, it remains + # in exclusive mode. + # + sqlite3 db2 test.db -vfs T + catchsql { SELECT * FROM t2 } db2 +} {1 {database is locked}} +do_test wal2-6.6.2 { + db2 close + T filter {} + execsql { INSERT INTO t2 VALUES('IX', 'X') } +} {} +do_test wal2-6.6.3 { + # This time, we have successfully exited exclusive mode. So the second + # connection can read the database. + sqlite3 db2 test.db -vfs T + catchsql { SELECT * FROM t2 } db2 +} {0 {I II III IV V VI VII VIII IX X}} + +db close +db2 close +T delete + +#------------------------------------------------------------------------- +# Test a theory about the checksum algorithm. Theory was false and this +# test did not provoke a bug. +# +file delete -force test.db test.db-wal test.db-journal +do_test wal2-7.1.1 { + sqlite3 db test.db + execsql { + PRAGMA page_size = 4096; + PRAGMA journal_mode = WAL; + CREATE TABLE t1(a, b); + } + file size test.db +} {4096} +do_test wal2-7.1.2 { + file copy -force test.db test2.db + file copy -force test.db-wal test2.db-wal + hexio_write test2.db-wal 48 FF +} {1} +do_test wal2-7.1.3 { + sqlite3 db2 test2.db + execsql { PRAGMA wal_checkpoint } db2 + execsql { SELECT * FROM sqlite_master } db2 +} {} +db close +db2 close +file delete -force test.db test.db-wal test.db-journal +do_test wal2-8.1.2 { + sqlite3 db test.db + execsql { + PRAGMA auto_vacuum=OFF; + PRAGMA page_size = 1024; + PRAGMA journal_mode = WAL; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(zeroblob(8188*1020)); + CREATE TABLE t2(y); + } + execsql { + PRAGMA wal_checkpoint; + SELECT rootpage>=8192 FROM sqlite_master WHERE tbl_name = 't2'; + } +} {1} +do_test wal2-8.1.3 { + execsql { + PRAGMA cache_size = 10; + CREATE TABLE t3(z); + BEGIN; + INSERT INTO t3 VALUES(randomblob(900)); + INSERT INTO t3 SELECT randomblob(900) FROM t3; + INSERT INTO t2 VALUES('hello'); + INSERT INTO t3 SELECT randomblob(900) FROM t3; + INSERT INTO t3 SELECT randomblob(900) FROM t3; + INSERT INTO t3 SELECT randomblob(900) FROM t3; + INSERT INTO t3 SELECT randomblob(900) FROM t3; + INSERT INTO t3 SELECT randomblob(900) FROM t3; + INSERT INTO t3 SELECT randomblob(900) FROM t3; + ROLLBACK; + } + execsql { + INSERT INTO t2 VALUES('goodbye'); + INSERT INTO t3 SELECT randomblob(900) FROM t3; + INSERT INTO t3 SELECT randomblob(900) FROM t3; + } +} {} +do_test wal2-8.1.4 { + sqlite3 db2 test.db + execsql { SELECT * FROM t2 } +} {goodbye} +db2 close +db close + +#------------------------------------------------------------------------- +# Test that even if the checksums for both are valid, if the two copies +# of the wal-index header in the wal-index do not match, the client +# runs (or at least tries to run) database recovery. +# +# +proc get_name {method args} { set ::filename [lindex $args 0] ; tvfs filter {} } +testvfs tvfs +tvfs script get_name +tvfs filter xShmOpen + +file delete -force test.db test.db-wal test.db-journal +do_test wal2-9.1 { + sqlite3 db test.db -vfs tvfs + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE x(y); + INSERT INTO x VALUES('Barton'); + INSERT INTO x VALUES('Deakin'); + } + + # Set $wih(1) to the contents of the wal-index header after + # the frames associated with the first two rows in table 'x' have + # been inserted. Then insert one more row and set $wih(2) + # to the new value of the wal-index header. 
+ # + # If the $wih(1) is written into the wal-index before running + # a read operation, the client will see only the first two rows. If + # $wih(2) is written into the wal-index, the client will see + # three rows. If an invalid header is written into the wal-index, then + # the client will run recovery and see three rows. + # + set wih(1) [set_tvfs_hdr $::filename] + execsql { INSERT INTO x VALUES('Watson') } + set wih(2) [set_tvfs_hdr $::filename] + + sqlite3 db2 test.db -vfs tvfs + execsql { SELECT * FROM x } db2 +} {Barton Deakin Watson} + +foreach {tn hdr1 hdr2 res} [list \ + 3 $wih(1) $wih(1) {Barton Deakin} \ + 4 $wih(1) $wih(2) {Barton Deakin Watson} \ + 5 $wih(2) $wih(1) {Barton Deakin Watson} \ + 6 $wih(2) $wih(2) {Barton Deakin Watson} \ + 7 $wih(1) $wih(1) {Barton Deakin} \ + 8 {0 0 0 0 0 0 0 0 0 0 0 0} {0 0 0 0 0 0 0 0 0 0 0 0} {Barton Deakin Watson} +] { + do_test wal2-9.$tn { + set_tvfs_hdr $::filename $hdr1 $hdr2 + execsql { SELECT * FROM x } db2 + } $res +} + +db2 close +db close + +#------------------------------------------------------------------------- +# This block of tests - wal2-10.* - focus on the libraries response to +# new versions of the wal or wal-index formats. +# +# wal2-10.1.*: Test that the library refuses to "recover" a new WAL +# format. +# +# wal2-10.2.*: Test that the library refuses to read or write a database +# if the wal-index version is newer than it understands. +# +# At time of writing, the only versions of the wal and wal-index formats +# that exist are versions 3007000 (corresponding to SQLite version 3.7.0, +# the first version of SQLite to feature wal mode). +# +do_test wal2-10.1.1 { + faultsim_delete_and_reopen + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE t1(a, b); + PRAGMA wal_checkpoint; + INSERT INTO t1 VALUES(1, 2); + INSERT INTO t1 VALUES(3, 4); + } + faultsim_save_and_close +} {} +do_test wal2-10.1.2 { + faultsim_restore_and_reopen + execsql { SELECT * FROM t1 } +} {1 2 3 4} +do_test wal2-10.1.3 { + faultsim_restore_and_reopen + set hdr [wal_set_walhdr test.db-wal] + lindex $hdr 1 +} {3007000} +do_test wal2-10.1.4 { + lset hdr 1 3007001 + wal_set_walhdr test.db-wal $hdr + catchsql { SELECT * FROM t1 } +} {1 {unable to open database file}} + +testvfs tvfs -default 1 +do_test wal2-10.2.1 { + faultsim_restore_and_reopen + execsql { SELECT * FROM t1 } +} {1 2 3 4} +do_test wal2-10.2.2 { + set hdr [set_tvfs_hdr $::filename] + lindex $hdr 0 +} {3007000} +breakpoint +do_test wal2-10.2.3 { + lset hdr 0 3007001 + wal_fix_walindex_cksum hdr + set_tvfs_hdr $::filename $hdr + catchsql { SELECT * FROM t1 } +} {1 {unable to open database file}} +db close +tvfs delete + +#------------------------------------------------------------------------- +# This block of tests - wal2-11.* - tests that it is not possible to put +# the library into an infinite loop by presenting it with a corrupt +# hash table (one that appears to contain a single chain of infinite +# length). +# +# wal2-11.1.*: While reading the hash-table. +# +# wal2-11.2.*: While writing the hash-table. +# +testvfs tvfs -default 1 +do_test wal2-11.0 { + faultsim_delete_and_reopen + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE t1(a, b, c); + INSERT INTO t1 VALUES(1, 2, 3); + INSERT INTO t1 VALUES(4, 5, 6); + INSERT INTO t1 VALUES(7, 8, 9); + SELECT * FROM t1; + } +} {wal 1 2 3 4 5 6 7 8 9} + +do_test wal2-11.1.1 { + sqlite3 db2 test.db + execsql { SELECT name FROM sqlite_master } db2 +} {t1} + +# Set all zeroed slots in the first hash table to invalid values. 
+# +set blob [string range [tvfs shm $::filename] 0 16383] +set I [string range [tvfs shm $::filename] 16384 end] +binary scan $I t* L +set I [list] +foreach p $L { + lappend I [expr $p ? $p : 400] +} +append blob [binary format t* $I] +tvfs shm $::filename $blob +do_test wal2-11.2 { + catchsql { INSERT INTO t1 VALUES(10, 11, 12) } +} {1 {database disk image is malformed}} + +# Fill up the hash table on the first page of shared memory with 0x55 bytes. +# +set blob [string range [tvfs shm $::filename] 0 16383] +append blob [string repeat [binary format c 55] 16384] +tvfs shm $::filename $blob +do_test wal2-11.3 { + catchsql { SELECT * FROM t1 } db2 +} {1 {database disk image is malformed}} + +db close +db2 close +tvfs delete + +#------------------------------------------------------------------------- +# If a connection is required to create a WAL or SHM file, it creates +# the new files with the same file-system permissions as the database +# file itself. Test this. +# +if {$::tcl_platform(platform) == "unix"} { + faultsim_delete_and_reopen + set umask [exec /bin/sh -c umask] + + do_test wal2-12.1 { + sqlite3 db test.db + execsql { + CREATE TABLE tx(y, z); + PRAGMA journal_mode = WAL; + } + db close + list [file exists test.db-wal] [file exists test.db-shm] + } {0 0} + + foreach {tn permissions} { + 1 00644 + 2 00666 + 3 00600 + 4 00755 + } { + set effective [format %.5o [expr $permissions & ~$umask]] + do_test wal2-12.2.$tn.1 { + file attributes test.db -permissions $permissions + file attributes test.db -permissions + } $permissions + do_test wal2-12.2.$tn.2 { + list [file exists test.db-wal] [file exists test.db-shm] + } {0 0} + do_test wal2-12.2.$tn.3 { + sqlite3 db test.db + execsql { INSERT INTO tx DEFAULT VALUES } + list [file exists test.db-wal] [file exists test.db-shm] + } {1 1} + do_test wal2-12.2.$tn.4 { + list [file attr test.db-wal -perm] [file attr test.db-shm -perm] + } [list $effective $effective] + do_test wal2-12.2.$tn.5 { + db close + list [file exists test.db-wal] [file exists test.db-shm] + } {0 0} + } +} + +#------------------------------------------------------------------------- +# Test the libraries response to discovering that one or more of the +# database, wal or shm files cannot be opened, or can only be opened +# read-only. +# +if {$::tcl_platform(platform) == "unix"} { + proc perm {} { + set L [list] + foreach f {test.db test.db-wal test.db-shm} { + if {[file exists $f]} { + lappend L [file attr $f -perm] + } else { + lappend L {} + } + } + set L + } + + faultsim_delete_and_reopen + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE t1(a, b); + PRAGMA wal_checkpoint; + INSERT INTO t1 VALUES('3.14', '2.72'); + } + do_test wal2-13.1.1 { + list [file exists test.db-shm] [file exists test.db-wal] + } {1 1} + faultsim_save_and_close + + foreach {tn db_perm wal_perm shm_perm can_open can_read can_write} { + 2 00644 00644 00644 1 1 1 + 3 00644 00400 00644 1 1 0 + 4 00644 00644 00400 1 0 0 + 5 00400 00644 00644 1 1 0 + + 7 00644 00000 00644 1 0 0 + 8 00644 00644 00000 1 0 0 + 9 00000 00644 00644 0 0 0 + } { + faultsim_restore + do_test wal2-13.$tn.1 { + file attr test.db -perm $db_perm + file attr test.db-wal -perm $wal_perm + file attr test.db-shm -perm $shm_perm + + set L [file attr test.db -perm] + lappend L [file attr test.db-wal -perm] + lappend L [file attr test.db-shm -perm] + } [list $db_perm $wal_perm $shm_perm] + + # If $can_open is true, then it should be possible to open a database + # handle. 
Otherwise, if $can_open is 0, attempting to open the db + # handle throws an "unable to open database file" exception. + # + set r(1) {0 ok} + set r(0) {1 {unable to open database file}} + do_test wal2-13.$tn.2 { + list [catch {sqlite3 db test.db ; set {} ok} msg] $msg + } $r($can_open) + + if {$can_open} { + + # If $can_read is true, then the client should be able to read from + # the database file. If $can_read is false, attempting to read should + # throw the "unable to open database file" exception. + # + set a(0) {1 {unable to open database file}} + set a(1) {0 {3.14 2.72}} + do_test wal2-13.$tn.3 { + catchsql { SELECT * FROM t1 } + } $a($can_read) + + # Now try to write to the db file. If the client cannot read the + # database at all, then it should throw the familiar "unable to open + # db file" exception. If it can read but not write, the exception + # should be "attempt to write a readonly database". + # + # If the client can read and write, the operation should succeed. + # + set b(0,0) {1 {unable to open database file}} + set b(1,0) {1 {attempt to write a readonly database}} + set b(1,1) {0 {}} + do_test wal2-13.$tn.4 { + catchsql { INSERT INTO t1 DEFAULT VALUES } + } $b($can_read,$can_write) + } + catch { db close } + } +} + +finish_test + + diff --git a/test/wal3.test b/test/wal3.test new file mode 100644 index 0000000..b344c14 --- /dev/null +++ b/test/wal3.test @@ -0,0 +1,738 @@ +# 2010 April 13 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the operation of the library in +# "PRAGMA journal_mode=WAL" mode. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/lock_common.tcl +source $testdir/wal_common.tcl +source $testdir/malloc_common.tcl +ifcapable !wal {finish_test ; return } + +set a_string_counter 1 +proc a_string {n} { + global a_string_counter + incr a_string_counter + string range [string repeat "${a_string_counter}." $n] 1 $n +} +db func a_string a_string + +#------------------------------------------------------------------------- +# When a rollback or savepoint rollback occurs, the client may remove +# elements from one of the hash tables in the wal-index. This block +# of test cases tests that nothing appears to go wrong when this is +# done.
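The SQL pattern stressed by wal3-1.* is the same one wal-21.2 used earlier: frames belonging to a statement or savepoint that is rolled back may already have been written into the log, and the corresponding wal-index hash-table entries must be discarded cleanly. Reduced to a plain C fragment, illustrative only, with blob sizes following the tests:

    /* Sketch: savepoint rollback inside a WAL-mode write transaction. */
    #include "sqlite3.h"

    static int savepoint_rollback_demo(sqlite3 *db){
      return sqlite3_exec(db,
          "PRAGMA journal_mode=WAL;"
          "CREATE TABLE IF NOT EXISTS t1(x);"
          "INSERT INTO t1 VALUES(randomblob(900));"
          "BEGIN;"
          "  SAVEPOINT s;"
          "    INSERT INTO t1 SELECT randomblob(900) FROM t1;"
          "  ROLLBACK TO s;"    /* discards the frames of the inner insert */
          "COMMIT;", 0, 0, 0);
    }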
+# +do_test wal3-1.0 { + execsql { + PRAGMA cache_size = 2000; + PRAGMA page_size = 1024; + PRAGMA auto_vacuum = off; + PRAGMA synchronous = normal; + PRAGMA journal_mode = WAL; + PRAGMA wal_autocheckpoint = 0; + BEGIN; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES( a_string(800) ); /* 1 */ + INSERT INTO t1 SELECT a_string(800) FROM t1; /* 2 */ + INSERT INTO t1 SELECT a_string(800) FROM t1; /* 4 */ + INSERT INTO t1 SELECT a_string(800) FROM t1; /* 8 */ + INSERT INTO t1 SELECT a_string(800) FROM t1; /* 16 */ + INSERT INTO t1 SELECT a_string(800) FROM t1; /* 32 */ + INSERT INTO t1 SELECT a_string(800) FROM t1; /* 64 */ + INSERT INTO t1 SELECT a_string(800) FROM t1; /* 128*/ + INSERT INTO t1 SELECT a_string(800) FROM t1; /* 256 */ + INSERT INTO t1 SELECT a_string(800) FROM t1; /* 512 */ + INSERT INTO t1 SELECT a_string(800) FROM t1; /* 1024 */ + INSERT INTO t1 SELECT a_string(800) FROM t1; /* 2048 */ + INSERT INTO t1 SELECT a_string(800) FROM t1 LIMIT 1970; /* 4018 */ + COMMIT; + PRAGMA cache_size = 10; + } + wal_frame_count test.db-wal 1024 +} 4056 + +for {set i 1} {$i < 50} {incr i} { + + do_test wal3-1.$i.1 { + set str [a_string 800] + execsql { UPDATE t1 SET x = $str WHERE rowid = $i } + lappend L [wal_frame_count test.db-wal 1024] + execsql { + BEGIN; + INSERT INTO t1 SELECT a_string(800) FROM t1 LIMIT 100; + ROLLBACK; + PRAGMA integrity_check; + } + } {ok} + + # Check that everything looks OK from the point of view of an + # external connection. + # + sqlite3 db2 test.db + do_test wal3-1.$i.2 { + execsql { SELECT count(*) FROM t1 } db2 + } 4018 + do_test wal3-1.$i.3 { + execsql { SELECT x FROM t1 WHERE rowid = $i } + } $str + do_test wal3-1.$i.4 { + execsql { PRAGMA integrity_check } db2 + } {ok} + db2 close + + # Check that the file-system in its current state can be recovered. + # + file copy -force test.db test2.db + file copy -force test.db-wal test2.db-wal + file delete -force test2.db-journal + sqlite3 db2 test2.db + do_test wal3-1.$i.5 { + execsql { SELECT count(*) FROM t1 } db2 + } 4018 + do_test wal3-1.$i.6 { + execsql { SELECT x FROM t1 WHERE rowid = $i } + } $str + do_test wal3-1.$i.7 { + execsql { PRAGMA integrity_check } db2 + } {ok} + db2 close +} + +do_multiclient_test i { + + set testname(1) multiproc + set testname(2) singleproc + set tn $testname($i) + + do_test wal3-2.$tn.1 { + sql1 { + PRAGMA page_size = 1024; + PRAGMA journal_mode = WAL; + } + sql1 { + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 'one'); + BEGIN; + SELECT * FROM t1; + } + } {1 one} + do_test wal3-2.$tn.2 { + sql2 { + CREATE TABLE t2(a, b); + INSERT INTO t2 VALUES(2, 'two'); + BEGIN; + SELECT * FROM t2; + } + } {2 two} + do_test wal3-2.$tn.3 { + sql3 { + CREATE TABLE t3(a, b); + INSERT INTO t3 VALUES(3, 'three'); + BEGIN; + SELECT * FROM t3; + } + } {3 three} + + # Try to checkpoint the database using [db]. It should be possible to + # checkpoint everything except the table added by [db3] (checkpointing + # these frames would clobber the snapshot currently being used by [db2]). + # + # After [db2] has committed, a checkpoint can copy the entire log to the + # database file. Checkpointing after [db3] has committed is therefore a + # no-op, as the entire log has already been backfilled. + # + do_test wal3-2.$tn.4 { + sql1 { + COMMIT; + PRAGMA wal_checkpoint; + } + file size test.db + } [expr $AUTOVACUUM ? 4*1024 : 3*1024] + do_test wal3-2.$tn.5 { + sql2 { + COMMIT; + PRAGMA wal_checkpoint; + } + file size test.db + } [expr $AUTOVACUUM ? 
5*1024 : 4*1024] + do_test wal3-2.$tn.6 { + sql3 { + COMMIT; + PRAGMA wal_checkpoint; + } + file size test.db + } [expr $AUTOVACUUM ? 5*1024 : 4*1024] +} +catch {db close} + +#------------------------------------------------------------------------- +# Test that that for the simple test: +# +# CREATE TABLE x(y); +# INSERT INTO x VALUES('z'); +# PRAGMA wal_checkpoint; +# +# in WAL mode the xSync method is invoked as expected for each of +# synchronous=off, synchronous=normal and synchronous=full. +# +foreach {tn syncmode synccount} { + 1 off + {} + 2 normal + {test.db-wal normal test.db normal} + 3 full + {test.db-wal normal test.db-wal normal test.db-wal normal test.db normal} +} { + + proc sync_counter {args} { + foreach {method filename id flags} $args break + lappend ::syncs [file tail $filename] $flags + } + do_test wal3-3.$tn { + file delete -force test.db test.db-wal test.db-journal + + testvfs T + T filter {} + T script sync_counter + sqlite3 db test.db -vfs T + + execsql "PRAGMA synchronous = $syncmode" + execsql { PRAGMA journal_mode = WAL } + + set ::syncs [list] + T filter xSync + execsql { + CREATE TABLE x(y); + INSERT INTO x VALUES('z'); + PRAGMA wal_checkpoint; + } + T filter {} + set ::syncs + } $synccount + + db close + T delete +} + +#------------------------------------------------------------------------- +# When recovering the contents of a WAL file, a process obtains the WRITER +# lock, then locks all other bytes before commencing recovery. If it fails +# to lock all other bytes (because some other process is holding a read +# lock) it should retry up to 100 times. Then return SQLITE_PROTOCOL to the +# caller. Test this (test case wal3-4.3). +# +# Also test the effect of hitting an SQLITE_BUSY while attempting to obtain +# the WRITER lock (should be the same). Test case wal3-4.4. +# +proc lock_callback {method filename handle lock} { + lappend ::locks $lock +} +do_test wal3-4.1 { + testvfs T + T filter xShmLock + T script lock_callback + set ::locks [list] + sqlite3 db test.db -vfs T + execsql { SELECT * FROM x } + lrange $::locks 0 3 +} [list {0 1 lock exclusive} {1 7 lock exclusive} \ + {1 7 unlock exclusive} {0 1 unlock exclusive} \ +] +do_test wal3-4.2 { + db close + set ::locks [list] + sqlite3 db test.db -vfs T + execsql { SELECT * FROM x } + lrange $::locks 0 3 +} [list {0 1 lock exclusive} {1 7 lock exclusive} \ + {1 7 unlock exclusive} {0 1 unlock exclusive} \ +] +proc lock_callback {method filename handle lock} { + if {$lock == "1 7 lock exclusive"} { return SQLITE_BUSY } + return SQLITE_OK +} +puts " Warning: This next test case causes SQLite to call xSleep(1) 100 times." +puts " Normally this equates to a 100ms delay, but if SQLite is built on unix" +puts " without HAVE_USLEEP defined, it may be 100 seconds." +do_test wal3-4.3 { + db close + set ::locks [list] + sqlite3 db test.db -vfs T + catchsql { SELECT * FROM x } +} {1 {locking protocol}} + +puts " Warning: Same again!" +proc lock_callback {method filename handle lock} { + if {$lock == "0 1 lock exclusive"} { return SQLITE_BUSY } + return SQLITE_OK +} +do_test wal3-4.4 { + db close + set ::locks [list] + sqlite3 db test.db -vfs T + catchsql { SELECT * FROM x } +} {1 {locking protocol}} +db close +T delete + + +#------------------------------------------------------------------------- +# Only one client may run recovery at a time. Test this mechanism. 
+# +# When client-2 tries to open a read transaction while client-1 is +# running recovery, it fails to obtain a lock on an aReadMark[] slot +# (because they are all locked by recovery). It then tries to obtain +# a shared lock on the RECOVER lock to see if there really is a +# recovery running or not. +# +# This block of tests checks the effect of an SQLITE_BUSY or SQLITE_IOERR +# being returned when client-2 attempts a shared lock on the RECOVER byte. +# +# An SQLITE_BUSY should be converted to an SQLITE_BUSY_RECOVERY. An +# SQLITE_IOERR should be returned to the caller. +# +do_test wal3-5.1 { + faultsim_delete_and_reopen + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + INSERT INTO t1 VALUES(3, 4); + } + faultsim_save_and_close +} {} + +testvfs T -default 1 +T script method_callback + +proc method_callback {method args} { + if {$method == "xShmBarrier"} { + incr ::barrier_count + if {$::barrier_count == 2} { + # This code is executed within the xShmBarrier() callback invoked + # by the client running recovery as part of writing the recovered + # wal-index header. If a second client attempts to access the + # database now, it reads a corrupt (partially written) wal-index + # header. But it cannot even get that far, as the first client + # is still holding all the locks (recovery takes an exclusive lock + # on *all* db locks, preventing access by any other client). + # + # If global variable ::wal3_do_lockfailure is non-zero, then set + # things up so that an IO error occurs within an xShmLock() callback + # made by the second client (aka [db2]). + # + sqlite3 db2 test.db + if { $::wal3_do_lockfailure } { T filter xShmLock } + set ::testrc [ catch { db2 eval "SELECT * FROM t1" } ::testmsg ] + T filter {} + db2 close + } + } + + if {$method == "xShmLock"} { + foreach {file handle spec} $args break + if { $spec == "2 1 lock shared" } { + return SQLITE_IOERR + } + } + + return SQLITE_OK +} + +# Test a normal SQLITE_BUSY return. +# +T filter xShmBarrier +set testrc "" +set testmsg "" +set barrier_count 0 +set wal3_do_lockfailure 0 +do_test wal3-5.2 { + faultsim_restore_and_reopen + execsql { SELECT * FROM t1 } +} {1 2 3 4} +do_test wal3-5.3 { + list $::testrc $::testmsg +} {1 {database is locked}} +db close + +# Test an SQLITE_IOERR return. +# +T filter xShmBarrier +set barrier_count 0 +set wal3_do_lockfailure 1 +set testrc "" +set testmsg "" +do_test wal3-5.4 { + faultsim_restore_and_reopen + execsql { SELECT * FROM t1 } +} {1 2 3 4} +do_test wal3-5.5 { + list $::testrc $::testmsg +} {1 {disk I/O error}} + +db close +T delete + +#------------------------------------------------------------------------- +# When opening a read-transaction on a database, if the entire log has +# already been copied to the database file, the reader grabs a special +# kind of read lock (on aReadMark[0]). This set of test cases tests the +# outcome of the following: +# +# + The reader discovering that between the time when it determined +# that the log had been completely backfilled and the lock is obtained +# that a writer has written to the log. In this case the reader should +# acquire a different read-lock (not aReadMark[0]) and read the new +# snapshot. +# +# + The attempt to obtain the lock on aReadMark[0] fails with SQLITE_BUSY. +# This can happen if a checkpoint is ongoing. In this case also simply +# obtain a different read-lock. 
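Several of the failures provoked in this area surface to an application as a plain SQLITE_BUSY ("database is locked"), for example while another connection is checkpointing or recovering the log. The usual application response, which the [db busy busyhandler] call in wal2-3 models, is a busy handler or timeout; a minimal C equivalent follows (invented helper name, timeout value arbitrary):

    /* Sketch: let SQLite retry busy WAL locks instead of failing at once. */
    #include "sqlite3.h"

    static int open_with_retry(const char *zFile, sqlite3 **pDb){
      int rc = sqlite3_open(zFile, pDb);
      if( rc==SQLITE_OK ){
        /* Retry for up to two seconds before returning SQLITE_BUSY.
        ** A bespoke callback could be installed with sqlite3_busy_handler(). */
        rc = sqlite3_busy_timeout(*pDb, 2000);
      }
      return rc;
    }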
+# +catch {db close} +testvfs T -default 1 +do_test wal3-6.1.1 { + file delete -force test.db test.db-journal test.db wal + sqlite3 db test.db + execsql { PRAGMA journal_mode = WAL } + execsql { + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES('o', 't'); + INSERT INTO t1 VALUES('t', 'f'); + } +} {} +do_test wal3-6.1.2 { + sqlite3 db2 test.db + sqlite3 db3 test.db + execsql { BEGIN ; SELECT * FROM t1 } db3 +} {o t t f} +do_test wal3-6.1.3 { + execsql { PRAGMA wal_checkpoint } db2 +} {} + +# At this point the log file has been fully checkpointed. However, +# connection [db3] holds a lock that prevents the log from being wrapped. +# Test case 3.6.1.4 has [db] attempt a read-lock on aReadMark[0]. But +# as it is obtaining the lock, [db2] appends to the log file. +# +T filter xShmLock +T script lock_callback +proc lock_callback {method file handle spec} { + if {$spec == "3 1 lock shared"} { + # This is the callback for [db] to obtain the read lock on aReadMark[0]. + # Disable future callbacks using [T filter {}] and write to the log + # file using [db2]. [db3] is preventing [db2] from wrapping the log + # here, so this is an append. + T filter {} + db2 eval { INSERT INTO t1 VALUES('f', 's') } + } + return SQLITE_OK +} +do_test wal3-6.1.4 { + execsql { + BEGIN; + SELECT * FROM t1; + } +} {o t t f f s} + +# [db] should be left holding a read-lock on some slot other than +# aReadMark[0]. Test this by demonstrating that the read-lock is preventing +# the log from being wrapped. +# +do_test wal3-6.1.5 { + db3 eval COMMIT + db2 eval { PRAGMA wal_checkpoint } + set sz1 [file size test.db-wal] + db2 eval { INSERT INTO t1 VALUES('s', 'e') } + set sz2 [file size test.db-wal] + expr {$sz2>$sz1} +} {1} + +# Test that if [db2] had not interfered when [db] was trying to grab +# aReadMark[0], it would have been possible to wrap the log in 3.6.1.5. +# +do_test wal3-6.1.6 { + execsql { COMMIT } + execsql { PRAGMA wal_checkpoint } db2 + execsql { + BEGIN; + SELECT * FROM t1; + } +} {o t t f f s s e} +do_test wal3-6.1.7 { + db2 eval { PRAGMA wal_checkpoint } + set sz1 [file size test.db-wal] + db2 eval { INSERT INTO t1 VALUES('n', 't') } + set sz2 [file size test.db-wal] + expr {$sz2==$sz1} +} {1} + +db3 close +db2 close +db close + +do_test wal3-6.2.1 { + file delete -force test.db test.db-journal test.db wal + sqlite3 db test.db + sqlite3 db2 test.db + execsql { PRAGMA journal_mode = WAL } + execsql { + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES('h', 'h'); + INSERT INTO t1 VALUES('l', 'b'); + } +} {} + +T filter xShmLock +T script lock_callback +proc lock_callback {method file handle spec} { + if {$spec == "3 1 unlock exclusive"} { + T filter {} + set ::R [db2 eval { + BEGIN; + SELECT * FROM t1; + }] + } +} +do_test wal3-6.2.2 { + execsql { PRAGMA wal_checkpoint } +} {} +do_test wal3-6.2.3 { + set ::R +} {h h l b} +do_test wal3-6.2.4 { + set sz1 [file size test.db-wal] + execsql { INSERT INTO t1 VALUES('b', 'c'); } + set sz2 [file size test.db-wal] + expr {$sz2 > $sz1} +} {1} +do_test wal3-6.2.5 { + db2 eval { COMMIT } + execsql { PRAGMA wal_checkpoint } + set sz1 [file size test.db-wal] + execsql { INSERT INTO t1 VALUES('n', 'o'); } + set sz2 [file size test.db-wal] + expr {$sz2 == $sz1} +} {1} + +db2 close +db close +T delete + +#------------------------------------------------------------------------- +# When opening a read-transaction on a database, if the entire log has +# not yet been copied to the database file, the reader grabs a read +# lock on aReadMark[x], where x>0. 
The following test cases experiment +# with the outcome of the following: +# +# + The reader discovering that between the time when it read the +# wal-index header and the lock was obtained that a writer has +# written to the log. In this case the reader should re-read the +# wal-index header and lock a snapshot corresponding to the new +# header. +# +# + The value in the aReadMark[x] slot has been modified since it was +# read. +# +catch {db close} +testvfs T -default 1 +do_test wal3-7.1.1 { + file delete -force test.db test.db-journal test.db wal + sqlite3 db test.db + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE blue(red PRIMARY KEY, green); + } +} {wal} + +T script method_callback +T filter xOpen +proc method_callback {method args} { + if {$method == "xOpen"} { return "reader" } +} +do_test wal3-7.1.2 { + sqlite3 db2 test.db + execsql { SELECT * FROM blue } db2 +} {} + +T filter xShmLock +set ::locks [list] +proc method_callback {method file handle spec} { + if {$handle != "reader" } { return } + if {$method == "xShmLock"} { + catch { execsql { INSERT INTO blue VALUES(1, 2) } } + catch { execsql { INSERT INTO blue VALUES(3, 4) } } + } + lappend ::locks $spec +} +do_test wal3-7.1.3 { + execsql { SELECT * FROM blue } db2 +} {1 2 3 4} +do_test wal3-7.1.4 { + set ::locks +} {{4 1 lock shared} {4 1 unlock shared} {5 1 lock shared} {5 1 unlock shared}} + +set ::locks [list] +proc method_callback {method file handle spec} { + if {$handle != "reader" } { return } + if {$method == "xShmLock"} { + catch { execsql { INSERT INTO blue VALUES(5, 6) } } + } + lappend ::locks $spec +} +do_test wal3-7.2.1 { + execsql { SELECT * FROM blue } db2 +} {1 2 3 4 5 6} +do_test wal3-7.2.2 { + set ::locks +} {{5 1 lock shared} {5 1 unlock shared} {4 1 lock shared} {4 1 unlock shared}} + +db close +db2 close +T delete + +#------------------------------------------------------------------------- +# +do_test wal3-8.1 { + file delete -force test.db test.db-journal test.db wal + sqlite3 db test.db + sqlite3 db2 test.db + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE b(c); + INSERT INTO b VALUES('Tehran'); + INSERT INTO b VALUES('Qom'); + INSERT INTO b VALUES('Markazi'); + PRAGMA wal_checkpoint; + } +} {wal} +do_test wal3-8.2 { + execsql { SELECT * FROM b } +} {Tehran Qom Markazi} +do_test wal3-8.3 { + db eval { SELECT * FROM b } { + db eval { INSERT INTO b VALUES('Qazvin') } + set r [db2 eval { SELECT * FROM b }] + break + } + set r +} {Tehran Qom Markazi Qazvin} +do_test wal3-8.4 { + execsql { + INSERT INTO b VALUES('Gilan'); + INSERT INTO b VALUES('Ardabil'); + } +} {} +db2 close + +faultsim_save_and_close +testvfs T -default 1 +faultsim_restore_and_reopen +T filter xShmLock +T script lock_callback + +proc lock_callback {method file handle spec} { + if {$spec == "4 1 unlock exclusive"} { + T filter {} + set ::r [catchsql { SELECT * FROM b } db2] + } +} +sqlite3 db test.db +sqlite3 db2 test.db +do_test wal3-8.5 { + execsql { SELECT * FROM b } +} {Tehran Qom Markazi Qazvin Gilan Ardabil} +do_test wal3-8.6 { + set ::r +} {1 {locking protocol}} + +db close +db2 close + +faultsim_restore_and_reopen +sqlite3 db2 test.db +T filter xShmLock +T script lock_callback +proc lock_callback {method file handle spec} { + if {$spec == "1 7 unlock exclusive"} { + T filter {} + set ::r [catchsql { SELECT * FROM b } db2] + } +} +unset ::r +do_test wal3-8.5 { + execsql { SELECT * FROM b } +} {Tehran Qom Markazi Qazvin Gilan Ardabil} +do_test wal3-8.6 { + set ::r +} {1 {locking protocol}} + +db close +db2 close +T 
delete + +#------------------------------------------------------------------------- +# When a connection opens a read-lock on the database, it searches for +# an aReadMark[] slot that is already set to the mxFrame value for the +# new transaction. If it cannot find one, it attempts to obtain an +# exclusive lock on an aReadMark[] slot for the purposes of modifying +# the value, then drops back to a shared-lock for the duration of the +# transaction. +# +# This test case verifies that if an exclusive lock cannot be obtained +# on any aReadMark[] slot (because there are already several readers), +# the client takes a shared-lock on a slot without modifying the value +# and continues. +# +do_test wal3-9.0 { + file delete -force test.db test.db-journal test.db wal + sqlite3 db test.db + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE whoami(x); + INSERT INTO whoami VALUES('nobody'); + } +} {wal} +for {set i 0} {$i < 50} {incr i} { + set c db$i + do_test wal3-9.1.$i { + sqlite3 $c test.db + execsql { UPDATE whoami SET x = $c } + execsql { + BEGIN; + SELECT * FROM whoami + } $c + } $c +} +for {set i 0} {$i < 50} {incr i} { + set c db$i + do_test wal3-9.2.$i { + execsql { SELECT * FROM whoami } $c + } $c +} +do_test wal3-9.3 { + for {set i 0} {$i < 49} {incr i} { db$i close } + execsql { PRAGMA wal_checkpoint } + set sz1 [file size test.db] + db49 close + execsql { PRAGMA wal_checkpoint } + set sz2 [file size test.db] + expr {$sz2 > $sz1} +} {1} + +db close + +finish_test + diff --git a/test/wal4.test b/test/wal4.test new file mode 100644 index 0000000..22ccfea --- /dev/null +++ b/test/wal4.test @@ -0,0 +1,64 @@ +# 2010 July 1 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# Verify that an empty database and a non-empty WAL file do not +# result in database corruption +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/malloc_common.tcl +ifcapable !wal {finish_test ; return } + +do_test wal4-1.1 { + execsql { + PRAGMA journal_mode=WAL; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + SELECT x FROM t1 ORDER BY x; + } +} {wal 1 2} + +do_test wal4-1.2 { + # Save a copy of the file-system containing the wal and wal-index files + # only (no database file). + faultsim_save_and_close + file delete -force sv_test.db +} {} + +do_test wal4-1.3 { + faultsim_restore_and_reopen + catchsql { SELECT * FROM t1 } +} {1 {no such table: t1}} + +do_faultsim_test wal4-2 -prep { + faultsim_restore_and_reopen +} -body { + execsql { SELECT name FROM sqlite_master } +} -test { + # Result should be zero rows (empty db file). + # + faultsim_test_result {0 {}} + + # If the SELECT finished successfully, the WAL file should have been + # deleted. In no case should the database file have been written, so + # it should still be zero bytes in size regardless of whether or not + # a fault was injected. 
Test these assertions: + # + if { $testrc==0 && [file exists test.db-wal] } { + error "Wal file was not deleted" + } + if { [file size test.db]!=0 } { + error "Db file grew to [file size test.db] bytes" + } +} + +finish_test diff --git a/test/wal_common.tcl b/test/wal_common.tcl new file mode 100644 index 0000000..a5b165d --- /dev/null +++ b/test/wal_common.tcl @@ -0,0 +1,91 @@ +# 2010 June 03 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file contains common code used by many different malloc tests +# within the test suite. +# + +proc wal_file_size {nFrame pgsz} { + expr {32 + ($pgsz+24)*$nFrame} +} + +proc wal_frame_count {zFile pgsz} { + set f [file size $zFile] + expr {($f - 32) / ($pgsz+24)} +} + +proc wal_cksum_intlist {ckv1 ckv2 intlist} { + upvar $ckv1 c1 + upvar $ckv2 c2 + foreach {v1 v2} $intlist { + set c1 [expr {($c1 + $v1 + $c2)&0xFFFFFFFF}] + set c2 [expr {($c2 + $v2 + $c1)&0xFFFFFFFF}] + } +} + + +# This proc calculates checksums in the same way as those used by SQLite +# in WAL files. If the $endian argument is "big", then checksums are +# calculated by interpreting data as an array of big-endian integers. If +# it is "little", data is interpreted as an array of little-endian integers. +# +proc wal_cksum {endian ckv1 ckv2 blob} { + upvar $ckv1 c1 + upvar $ckv2 c2 + + if {$endian!="big" && $endian!="little"} { + return -error "Bad value \"$endian\" - must be \"big\" or \"little\"" + } + set scanpattern I* + if {$endian == "little"} { set scanpattern i* } + + binary scan $blob $scanpattern values + wal_cksum_intlist c1 c2 $values +} + +proc wal_set_walhdr {filename {intlist {}}} { + if {[llength $intlist]==6} { + set blob [binary format I6 $intlist] + set endian little + if {[lindex $intlist 0] & 0x00000001} { set endian big } + set c1 0 + set c2 0 + wal_cksum $endian c1 c2 $blob + append blob [binary format II $c1 $c2] + + set fd [open $filename r+] + fconfigure $fd -translation binary + fconfigure $fd -encoding binary + seek $fd 0 + puts -nonewline $fd $blob + close $fd + } + + set fd [open $filename] + fconfigure $fd -translation binary + fconfigure $fd -encoding binary + set blob [read $fd 24] + close $fd + + binary scan $blob I6 ints + set ints +} + +proc wal_fix_walindex_cksum {hdrvar} { + upvar $hdrvar hdr + set c1 0 + set c2 0 + wal_cksum_intlist c1 c2 [lrange $hdr 0 9] + lset hdr 10 $c1 + lset hdr 11 $c2 +} + + diff --git a/test/walbak.test b/test/walbak.test new file mode 100644 index 0000000..fe5b99e --- /dev/null +++ b/test/walbak.test @@ -0,0 +1,279 @@ +# 2010 April 22 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the operation of the library in +# "PRAGMA journal_mode=WAL" mode. 
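(Editorial aside, not part of the original patch.) The walbak tests below lean on the [wal_file_size] helper defined in wal_common.tcl above: a WAL file is a 32-byte file header followed by one frame per page written, each frame being a 24-byte frame header plus the page image. A quick Tcl sketch of that arithmetic, using an illustrative proc name:

proc wal_file_size_example {nFrame pgsz} {
  # 32-byte WAL header, then (24-byte frame header + page image) per frame.
  expr {32 + ($pgsz + 24) * $nFrame}
}
puts [wal_file_size_example 6 1024]   ;# => 6320, the log size walbak-1.5 expects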
+# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/wal_common.tcl +source $testdir/malloc_common.tcl + +do_not_use_codec + +ifcapable !wal {finish_test ; return } + + +# Test organization: +# +# walback-1.*: Simple tests. +# +# walback-2.*: Test backups when the source db is modified mid-backup. +# +# walback-3.*: Backup of WAL sources into rollback destinations, and +# vice-versa. +# + +# Make sure a simple backup from a WAL database works. +# +do_test walbak-1.0 { + execsql { + PRAGMA synchronous = NORMAL; + PRAGMA page_size = 1024; + PRAGMA auto_vacuum = 0; + PRAGMA journal_mode = wal; + BEGIN; + CREATE TABLE t1(a PRIMARY KEY, b); + INSERT INTO t1 VALUES('I', 'one'); + COMMIT; + } +} {wal} +do_test walbak-1.1 { + file delete -force bak.db bak.db-journal bak.db-wal + db backup bak.db + file size bak.db +} [expr 3*1024] +do_test walbak-1.2 { + sqlite3 db2 bak.db + execsql { + SELECT * FROM t1; + PRAGMA main.journal_mode; + } db2 +} {I one wal} +do_test walbak-1.3 { + execsql { PRAGMA integrity_check } db2 +} {ok} +db2 close + +# Try a VACUUM on a WAL database. +# +do_test walbak-1.4 { + execsql { + VACUUM; + PRAGMA main.journal_mode; + } +} {wal} +do_test walbak-1.5 { + list [file size test.db] [file size test.db-wal] +} [list 1024 [wal_file_size 6 1024]] +do_test walbak-1.6 { + execsql { PRAGMA wal_checkpoint } + list [file size test.db] [file size test.db-wal] +} [list [expr 3*1024] [wal_file_size 6 1024]] +do_test walbak-1.7 { + execsql { + CREATE TABLE t2(a, b); + INSERT INTO t2 SELECT * FROM t1; + DROP TABLE t1; + } + list [file size test.db] [file size test.db-wal] +} [list [expr 3*1024] [wal_file_size 6 1024]] +do_test walbak-1.8 { + execsql { VACUUM } + list [file size test.db] [file size test.db-wal] +} [list [expr 3*1024] [wal_file_size 8 1024]] +do_test walbak-1.9 { + execsql { PRAGMA wal_checkpoint } + list [file size test.db] [file size test.db-wal] +} [list [expr 2*1024] [wal_file_size 8 1024]] + +#------------------------------------------------------------------------- +# Backups when the source db is modified mid-backup. 
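(Editorial aside, not part of the original patch.) The walbak-2 tests drive the online-backup API through the [sqlite3_backup] test command, destination connection first and source second. Writes made to the source through a different connection while the backup is paused cause the next step to restart the copy from the beginning; writes made through the source connection itself are mirrored into the destination as they happen, so no restart is needed. Roughly the shape used below:

#   sqlite3_backup B db2 main db main   ;# back up db's "main" into db2's "main"
#   B step 50                           ;# copy up to 50 pages, then pause
#   ...                                 ;# source may be modified here
#   B step 1000                         ;# copy whatever remains (SQLITE_DONE)
#   B finish                            ;# release resources, return final rc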
+# +proc sig {{db db}} { + $db eval { + PRAGMA integrity_check; + SELECT md5sum(a, b) FROM t1; + } +} +db close +file delete test.db +sqlite3 db test.db +do_test walbak-2.1 { + execsql { PRAGMA journal_mode = WAL } + execsql { + CREATE TABLE t1(a PRIMARY KEY, b); + BEGIN; + INSERT INTO t1 VALUES(randomblob(500), randomblob(500)); + INSERT INTO t1 SELECT randomblob(500), randomblob(500) FROM t1; /* 2 */ + INSERT INTO t1 SELECT randomblob(500), randomblob(500) FROM t1; /* 4 */ + INSERT INTO t1 SELECT randomblob(500), randomblob(500) FROM t1; /* 8 */ + INSERT INTO t1 SELECT randomblob(500), randomblob(500) FROM t1; /* 16 */ + INSERT INTO t1 SELECT randomblob(500), randomblob(500) FROM t1; /* 32 */ + INSERT INTO t1 SELECT randomblob(500), randomblob(500) FROM t1; /* 64 */ + COMMIT; + } +} {} +do_test walbak-2.2 { + db backup abc.db + sqlite3 db2 abc.db + string compare [sig db] [sig db2] +} {0} + +do_test walbak-2.3 { + sqlite3_backup B db2 main db main + B step 50 + execsql { UPDATE t1 SET b = randomblob(500) } + list [B step 1000] [B finish] +} {SQLITE_DONE SQLITE_OK} +do_test walbak-2.4 { + string compare [sig db] [sig db2] +} {0} + +do_test walbak-2.5 { + db close + sqlite3 db test.db + execsql { PRAGMA cache_size = 10 } + sqlite3_backup B db2 main db main + B step 50 + execsql { + BEGIN; + UPDATE t1 SET b = randomblob(500); + } + expr [file size test.db-wal] > 10*1024 +} {1} +do_test walbak-2.6 { + B step 1000 +} {SQLITE_BUSY} +do_test walbak-2.7 { + execsql COMMIT + list [B step 1000] [B finish] +} {SQLITE_DONE SQLITE_OK} +do_test walbak-2.8 { + string compare [sig db] [sig db2] +} {0} + +do_test walbak-2.9 { + db close + sqlite3 db test.db + execsql { PRAGMA cache_size = 10 } + sqlite3_backup B db2 main db main + B step 50 + execsql { + BEGIN; + UPDATE t1 SET b = randomblob(500); + } + expr [file size test.db-wal] > 10*1024 +} {1} +do_test walbak-2.10 { + B step 1000 +} {SQLITE_BUSY} +do_test walbak-2.11 { + execsql ROLLBACK +set sigB [sig db] + list [B step 1000] [B finish] +} {SQLITE_DONE SQLITE_OK} +do_test walbak-2.12 { + string compare [sig db] [sig db2] +} {0} +db2 close +db close + +#------------------------------------------------------------------------- +# Run some backup operations to copy back and forth between WAL and: +# +# walbak-3.1.*: an in-memory database +# +# walbak-3.2.*: a temporary database +# +# walbak-3.3.*: a database in rollback mode. +# +# walbak-3.4.*: a database in rollback mode that (initially) uses a +# different page-size. +# +# Check that this does not confuse any connected clients. 
+# +foreach {tn setup} { + 1 { + sqlite3 db test.db + sqlite3 db2 :memory: + db eval { PRAGMA page_size = 1024 ; PRAGMA journal_mode = WAL } + db2 eval { PRAGMA page_size = 1024 } + } + + 2 { + sqlite3 db test.db + sqlite3 db2 "" + db eval { PRAGMA page_size = 1024 ; PRAGMA journal_mode = WAL } + db2 eval { PRAGMA page_size = 1024 } + } + + 3 { + sqlite3 db test.db + sqlite3 db2 test.db2 + db eval { PRAGMA page_size = 1024 ; PRAGMA journal_mode = WAL } + db2 eval { PRAGMA page_size = 1024 ; PRAGMA journal_mode = PERSIST } + } + + 4 { + sqlite3 db test.db + sqlite3 db2 test.db2 + db eval { PRAGMA page_size = 1024 ; PRAGMA journal_mode = WAL } + db2 eval { + PRAGMA page_size = 2048; + PRAGMA journal_mode = PERSIST; + CREATE TABLE xx(x); + } + } + +} { + foreach f [glob -nocomplain test.db*] { file delete -force $f } + + eval $setup + + do_test walbak-3.$tn.1 { + execsql { + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + INSERT INTO t1 VALUES(3, 4); + SELECT * FROM t1; + } + } {1 2 3 4} + + do_test walbak-3.$tn.2 { + sqlite3_backup B db2 main db main + B step 10000 + B finish + execsql { SELECT * FROM t1 } db2 + } {1 2 3 4} + + do_test walbak-3.$tn.3 { + execsql { + INSERT INTO t1 VALUES(5, 6); + INSERT INTO t1 VALUES(7, 8); + SELECT * FROM t1; + } db2 + } {1 2 3 4 5 6 7 8} + + do_test walbak-3.$tn.4 { + sqlite3_backup B db main db2 main + B step 10000 + B finish + execsql { SELECT * FROM t1 } + } {1 2 3 4 5 6 7 8} + + db close + db2 close +} + + +finish_test diff --git a/test/walbig.test b/test/walbig.test new file mode 100644 index 0000000..9ceb8ed --- /dev/null +++ b/test/walbig.test @@ -0,0 +1,73 @@ +# 2010 July 07 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script testing the ability of SQLite to handle database +# files larger than 4GB in WAL mode. +# + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Do not use a codec for this file, as the database is manipulated using +# external methods (the [fake_big_file] and [hexio_write] commands). +# +do_not_use_codec + +# If SQLITE_DISABLE_LFS is defined, omit this file. +ifcapable !lfs { + finish_test + return +} + +set a_string_counter 1 +proc a_string {n} { + incr ::a_string_counter + string range [string repeat "${::a_string_counter}." $n] 1 $n +} +db func a_string a_string + +do_test walbig-1.0 { + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE t1(a PRIMARY KEY, b UNIQUE); + INSERT INTO t1 VALUES(a_string(300), a_string(500)); + INSERT INTO t1 SELECT a_string(300), a_string(500) FROM t1; + INSERT INTO t1 SELECT a_string(300), a_string(500) FROM t1; + INSERT INTO t1 SELECT a_string(300), a_string(500) FROM t1; + } +} {wal} + +db close +if {[catch {fake_big_file 5000 [pwd]/test.db}]} { + puts "**** Unable to create a file larger than 5000 MB. 
*****" + finish_test + return +} +hexio_write test.db 28 00000000 + +sqlite3 db test.db +db func a_string a_string +do_test walbig-1.1 { + execsql { INSERT INTO t1 SELECT a_string(300), a_string(500) FROM t1 } +} {} +db close + +sqlite3 db test.db +do_test walbig-1.2 { + execsql { SELECT a FROM t1 ORDER BY a } +} [lsort [execsql { SELECT a FROM t1 ORDER BY rowid }]] + +do_test walbig-1.3 { + execsql { SELECT b FROM t1 ORDER BY b } +} [lsort [execsql { SELECT b FROM t1 ORDER BY rowid }]] + +finish_test diff --git a/test/walcksum.test b/test/walcksum.test new file mode 100644 index 0000000..b2c4a90 --- /dev/null +++ b/test/walcksum.test @@ -0,0 +1,393 @@ +# 2010 May 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/lock_common.tcl +source $testdir/wal_common.tcl + +ifcapable !wal {finish_test ; return } + +# Read and return the contents of file $filename. Treat the content as +# binary data. +# +proc readfile {filename} { + set fd [open $filename] + fconfigure $fd -encoding binary + fconfigure $fd -translation binary + set data [read $fd] + close $fd + return $data +} + +# +# File $filename must be a WAL file on disk. Check that the checksum of frame +# $iFrame in the file is correct when interpreting data as $endian-endian +# integers ($endian must be either "big" or "little"). If the checksum looks +# correct, return 1. Otherwise 0. +# +proc log_checksum_verify {filename iFrame endian} { + set data [readfile $filename] + + foreach {offset c1 c2} [log_checksum_calc $data $iFrame $endian] {} + + binary scan [string range $data $offset [expr $offset+7]] II expect1 expect2 + set expect1 [expr $expect1&0xFFFFFFFF] + set expect2 [expr $expect2&0xFFFFFFFF] + + expr {$c1==$expect1 && $c2==$expect2} +} + +# File $filename must be a WAL file on disk. Compute the checksum for frame +# $iFrame in the file by interpreting data as $endian-endian integers +# ($endian must be either "big" or "little"). Then write the computed +# checksum into the file. +# +proc log_checksum_write {filename iFrame endian} { + set data [readfile $filename] + + foreach {offset c1 c2} [log_checksum_calc $data $iFrame $endian] {} + + set bin [binary format II $c1 $c2] + set fd [open $filename r+] + fconfigure $fd -encoding binary + fconfigure $fd -translation binary + seek $fd $offset + puts -nonewline $fd $bin + close $fd +} + +# Calculate and return the checksum for a particular frame in a WAL. +# +# Arguments are: +# +# $data Blob containing the entire contents of a WAL. +# +# $iFrame Frame number within the $data WAL. Frames are numbered +# starting at 1. +# +# $endian One of "big" or "little". +# +# Returns a list of three elements, as follows: +# +# * The byte offset of the checksum belonging to frame $iFrame in the WAL. +# * The first integer in the calculated version of the checksum. +# * The second integer in the calculated version of the checksum. 
+# +proc log_checksum_calc {data iFrame endian} { + + binary scan [string range $data 8 11] I pgsz + if {$iFrame > 1} { + set n [wal_file_size [expr $iFrame-2] $pgsz] + binary scan [string range $data [expr $n+16] [expr $n+23]] II c1 c2 + } else { + set c1 0 + set c2 0 + wal_cksum $endian c1 c2 [string range $data 0 23] + } + + set n [wal_file_size [expr $iFrame-1] $pgsz] + wal_cksum $endian c1 c2 [string range $data $n [expr $n+7]] + wal_cksum $endian c1 c2 [string range $data [expr $n+24] [expr $n+24+$pgsz-1]] + + list [expr $n+16] $c1 $c2 +} + +# +# File $filename must be a WAL file on disk. Set the 'magic' field of the +# WAL header to indicate that checksums are $endian-endian ($endian must be +# either "big" or "little"). +# +# Also update the wal header checksum (since the wal header contents may +# have changed). +# +proc log_checksum_writemagic {filename endian} { + set val [expr {0x377f0682 | ($endian == "big" ? 1 : 0)}] + set bin [binary format I $val] + set fd [open $filename r+] + fconfigure $fd -encoding binary + fconfigure $fd -translation binary + puts -nonewline $fd $bin + + seek $fd 0 + set blob [read $fd 24] + set c1 0 + set c2 0 + wal_cksum $endian c1 c2 $blob + seek $fd 24 + puts -nonewline $fd [binary format II $c1 $c2] + + close $fd +} + +#------------------------------------------------------------------------- +# Test cases walcksum-1.* attempt to verify the following: +# +# * That both native and non-native order checksum log files can +# be recovered. +# +# * That when appending to native or non-native checksum log files +# SQLite continues to use the right kind of checksums. +# +# * Test point 2 when the appending process is not one that recovered +# the log file. +# +# * Test that both native and non-native checksum log files can be +# checkpointed. And that after doing so the next write to the log +# file occurs using native byte-order checksums. +# +set native "big" +if {$::tcl_platform(byteOrder) == "littleEndian"} { set native "little" } +foreach endian {big little} { + + # Create a database. Leave some data in the log file. + # + do_test walcksum-1.$endian.1 { + catch { db close } + file delete -force test.db test.db-wal test.db-journal + sqlite3 db test.db + execsql { + PRAGMA page_size = 1024; + PRAGMA auto_vacuum = 0; + PRAGMA synchronous = NORMAL; + + CREATE TABLE t1(a PRIMARY KEY, b); + INSERT INTO t1 VALUES(1, 'one'); + INSERT INTO t1 VALUES(2, 'two'); + INSERT INTO t1 VALUES(3, 'three'); + INSERT INTO t1 VALUES(5, 'five'); + + PRAGMA journal_mode = WAL; + INSERT INTO t1 VALUES(8, 'eight'); + INSERT INTO t1 VALUES(13, 'thirteen'); + INSERT INTO t1 VALUES(21, 'twentyone'); + } + + file copy -force test.db test2.db + file copy -force test.db-wal test2.db-wal + db close + + list [file size test2.db] [file size test2.db-wal] + } [list [expr 1024*3] [wal_file_size 6 1024]] + + # Verify that the checksums are valid for all frames and that they + # are calculated by interpreting data in native byte-order. + # + for {set f 1} {$f <= 6} {incr f} { + do_test walcksum-1.$endian.2.$f { + log_checksum_verify test2.db-wal $f $native + } 1 + } + + # Replace all checksums in the current WAL file with $endian versions. + # Then check that it is still possible to recover and read the database. 
+ # + log_checksum_writemagic test2.db-wal $endian + for {set f 1} {$f <= 6} {incr f} { + do_test walcksum-1.$endian.3.$f { + log_checksum_write test2.db-wal $f $endian + log_checksum_verify test2.db-wal $f $endian + } {1} + } + do_test walcksum-1.$endian.4.1 { + file copy -force test2.db test.db + file copy -force test2.db-wal test.db-wal + sqlite3 db test.db + execsql { SELECT a FROM t1 } + } {1 2 3 5 8 13 21} + + # Following recovery, any frames written to the log should use the same + # endianness as the existing frames. Check that this is the case. + # + do_test walcksum-1.$endian.5.0 { + execsql { + PRAGMA synchronous = NORMAL; + INSERT INTO t1 VALUES(34, 'thirtyfour'); + } + list [file size test.db] [file size test.db-wal] + } [list [expr 1024*3] [wal_file_size 8 1024]] + for {set f 1} {$f <= 8} {incr f} { + do_test walcksum-1.$endian.5.$f { + log_checksum_verify test.db-wal $f $endian + } {1} + } + + # Now connect a second connection to the database. Check that this one + # (not the one that did recovery) also appends frames to the log using + # the same endianness for checksums as the existing frames. + # + do_test walcksum-1.$endian.6 { + sqlite3 db2 test.db + execsql { + PRAGMA integrity_check; + SELECT a FROM t1; + } db2 + } {ok 1 2 3 5 8 13 21 34} + do_test walcksum-1.$endian.7.0 { + execsql { + PRAGMA synchronous = NORMAL; + INSERT INTO t1 VALUES(55, 'fiftyfive'); + } db2 + list [file size test.db] [file size test.db-wal] + } [list [expr 1024*3] [wal_file_size 10 1024]] + for {set f 1} {$f <= 10} {incr f} { + do_test walcksum-1.$endian.7.$f { + log_checksum_verify test.db-wal $f $endian + } {1} + } + + # Now that both the recoverer and non-recoverer have added frames to the + # log file, check that it can still be recovered. + # + file copy -force test.db test2.db + file copy -force test.db-wal test2.db-wal + do_test walcksum-1.$endian.7.11 { + sqlite3 db3 test2.db + execsql { + PRAGMA integrity_check; + SELECT a FROM t1; + } db3 + } {ok 1 2 3 5 8 13 21 34 55} + db3 close + + # Run a checkpoint on the database file. Then, check that any frames written + # to the start of the log use native byte-order checksums. + # + do_test walcksum-1.$endian.8.1 { + execsql { + PRAGMA wal_checkpoint; + INSERT INTO t1 VALUES(89, 'eightynine'); + } + log_checksum_verify test.db-wal 1 $native + } {1} + do_test walcksum-1.$endian.8.2 { + log_checksum_verify test.db-wal 2 $native + } {1} + do_test walcksum-1.$endian.8.3 { + log_checksum_verify test.db-wal 3 $native + } {0} + + do_test walcksum-1.$endian.9 { + execsql { + PRAGMA integrity_check; + SELECT a FROM t1; + } db2 + } {ok 1 2 3 5 8 13 21 34 55 89} + + catch { db close } + catch { db2 close } +} + +#------------------------------------------------------------------------- +# Test case walcksum-2.* tests that if a statement transaction is rolled +# back after frames are written to the WAL, and then (after writing some +# more) the outer transaction is committed, the WAL file is still correctly +# formatted (and can be recovered by a second process if required). 
+# +do_test walcksum-2.1 { + file delete -force test.db test.db-wal test.db-journal + sqlite3 db test.db + execsql { + PRAGMA synchronous = NORMAL; + PRAGMA page_size = 1024; + PRAGMA journal_mode = WAL; + PRAGMA cache_size = 10; + CREATE TABLE t1(x PRIMARY KEY); + PRAGMA wal_checkpoint; + INSERT INTO t1 VALUES(randomblob(800)); + BEGIN; + INSERT INTO t1 SELECT randomblob(800) FROM t1; /* 2 */ + INSERT INTO t1 SELECT randomblob(800) FROM t1; /* 4 */ + INSERT INTO t1 SELECT randomblob(800) FROM t1; /* 8 */ + INSERT INTO t1 SELECT randomblob(800) FROM t1; /* 16 */ + SAVEPOINT one; + INSERT INTO t1 SELECT randomblob(800) FROM t1; /* 32 */ + INSERT INTO t1 SELECT randomblob(800) FROM t1; /* 64 */ + INSERT INTO t1 SELECT randomblob(800) FROM t1; /* 128 */ + INSERT INTO t1 SELECT randomblob(800) FROM t1; /* 256 */ + ROLLBACK TO one; + INSERT INTO t1 SELECT randomblob(800) FROM t1; /* 32 */ + INSERT INTO t1 SELECT randomblob(800) FROM t1; /* 64 */ + INSERT INTO t1 SELECT randomblob(800) FROM t1; /* 128 */ + INSERT INTO t1 SELECT randomblob(800) FROM t1; /* 256 */ + COMMIT; + } + + file copy -force test.db test2.db + file copy -force test.db-wal test2.db-wal + + sqlite3 db2 test2.db + execsql { + PRAGMA integrity_check; + SELECT count(*) FROM t1; + } db2 +} {ok 256} +catch { db close } +catch { db2 close } + +#------------------------------------------------------------------------- +# Test case walcksum-3.* tests that the checksum calculation detects single +# byte changes to frame or frame-header data and considers the frame +# invalid as a result. +# +do_test walcksum-3.1 { + file delete -force test.db test.db-wal test.db-journal + sqlite3 db test.db + + execsql { + PRAGMA synchronous = NORMAL; + PRAGMA page_size = 1024; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, randomblob(300)); + INSERT INTO t1 VALUES(2, randomblob(300)); + PRAGMA journal_mode = WAL; + INSERT INTO t1 VALUES(3, randomblob(300)); + } + + file size test.db-wal +} [wal_file_size 1 1024] +do_test walcksum-3.2 { + file copy -force test.db-wal test2.db-wal + file copy -force test.db test2.db + sqlite3 db2 test2.db + execsql { SELECT a FROM t1 } db2 +} {1 2 3} +db2 close +file copy -force test.db test2.db + + +foreach incr {1 2 3 20 40 60 80 100 120 140 160 180 200 220 240 253 254 255} { + do_test walcksum-3.3.$incr { + set FAIL 0 + for {set iOff 0} {$iOff < [wal_file_size 1 1024]} {incr iOff} { + + file copy -force test.db-wal test2.db-wal + set fd [open test2.db-wal r+] + fconfigure $fd -encoding binary + fconfigure $fd -translation binary + + seek $fd $iOff + binary scan [read $fd 1] c x + seek $fd $iOff + puts -nonewline $fd [binary format c [expr {($x+$incr)&0xFF}]] + close $fd + + sqlite3 db2 test2.db + if { [execsql { SELECT a FROM t1 } db2] != "1 2" } {set FAIL 1} + db2 close + } + set FAIL + } {0} +} + +finish_test + diff --git a/test/walcrash.test b/test/walcrash.test new file mode 100644 index 0000000..0ef21ff --- /dev/null +++ b/test/walcrash.test @@ -0,0 +1,296 @@ +# 2010 February 8 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. 
The +# focus of this file is testing the operation of the library when +# recovering a database following a simulated system failure in +# "PRAGMA journal_mode=WAL" mode. +# + +# +# These are 'warm-body' tests of database recovery used while developing +# the WAL code. They serve to prove that a few really simple cases work: +# +# walcrash-1.*: Recover a database. +# walcrash-2.*: Recover a database where the failed transaction spanned more +# than one page. +# walcrash-3.*: Recover multiple databases where the failed transaction +# was a multi-file transaction. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +ifcapable !wal {finish_test ; return } + +db close + +set seed 0 +set REPEATS 100 + +# walcrash-1.* +# +for {set i 1} {$i < $REPEATS} {incr i} { + file delete -force test.db test.db-wal + do_test walcrash-1.$i.1 { + crashsql -delay 4 -file test.db-wal -seed [incr seed] { + PRAGMA journal_mode = WAL; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 1); + INSERT INTO t1 VALUES(2, 3); + INSERT INTO t1 VALUES(3, 6); + } + } {1 {child process exited abnormally}} + do_test walcrash-1.$i.2 { + sqlite3 db test.db + execsql { SELECT sum(a)==max(b) FROM t1 } + } {1} + integrity_check walcrash-1.$i.3 + db close + + do_test walcrash-1.$i.4 { + crashsql -delay 2 -file test.db-wal -seed [incr seed] { + INSERT INTO t1 VALUES(4, (SELECT sum(a) FROM t1) + 4); + INSERT INTO t1 VALUES(5, (SELECT sum(a) FROM t1) + 5); + } + } {1 {child process exited abnormally}} + do_test walcrash-1.$i.5 { + sqlite3 db test.db + execsql { SELECT sum(a)==max(b) FROM t1 } + } {1} + integrity_check walcrash-1.$i.6 + do_test walcrash-1.$i.7 { + execsql { PRAGMA main.journal_mode } + } {wal} + db close +} + +# walcrash-2.* +# +for {set i 1} {$i < $REPEATS} {incr i} { + file delete -force test.db test.db-wal + do_test walcrash-2.$i.1 { + crashsql -delay 4 -file test.db-wal -seed [incr seed] { + PRAGMA journal_mode = WAL; + CREATE TABLE t1(a PRIMARY KEY, b); + INSERT INTO t1 VALUES(1, 2); + INSERT INTO t1 VALUES(3, 4); + INSERT INTO t1 VALUES(5, 9); + } + } {1 {child process exited abnormally}} + do_test walcrash-2.$i.2 { + sqlite3 db test.db + execsql { SELECT sum(a)==max(b) FROM t1 } + } {1} + integrity_check walcrash-2.$i.3 + db close + + do_test walcrash-2.$i.4 { + crashsql -delay 2 -file test.db-wal -seed [incr seed] { + INSERT INTO t1 VALUES(6, (SELECT sum(a) FROM t1) + 6); + INSERT INTO t1 VALUES(7, (SELECT sum(a) FROM t1) + 7); + } + } {1 {child process exited abnormally}} + do_test walcrash-2.$i.5 { + sqlite3 db test.db + execsql { SELECT sum(a)==max(b) FROM t1 } + } {1} + integrity_check walcrash-2.$i.6 + do_test walcrash-2.$i.6 { + execsql { PRAGMA main.journal_mode } + } {wal} + db close +} + +# walcrash-3.* +# +# for {set i 1} {$i < $REPEATS} {incr i} { +# file delete -force test.db test.db-wal +# file delete -force test2.db test2.db-wal +# +# do_test walcrash-3.$i.1 { +# crashsql -delay 2 -file test2.db-wal -seed [incr seed] { +# PRAGMA journal_mode = WAL; +# ATTACH 'test2.db' AS aux; +# CREATE TABLE t1(a PRIMARY KEY, b); +# CREATE TABLE aux.t2(a PRIMARY KEY, b); +# BEGIN; +# INSERT INTO t1 VALUES(1, 2); +# INSERT INTO t2 VALUES(1, 2); +# COMMIT; +# } +# } {1 {child process exited abnormally}} +# +# do_test walcrash-3.$i.2 { +# sqlite3_wal db test.db +# execsql { +# ATTACH 'test2.db' AS aux; +# SELECT * FROM t1 EXCEPT SELECT * FROM t2; +# } +# } {} +# do_test walcrash-3.$i.3 { execsql { PRAGMA main.integrity_check } } {ok} +# do_test walcrash-3.$i.4 { execsql { PRAGMA aux.integrity_check } 
} {ok} +# +# db close +# } + +# walcrash-4.* +# +for {set i 1} {$i < $REPEATS} {incr i} { + file delete -force test.db test.db-wal + file delete -force test2.db test2.db-wal + + do_test walcrash-4.$i.1 { + crashsql -delay 3 -file test.db-wal -seed [incr seed] -blocksize 4096 { + PRAGMA journal_mode = WAL; + PRAGMA page_size = 1024; + CREATE TABLE t1(a PRIMARY KEY, b); + INSERT INTO t1 VALUES(1, 2); + INSERT INTO t1 VALUES(3, 4); + } + } {1 {child process exited abnormally}} + + do_test walcrash-4.$i.2 { + sqlite3 db test.db + execsql { + SELECT * FROM t1 WHERE a = 1; + } + } {1 2} + do_test walcrash-4.$i.3 { execsql { PRAGMA main.integrity_check } } {ok} + do_test walcrash-4.$i.4 { execsql { PRAGMA main.journal_mode } } {wal} + + db close +} + +# walcrash-5.* +# +for {set i 1} {$i < $REPEATS} {incr i} { + file delete -force test.db test.db-wal + file delete -force test2.db test2.db-wal + + do_test walcrash-5.$i.1 { + crashsql -delay 11 -file test.db-wal -seed [incr seed] -blocksize 4096 { + PRAGMA journal_mode = WAL; + PRAGMA page_size = 1024; + BEGIN; + CREATE TABLE t1(x PRIMARY KEY); + INSERT INTO t1 VALUES(randomblob(900)); + INSERT INTO t1 VALUES(randomblob(900)); + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 4 */ + COMMIT; + INSERT INTO t1 SELECT randomblob(900) FROM t1 LIMIT 4; /* 8 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1 LIMIT 4; /* 12 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1 LIMIT 4; /* 16 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1 LIMIT 4; /* 20 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1 LIMIT 4; /* 24 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1 LIMIT 4; /* 28 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1 LIMIT 4; /* 32 */ + + PRAGMA wal_checkpoint; + INSERT INTO t1 VALUES(randomblob(900)); + INSERT INTO t1 VALUES(randomblob(900)); + INSERT INTO t1 VALUES(randomblob(900)); + } + } {1 {child process exited abnormally}} + + do_test walcrash-5.$i.2 { + sqlite3 db test.db + execsql { SELECT count(*)==33 OR count(*)==34 FROM t1 WHERE x != 1 } + } {1} + do_test walcrash-5.$i.3 { execsql { PRAGMA main.integrity_check } } {ok} + do_test walcrash-5.$i.4 { execsql { PRAGMA main.journal_mode } } {wal} + + db close +} + +# walcrash-6.* +# +for {set i 1} {$i < $REPEATS} {incr i} { + file delete -force test.db test.db-wal + file delete -force test2.db test2.db-wal + + do_test walcrash-6.$i.1 { + crashsql -delay 12 -file test.db-wal -seed [incr seed] -blocksize 512 { + PRAGMA journal_mode = WAL; + PRAGMA page_size = 1024; + BEGIN; + CREATE TABLE t1(x PRIMARY KEY); + INSERT INTO t1 VALUES(randomblob(900)); + INSERT INTO t1 VALUES(randomblob(900)); + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 4 */ + COMMIT; + INSERT INTO t1 SELECT randomblob(900) FROM t1 LIMIT 4; /* 8 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1 LIMIT 4; /* 12 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1 LIMIT 4; /* 16 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1 LIMIT 4; /* 20 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1 LIMIT 4; /* 24 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1 LIMIT 4; /* 28 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1 LIMIT 4; /* 32 */ + + PRAGMA wal_checkpoint; + INSERT INTO t1 VALUES(randomblob(900)); + INSERT INTO t1 VALUES(randomblob(900)); + INSERT INTO t1 VALUES(randomblob(900)); + } + } {1 {child process exited abnormally}} + + do_test walcrash-6.$i.2 { + sqlite3 db test.db + execsql { SELECT count(*)==34 OR count(*)==35 FROM t1 WHERE x != 1 } + } {1} + do_test walcrash-6.$i.3 { execsql { PRAGMA 
main.integrity_check } } {ok} + do_test walcrash-6.$i.4 { execsql { PRAGMA main.journal_mode } } {wal} + + db close +} + +#------------------------------------------------------------------------- +# This test case simulates a crash while checkpointing the database. Page +# 1 is one of the pages overwritten by the checkpoint. This is a special +# case because it means the content of page 1 may be damaged. SQLite will +# have to determine: +# +# (a) that the database is a WAL database, and +# (b) the database page-size +# +# based on the log file. +# +for {set i 1} {$i < $REPEATS} {incr i} { + file delete -force test.db test.db-wal + + # Select a page-size for this test. + # + set pgsz [lindex {512 1024 2048 4096 8192 16384} [expr $i%6]] + + do_test walcrash-7.$i.1 { + crashsql -delay 3 -file test.db -seed [incr seed] -blocksize 512 " + PRAGMA page_size = $pgsz; + PRAGMA journal_mode = wal; + BEGIN; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + COMMIT; + PRAGMA wal_checkpoint; + CREATE INDEX i1 ON t1(a); + PRAGMA wal_checkpoint; + " + } {1 {child process exited abnormally}} + + do_test walcrash-7.$i.2 { + sqlite3 db test.db + execsql { SELECT b FROM t1 WHERE a = 1 } + } {2} + do_test walcrash-7.$i.3 { execsql { PRAGMA main.integrity_check } } {ok} + do_test walcrash-7.$i.4 { execsql { PRAGMA main.journal_mode } } {wal} + + db close +} + +finish_test + diff --git a/test/walcrash2.test b/test/walcrash2.test new file mode 100644 index 0000000..7116281 --- /dev/null +++ b/test/walcrash2.test @@ -0,0 +1,99 @@ +# 2010 May 25 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/lock_common.tcl +source $testdir/wal_common.tcl +ifcapable !wal {finish_test ; return } + + +#------------------------------------------------------------------------- +# This test case demonstrates a flaw in the wal-index manipulation that +# existed at one point: If a process crashes mid-transaction, it may have +# already added some entries to one of the hash-tables in the wal-index. +# If the transaction were to be explicitly rolled back at this point, the +# hash-table entries would be removed as part of the rollback. However, +# if the process crashes, the transaction is implicitly rolled back and +# the rogue entries remain in the hash table. +# +# Normally, this causes no problem - readers can tell the difference +# between committed and uncommitted entries in the hash table. However, +# if it happens often enough that all slots in the hash-table become +# non-zero, the next process that attempts to read or write the hash +# table falls into an infinite loop. +# +# Even if run with an SQLite version affected by the bug, this test case +# only goes into an infinite loop if SQLite is compiled without SQLITE_DEBUG +# defined. If SQLITE_DEBUG is defined, the program is halted by a failing +# assert() before entering the infinite loop. +# +# walcrash2-1.1: Create a database. Commit a transaction that adds 8 frames +# to the WAL (and 8 entry to the first hash-table in the +# wal-index). +# +# walcrash2-1.2: Have an external process open a transaction, add 8 entries +# to the wal-index hash-table, then crash. 
Repeat this 1023 +# times (so that the wal-index contains 8192 entries - all +# slots are non-zero). +# +# walcrash2-1.3: Using a new database connection, attempt to query the +# database. This should cause the process to go into the +# infinite loop. +# +do_test walcrash2-1.1 { + execsql { + PRAGMA page_size = 1024; + PRAGMA auto_vacuum = off; + PRAGMA journal_mode = WAL; + PRAGMA synchronous = NORMAL; + BEGIN; + CREATE TABLE t1(x); + CREATE TABLE t2(x); + CREATE TABLE t3(x); + CREATE TABLE t4(x); + CREATE TABLE t5(x); + CREATE TABLE t6(x); + CREATE TABLE t7(x); + COMMIT; + } + file size test.db-wal +} [wal_file_size 8 1024] +for {set nEntry 8} {$nEntry < 8192} {incr nEntry 8} { + do_test walcrash2-1.2.[expr $nEntry/8] { + set C [launch_testfixture] + testfixture $C { + sqlite3 db test.db + db eval { + PRAGMA cache_size = 15; + BEGIN; + INSERT INTO t1 VALUES(randomblob(900)); -- 1 row, 1 page + INSERT INTO t1 SELECT * FROM t1; -- 2 rows, 3 pages + INSERT INTO t1 SELECT * FROM t1; -- 4 rows, 5 pages + INSERT INTO t1 SELECT * FROM t1; -- 8 rows, 9 pages + INSERT INTO t1 SELECT * FROM t1; -- 16 rows, 17 pages + INSERT INTO t1 SELECT * FROM t1 LIMIT 3; -- 20 rows, 20 pages + } + } + close $C + file size test.db-wal + } [wal_file_size 16 1024] +} +do_test walcrash2-1.3 { + sqlite3 db2 test.db + execsql { SELECT count(*) FROM t1 } db2 +} {0} +catch { db2 close } + +finish_test + diff --git a/test/walfault.test b/test/walfault.test new file mode 100644 index 0000000..f22a40e --- /dev/null +++ b/test/walfault.test @@ -0,0 +1,450 @@ +# 2010 May 03 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the operation of the library in +# "PRAGMA journal_mode=WAL" mode. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/malloc_common.tcl +source $testdir/lock_common.tcl + +ifcapable !wal {finish_test ; return } + +#------------------------------------------------------------------------- +# This test case, walfault-1-*, simulates faults while executing a +# +# PRAGMA journal_mode = WAL; +# +# statement immediately after creating a new database. +# +do_test walfault-1-pre-1 { + faultsim_delete_and_reopen + faultsim_save_and_close +} {} +do_faultsim_test walfault-1 -prep { + faultsim_restore_and_reopen +} -body { + db eval { PRAGMA main.journal_mode = WAL } +} -test { + + faultsim_test_result {0 wal} + + # Test that the connection that encountered an error as part of + # "PRAGMA journal_mode = WAL" and a new connection use the same + # journal mode when accessing the database. + # + # If "PRAGMA journal_mode" is executed immediately, connection [db] (the + # one that hit the error in journal_mode="WAL") might return "wal" even + # if it failed to switch the database to WAL mode. This is not considered + # a problem. When it tries to read the database, connection [db] correctly + # recognizes that it is a rollback database and switches back to a + # rollback compatible journal mode. 
+ # + if {[permutation] != "inmemory_journal"} { + set jm [db one {SELECT * FROM sqlite_master ; PRAGMA main.journal_mode}] + sqlite3 db2 test.db + set jm2 [db2 one {SELECT * FROM sqlite_master ; PRAGMA main.journal_mode}] + db2 close + + if { $jm!=$jm2 } { error "Journal modes do not match: $jm $jm2" } + if { $testrc==0 && $jm!="wal" } { error "Journal mode is not WAL" } + } +} + +#-------------------------------------------------------------------------- +# Test case walfault-2-* tests fault injection during recovery of a +# short WAL file (a dozen frames or thereabouts). +# +do_test walfault-2-pre-1 { + sqlite3 db test.db + execsql { + PRAGMA journal_mode = WAL; + BEGIN; + CREATE TABLE x(y, z, UNIQUE(y, z)); + INSERT INTO x VALUES(randomblob(100), randomblob(100)); + COMMIT; + PRAGMA wal_checkpoint; + + INSERT INTO x SELECT randomblob(100), randomblob(100) FROM x; + INSERT INTO x SELECT randomblob(100), randomblob(100) FROM x; + INSERT INTO x SELECT randomblob(100), randomblob(100) FROM x; + } + execsql { + SELECT count(*) FROM x + } +} {8} +do_test walfault-2-pre-2 { + faultsim_save_and_close + faultsim_restore_and_reopen + execsql { SELECT count(*) FROM x } +} {8} +do_faultsim_test walfault-2 -prep { + faultsim_restore_and_reopen +} -body { + execsql { SELECT count(*) FROM x } +} -test { + faultsim_test_result {0 8} + faultsim_integrity_check +} + +#-------------------------------------------------------------------------- +# Test fault injection while writing and checkpointing a small WAL file. +# +do_test walfault-3-pre-1 { + sqlite3 db test.db + execsql { + PRAGMA auto_vacuum = 1; + PRAGMA journal_mode = WAL; + CREATE TABLE abc(a PRIMARY KEY); + INSERT INTO abc VALUES(randomblob(1500)); + } + db close + faultsim_save_and_close +} {} +do_faultsim_test walfault-3 -prep { + faultsim_restore_and_reopen +} -body { + db eval { + DELETE FROM abc; + PRAGMA wal_checkpoint; + } +} -test { + faultsim_test_result {0 {}} +} + + +#-------------------------------------------------------------------------- +# +if {[permutation] != "inmemory_journal"} { + faultsim_delete_and_reopen + faultsim_save_and_close + do_faultsim_test walfault-4 -prep { + faultsim_restore_and_reopen + } -body { + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE t1(a PRIMARY KEY, b); + INSERT INTO t1 VALUES('a', 'b'); + PRAGMA wal_checkpoint; + SELECT * FROM t1; + } + } -test { + faultsim_test_result {0 {wal a b}} + faultsim_integrity_check + } +} + +#-------------------------------------------------------------------------- +# +do_test walfault-5-pre-1 { + faultsim_delete_and_reopen + execsql { + PRAGMA page_size = 512; + PRAGMA journal_mode = WAL; + } + faultsim_save_and_close +} {} +do_faultsim_test walfault-5 -faults shmerr* -prep { + faultsim_restore_and_reopen + execsql { PRAGMA wal_autocheckpoint = 0 } + shmfault filter xShmMap +} -body { + execsql { + CREATE TABLE t1(x); + BEGIN; + INSERT INTO t1 VALUES(randomblob(400)); /* 1 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 2 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 4 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 8 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 16 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 32 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 64 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 128 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 256 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 512 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 1024 */ + INSERT 
INTO t1 SELECT randomblob(400) FROM t1; /* 2048 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 4096 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 8192 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 16384 */ + COMMIT; + SELECT count(*) FROM t1; + } +} -test { + faultsim_test_result {0 16384} + faultsim_integrity_check +} + +#-------------------------------------------------------------------------- +# +do_test walfault-6-pre-1 { + faultsim_delete_and_reopen + execsql { + PRAGMA page_size = 512; + PRAGMA journal_mode = WAL; + PRAGMA wal_autocheckpoint = 0; + CREATE TABLE t1(x); + BEGIN; + INSERT INTO t1 VALUES(randomblob(400)); /* 1 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 2 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 4 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 8 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 16 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 32 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 64 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 128 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 256 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 512 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 1024 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 2048 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 4096 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 8192 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 16384 */ + COMMIT; + } + faultsim_save_and_close +} {} +do_faultsim_test walfault-6 -faults shmerr* -prep { + faultsim_restore_and_reopen + shmfault filter xShmMap +} -body { + execsql { SELECT count(*) FROM t1 } +} -test { + faultsim_test_result {0 16384} + faultsim_integrity_check + set n [db one {SELECT count(*) FROM t1}] + if {$n != 16384 && $n != 0} { error "Incorrect number of rows: $n" } +} + +#-------------------------------------------------------------------------- +# +do_test walfault-7-pre-1 { + faultsim_delete_and_reopen + execsql { + PRAGMA page_size = 512; + PRAGMA journal_mode = WAL; + PRAGMA wal_autocheckpoint = 0; + CREATE TABLE t1(x); + BEGIN; + INSERT INTO t1 VALUES(randomblob(400)); /* 1 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 2 */ + INSERT INTO t1 SELECT randomblob(400) FROM t1; /* 4 */ + COMMIT; + } + faultsim_save_and_close +} {} +do_faultsim_test walfault-7 -prep { + faultsim_restore_and_reopen +} -body { + execsql { SELECT count(*) FROM t1 } +} -test { + faultsim_test_result {0 4} + set n [db one {SELECT count(*) FROM t1}] + if {$n != 4 && $n != 0} { error "Incorrect number of rows: $n" } +} + +#-------------------------------------------------------------------------- +# +do_test walfault-8-pre-1 { + faultsim_delete_and_reopen + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE abc(a PRIMARY KEY); + INSERT INTO abc VALUES(randomblob(900)); + } + faultsim_save_and_close +} {} +do_faultsim_test walfault-8 -prep { + faultsim_restore_and_reopen + execsql { PRAGMA cache_size = 10 } +} -body { + execsql { + BEGIN; + INSERT INTO abc SELECT randomblob(900) FROM abc; /* 1 */ + --INSERT INTO abc SELECT randomblob(900) FROM abc; /* 2 */ + --INSERT INTO abc SELECT randomblob(900) FROM abc; /* 4 */ + --INSERT INTO abc SELECT randomblob(900) FROM abc; /* 8 */ + ROLLBACK; + SELECT count(*) FROM abc; + } +} -test { + faultsim_test_result {0 1} + + faultsim_integrity_check + catch { db eval ROLLBACK } + faultsim_integrity_check + + set n [db one {SELECT count(*) FROM abc}] + if {$n != 1} { error "Incorrect 
number of rows: $n" } +} + +#-------------------------------------------------------------------------- +# +do_test walfault-9-pre-1 { + faultsim_delete_and_reopen + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE abc(a PRIMARY KEY); + INSERT INTO abc VALUES(randomblob(900)); + } + faultsim_save_and_close +} {} +do_faultsim_test walfault-9 -prep { + #if {$iFail<73} { set iFail 73 } + #if {$iFail>73} { exit } + + faultsim_restore_and_reopen + execsql { PRAGMA cache_size = 10 } +} -body { + execsql { + BEGIN; + INSERT INTO abc SELECT randomblob(900) FROM abc; /* 1 */ + SAVEPOINT spoint; + INSERT INTO abc SELECT randomblob(900) FROM abc; /* 2 */ + INSERT INTO abc SELECT randomblob(900) FROM abc; /* 4 */ + INSERT INTO abc SELECT randomblob(900) FROM abc; /* 8 */ + ROLLBACK TO spoint; + COMMIT; + SELECT count(*) FROM abc; + } +} -test { + faultsim_test_result {0 2} + faultsim_integrity_check + + catch { db eval { ROLLBACK TO spoint } } + catch { db eval { COMMIT } } + set n [db one {SELECT count(*) FROM abc}] + if {$n != 1 && $n != 2} { error "Incorrect number of rows: $n" } +} + +do_test walfault-10-pre1 { + faultsim_delete_and_reopen + execsql { + PRAGMA journal_mode = WAL; + PRAGMA wal_autocheckpoint = 0; + CREATE TABLE z(zz INTEGER PRIMARY KEY, zzz BLOB); + CREATE INDEX zzzz ON z(zzz); + INSERT INTO z VALUES(NULL, randomblob(800)); + INSERT INTO z VALUES(NULL, randomblob(800)); + INSERT INTO z SELECT NULL, randomblob(800) FROM z; + INSERT INTO z SELECT NULL, randomblob(800) FROM z; + INSERT INTO z SELECT NULL, randomblob(800) FROM z; + INSERT INTO z SELECT NULL, randomblob(800) FROM z; + INSERT INTO z SELECT NULL, randomblob(800) FROM z; + } + faultsim_save_and_close +} {} +do_faultsim_test walfault-10 -prep { + faultsim_restore_and_reopen + execsql { + PRAGMA cache_size = 10; + BEGIN; + UPDATE z SET zzz = randomblob(799); + } + + set ::stmt [sqlite3_prepare db "SELECT zzz FROM z WHERE zz IN (1, 2, 3)" -1] + sqlite3_step $::stmt +} -body { + execsql { INSERT INTO z VALUES(NULL, NULL) } +} -test { + sqlite3_finalize $::stmt + faultsim_integrity_check + + faultsim_test_result {0 {}} + catch { db eval { ROLLBACK } } + faultsim_integrity_check + + set n [db eval {SELECT count(*), sum(length(zzz)) FROM z}] + if {$n != "64 51200"} { error "Incorrect data: $n" } +} + +#-------------------------------------------------------------------------- +# Test fault injection while checkpointing a large WAL file, if the +# checkpoint is the first operation run after opening the database. +# This means that some of the required wal-index pages are mapped as part of +# the checkpoint process, which means there are a few more opportunities +# for IO errors. +# +# To speed this up, IO errors are only simulated within xShmMap() calls. 
+# +do_test walfault-11-pre-1 { + sqlite3 db test.db + execsql { + PRAGMA journal_mode = WAL; + PRAGMA wal_autocheckpoint = 0; + BEGIN; + CREATE TABLE abc(a PRIMARY KEY); + INSERT INTO abc VALUES(randomblob(1500)); + INSERT INTO abc VALUES(randomblob(1500)); + INSERT INTO abc SELECT randomblob(1500) FROM abc; -- 4 + INSERT INTO abc SELECT randomblob(1500) FROM abc; -- 8 + INSERT INTO abc SELECT randomblob(1500) FROM abc; -- 16 + INSERT INTO abc SELECT randomblob(1500) FROM abc; -- 32 + INSERT INTO abc SELECT randomblob(1500) FROM abc; -- 64 + INSERT INTO abc SELECT randomblob(1500) FROM abc; -- 128 + INSERT INTO abc SELECT randomblob(1500) FROM abc; -- 256 + INSERT INTO abc SELECT randomblob(1500) FROM abc; -- 512 + INSERT INTO abc SELECT randomblob(1500) FROM abc; -- 1024 + INSERT INTO abc SELECT randomblob(1500) FROM abc; -- 2048 + INSERT INTO abc SELECT randomblob(1500) FROM abc; -- 4096 + COMMIT; + } + faultsim_save_and_close +} {} +do_faultsim_test walfault-11 -faults shmerr* -prep { + catch { db2 close } + faultsim_restore_and_reopen + shmfault filter xShmMap +} -body { + db eval { SELECT count(*) FROM abc } + sqlite3 db2 test.db -vfs shmfault + db2 eval { PRAGMA wal_checkpoint } +} -test { + faultsim_test_result {0 {}} +} + +#------------------------------------------------------------------------- +# Test the handling of the various IO/OOM/SHM errors that may occur during +# a log recovery operation undertaken as part of a call to +# sqlite3_wal_checkpoint(). +# +do_test walfault-12-pre-1 { + faultsim_delete_and_reopen + execsql { + PRAGMA journal_mode = WAL; + PRAGMA wal_autocheckpoint = 0; + BEGIN; + CREATE TABLE abc(a PRIMARY KEY); + INSERT INTO abc VALUES(randomblob(1500)); + INSERT INTO abc VALUES(randomblob(1500)); + COMMIT; + } + faultsim_save_and_close +} {} +do_faultsim_test walfault-12 -prep { + if {[info commands shmfault] == ""} { + testvfs shmfault -default true + } + faultsim_restore_and_reopen + db eval { SELECT * FROM sqlite_master } + shmfault shm test.db [string repeat "\000" 40] +} -body { + set rc [sqlite3_wal_checkpoint db] + if {$rc != "SQLITE_OK"} { error [sqlite3_errmsg db] } +} -test { + db close + faultsim_test_result {0 {}} +} + + +finish_test diff --git a/test/walhook.test b/test/walhook.test new file mode 100644 index 0000000..631ec83 --- /dev/null +++ b/test/walhook.test @@ -0,0 +1,109 @@ +# 2010 April 19 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the operation of the library in +# "PRAGMA journal_mode=WAL" mode. +# +# More specifically, this file contains regression tests for the +# sqlite3_wal_hook() mechanism, including the sqlite3_wal_autocheckpoint() +# and "PRAGMA wal_autocheckpoint" convenience interfaces. 
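Both interfaces named above are plain C APIs. The sketch below is an illustration rather than part of the patch: it shows how a hook registered with sqlite3_wal_hook() can reproduce the behaviour that sqlite3_wal_autocheckpoint(db, 10) or "PRAGMA wal_autocheckpoint = 10" would configure. Only one write-ahead-log hook can be active per connection, so installing the built-in auto-checkpoint hook afterwards would replace this one.

#include <sqlite3.h>

/* Called after each committed write transaction in WAL mode; nFrame is the
** number of frames currently in the log file. */
static int my_wal_hook(void *pArg, sqlite3 *db, const char *zDb, int nFrame){
  (void)pArg;
  if( nFrame>=10 ){
    /* Ignore errors such as SQLITE_BUSY, as the built-in hook does. */
    sqlite3_wal_checkpoint(db, zDb);
  }
  return SQLITE_OK;
}

static void configure_connection(sqlite3 *db){
  sqlite3_wal_hook(db, my_wal_hook, 0);
  /* Equivalent convenience call: sqlite3_wal_autocheckpoint(db, 10); */
}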
+# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/wal_common.tcl + +ifcapable !wal {finish_test ; return } + +set ::wal_hook [list] +proc wal_hook {zDb nEntry} { + lappend ::wal_hook $zDb $nEntry + return 0 +} +db wal_hook wal_hook + +do_test walhook-1.1 { + execsql { + PRAGMA page_size = 1024; + PRAGMA auto_vacuum = 0; + PRAGMA journal_mode = wal; + PRAGMA synchronous = normal; + CREATE TABLE t1(i PRIMARY KEY, j); + } + set ::wal_hook +} {main 3} + +do_test walhook-1.2 { + set ::wal_hook [list] + execsql { INSERT INTO t1 VALUES(1, 'one') } + set ::wal_hook +} {main 5} +do_test walhook-1.3 { + proc wal_hook {args} { db eval {PRAGMA wal_checkpoint}; return 0 } + execsql { INSERT INTO t1 VALUES(2, 'two') } + file size test.db +} [expr 3*1024] +do_test walhook-1.4 { + proc wal_hook {zDb nEntry} { + execsql { PRAGMA wal_checkpoint } + return 0 + } + execsql { CREATE TABLE t2(a, b) } + file size test.db +} [expr 4*1024] + +do_test walhook-1.5 { + sqlite3 db2 test.db + proc wal_hook {zDb nEntry} { + execsql { PRAGMA wal_checkpoint } db2 + return 0 + } + execsql { CREATE TABLE t3(a PRIMARY KEY, b) } + file size test.db +} [expr 6*1024] + +db2 close +db close +sqlite3 db test.db +do_test walhook-2.1 { + execsql { PRAGMA synchronous = NORMAL } + execsql { PRAGMA wal_autocheckpoint } +} {1000} +do_test walhook-2.2 { + execsql { PRAGMA wal_autocheckpoint = 10} +} {10} +do_test walhook-2.3 { + execsql { PRAGMA wal_autocheckpoint } +} {10} + +# +# The database connection is configured with "PRAGMA wal_autocheckpoint = 10". +# Check that transactions are written to the log file until it contains at +# least 10 frames, then the database is checkpointed. Subsequent transactions +# are written into the start of the log file. +# +foreach {tn sql dbpages logpages} { + 4 "CREATE TABLE t4(x PRIMARY KEY, y)" 6 3 + 5 "INSERT INTO t4 VALUES(1, 'one')" 6 5 + 6 "INSERT INTO t4 VALUES(2, 'two')" 6 7 + 7 "INSERT INTO t4 VALUES(3, 'three')" 6 9 + 8 "INSERT INTO t4 VALUES(4, 'four')" 8 11 + 9 "INSERT INTO t4 VALUES(5, 'five')" 8 11 +} { + do_test walhook-2.$tn { + execsql $sql + list [file size test.db] [file size test.db-wal] + } [list [expr $dbpages*1024] [wal_file_size $logpages 1024]] +} + +catch { db2 close } +catch { db close } +finish_test diff --git a/test/walmode.test b/test/walmode.test new file mode 100644 index 0000000..1a54277 --- /dev/null +++ b/test/walmode.test @@ -0,0 +1,380 @@ +# 2010 April 19 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the operation of the library in +# "PRAGMA journal_mode=WAL" mode. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/malloc_common.tcl + +# If the library was compiled without WAL support, check that the +# "PRAGMA journal_mode=WAL" treats "WAL" as an unrecognized mode. 
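The same capability check can be made from C. In a build without WAL support, "PRAGMA journal_mode=wal" simply reports the previous journal mode back (typically "delete"), so comparing the single result row against "wal" tells the caller whether WAL mode actually took effect. The function name below is invented for illustration.

#include <string.h>
#include <sqlite3.h>

/* Return 1 if the connection is now in WAL mode, 0 otherwise. */
static int try_wal(sqlite3 *db){
  sqlite3_stmt *pStmt;
  int isWal = 0;
  if( sqlite3_prepare_v2(db, "PRAGMA journal_mode=wal", -1, &pStmt, 0)==SQLITE_OK ){
    if( sqlite3_step(pStmt)==SQLITE_ROW ){
      const unsigned char *zMode = sqlite3_column_text(pStmt, 0);
      isWal = (zMode!=0 && strcmp((const char*)zMode, "wal")==0);
    }
    sqlite3_finalize(pStmt);
  }
  return isWal;
}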
+# +ifcapable !wal { + + do_test walmode-0.1 { + execsql { PRAGMA journal_mode = wal } + } {delete} + do_test walmode-0.2 { + execsql { PRAGMA main.journal_mode = wal } + } {delete} + do_test walmode-0.3 { + execsql { PRAGMA main.journal_mode } + } {delete} + + finish_test + return +} + +do_test walmode-1.1 { + set sqlite_sync_count 0 + execsql { PRAGMA page_size = 1024 } + execsql { PRAGMA journal_mode = wal } +} {wal} +do_test walmode-1.2 { + file size test.db +} {1024} + +set expected_sync_count 3 +if {$::tcl_platform(platform)!="windows"} { + ifcapable dirsync { + incr expected_sync_count + } +} +do_test walmode-1.3 { + set sqlite_sync_count +} $expected_sync_count + +do_test walmode-1.4 { + file exists test.db-wal +} {0} +do_test walmode-1.5 { + execsql { CREATE TABLE t1(a, b) } + file size test.db +} {1024} +do_test walmode-1.6 { + file exists test.db-wal +} {1} +do_test walmode-1.7 { + db close + file exists test.db-wal +} {0} + +# There is now a database file with the read and write versions set to 2 +# in the file system. This file should default to WAL mode. +# +do_test walmode-2.1 { + sqlite3 db test.db + file exists test.db-wal +} {0} +do_test walmode-2.2 { + execsql { SELECT * FROM sqlite_master } + file exists test.db-wal +} {1} +do_test walmode-2.3 { + db close + file exists test.db-wal +} {0} + +# If the first statement executed is "PRAGMA journal_mode = wal", and +# the file is already configured for WAL (read and write versions set +# to 2), then there should be no need to write the database. The +# statement should cause the client to connect to the log file. +# +set sqlite_sync_count 0 +do_test walmode-3.1 { + sqlite3 db test.db + execsql { PRAGMA journal_mode = wal } +} {wal} +do_test walmode-3.2 { + list $sqlite_sync_count [file exists test.db-wal] [file size test.db-wal] +} {0 1 0} + +# Test that changing back to journal_mode=persist works. +# +do_test walmode-4.1 { + execsql { INSERT INTO t1 VALUES(1, 2) } + execsql { PRAGMA journal_mode = persist } +} {persist} +do_test walmode-4.2 { + list [file exists test.db-journal] [file exists test.db-wal] +} {1 0} +do_test walmode-4.3 { + execsql { SELECT * FROM t1 } +} {1 2} +do_test walmode-4.4 { + db close + sqlite3 db test.db + execsql { SELECT * FROM t1 } +} {1 2} +do_test walmode-4.5 { + list [file exists test.db-journal] [file exists test.db-wal] +} {1 0} + +# Test that nothing goes wrong if a connection is prevented from changing +# from WAL to rollback mode because a second connection has the database +# open. Or from rollback to WAL. 
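A hedged C sketch of the situation the walmode-4 tests below exercise: while a second connection is attached to the same WAL-mode database, the first connection cannot switch back to a rollback journal, because the mode change needs an exclusive lock. The statement fails with "database is locked" and the database stays in WAL mode until the other connection goes away. File names are illustrative.

#include <stdio.h>
#include <sqlite3.h>

static void journal_mode_conflict(void){
  sqlite3 *db1, *db2;
  char *zErr = 0;
  sqlite3_open("test.db", &db1);
  sqlite3_open("test.db", &db2);
  sqlite3_exec(db1, "PRAGMA journal_mode=wal", 0, 0, 0);
  sqlite3_exec(db2, "SELECT * FROM sqlite_master", 0, 0, 0); /* db2 is now a WAL reader */
  if( sqlite3_exec(db1, "PRAGMA journal_mode=delete", 0, 0, &zErr)!=SQLITE_OK ){
    fprintf(stderr, "expected failure: %s\n", zErr);         /* database is locked */
    sqlite3_free(zErr);
  }
  sqlite3_close(db2);
  /* With db2 closed the exclusive lock is available and the change succeeds. */
  sqlite3_exec(db1, "PRAGMA journal_mode=delete", 0, 0, 0);
  sqlite3_close(db1);
}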
+# +do_test walmode-4.6 { + sqlite3 db2 test.db + execsql { PRAGMA main.journal_mode } db2 +} {delete} +do_test walmode-4.7 { + execsql { PRAGMA main.journal_mode = wal } db +} {wal} +do_test walmode-4.8 { + execsql { SELECT * FROM t1 } db2 +} {1 2} +do_test walmode-4.9 { + catchsql { PRAGMA journal_mode = delete } db +} {1 {database is locked}} +do_test walmode-4.10 { + execsql { PRAGMA main.journal_mode } db +} {wal} + +do_test walmode-4.11 { + db2 close + execsql { PRAGMA journal_mode = delete } db +} {delete} +do_test walmode-4.12 { + execsql { PRAGMA main.journal_mode } db +} {delete} +do_test walmode-4.13 { + list [file exists test.db-journal] [file exists test.db-wal] +} {0 0} +do_test walmode-4.14 { + sqlite3 db2 test.db + execsql { + BEGIN; + SELECT * FROM t1; + } db2 +} {1 2} + +do_test walmode-4.16 { execsql { PRAGMA main.journal_mode } db } {delete} +do_test walmode-4.17 { execsql { PRAGMA main.journal_mode } db2 } {delete} + +do_test walmode-4.17 { + catchsql { PRAGMA main.journal_mode = wal } db +} {1 {database is locked}} +do_test walmode-4.18 { + execsql { PRAGMA main.journal_mode } db +} {delete} +catch { db close } +catch { db2 close } + +# Test that it is not possible to change a temporary or in-memory database +# to WAL mode. WAL mode is for persistent file-backed databases only. +# +# walmode-5.1.*: Try to set journal_mode=WAL on [sqlite3 db :memory:] database. +# walmode-5.2.*: Try to set journal_mode=WAL on [sqlite3 db ""] database. +# walmode-5.3.*: Try to set temp.journal_mode=WAL. +# +do_test walmode-5.1.1 { + sqlite3 db :memory: + execsql { PRAGMA main.journal_mode } +} {memory} +do_test walmode-5.1.2 { + execsql { PRAGMA main.journal_mode = wal } +} {memory} +do_test walmode-5.1.3 { + execsql { + BEGIN; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + COMMIT; + SELECT * FROM t1; + PRAGMA main.journal_mode; + } +} {1 2 memory} +do_test walmode-5.1.4 { + execsql { PRAGMA main.journal_mode = wal } +} {memory} +do_test walmode-5.1.5 { + execsql { + INSERT INTO t1 VALUES(3, 4); + SELECT * FROM t1; + PRAGMA main.journal_mode; + } +} {1 2 3 4 memory} + +do_test walmode-5.2.1 { + sqlite3 db "" + execsql { PRAGMA main.journal_mode } +} {delete} +do_test walmode-5.2.2 { + execsql { PRAGMA main.journal_mode = wal } +} {delete} +do_test walmode-5.2.3 { + execsql { + BEGIN; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + COMMIT; + SELECT * FROM t1; + PRAGMA main.journal_mode; + } +} {1 2 delete} +do_test walmode-5.2.4 { + execsql { PRAGMA main.journal_mode = wal } +} {delete} +do_test walmode-5.2.5 { + execsql { + INSERT INTO t1 VALUES(3, 4); + SELECT * FROM t1; + PRAGMA main.journal_mode; + } +} {1 2 3 4 delete} + +if {$TEMP_STORE>=2} { + set tempJrnlMode memory +} else { + set tempJrnlMode delete +} +do_test walmode-5.3.1 { + sqlite3 db test.db + execsql { PRAGMA temp.journal_mode } +} $tempJrnlMode +do_test walmode-5.3.2 { + execsql { PRAGMA temp.journal_mode = wal } +} $tempJrnlMode +do_test walmode-5.3.3 { + execsql { + BEGIN; + CREATE TEMP TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + COMMIT; + SELECT * FROM t1; + PRAGMA temp.journal_mode; + } +} [list 1 2 $tempJrnlMode] +do_test walmode-5.3.4 { + execsql { PRAGMA temp.journal_mode = wal } +} $tempJrnlMode +do_test walmode-5.3.5 { + execsql { + INSERT INTO t1 VALUES(3, 4); + SELECT * FROM t1; + PRAGMA temp.journal_mode; + } +} [list 1 2 3 4 $tempJrnlMode] + + +#------------------------------------------------------------------------- +# Test changing to WAL mode from journal_mode=off or 
journal_mode=memory +# +foreach {tn mode} { + 1 off + 2 memory + 3 persist + 4 delete + 5 truncate +} { + do_test walmode-6.$tn { + faultsim_delete_and_reopen + execsql " + PRAGMA journal_mode = $mode; + PRAGMA journal_mode = wal; + " + } [list $mode wal] +} +db close + +#------------------------------------------------------------------------- +# Test the effect of a "PRAGMA journal_mode" command being the first +# thing executed by a new connection. This means that the schema is not +# loaded when sqlite3_prepare_v2() is called to compile the statement. +# +do_test walmode-7.0 { + file delete -force test.db + sqlite3 db test.db + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE t1(a, b); + } +} {wal} +foreach {tn sql result} { + 1 "PRAGMA journal_mode" wal + 2 "PRAGMA main.journal_mode" wal + 3 "PRAGMA journal_mode = delete" delete + 4 "PRAGMA journal_mode" delete + 5 "PRAGMA main.journal_mode" delete + 6 "PRAGMA journal_mode = wal" wal + 7 "PRAGMA journal_mode" wal + 8 "PRAGMA main.journal_mode" wal + + 9 "PRAGMA journal_mode" wal + 10 "PRAGMA main.journal_mode" wal + 11 "PRAGMA main.journal_mode = delete" delete + 12 "PRAGMA journal_mode" delete + 13 "PRAGMA main.journal_mode" delete + 14 "PRAGMA main.journal_mode = wal" wal + 15 "PRAGMA journal_mode" wal + 16 "PRAGMA main.journal_mode" wal +} { + do_test walmode-7.$tn { + db close + sqlite3 db test.db + execsql $sql + } $result +} +db close + +#------------------------------------------------------------------------- +# Test the effect of a "PRAGMA journal_mode" command on an attached +# database. +# +faultsim_delete_and_reopen +do_execsql_test walmode-8.1 { + CREATE TABLE t1(a, b); + PRAGMA journal_mode = WAL; + ATTACH 'test.db2' AS two; + CREATE TABLE two.t2(a, b); +} {wal} +do_execsql_test walmode-8.2 { PRAGMA main.journal_mode } {wal} +do_execsql_test walmode-8.3 { PRAGMA two.journal_mode } {delete} +do_execsql_test walmode-8.4 { PRAGMA two.journal_mode = DELETE } {delete} + +db close +sqlite3 db test.db +do_execsql_test walmode-8.5 { ATTACH 'test.db2' AS two } {} +do_execsql_test walmode-8.6 { PRAGMA main.journal_mode } {wal} +do_execsql_test walmode-8.7 { PRAGMA two.journal_mode } {delete} +do_execsql_test walmode-8.8 { INSERT INTO two.t2 DEFAULT VALUES } {} +do_execsql_test walmode-8.9 { PRAGMA two.journal_mode } {delete} +do_execsql_test walmode-8.10 { INSERT INTO t1 DEFAULT VALUES } {} +do_execsql_test walmode-8.11 { PRAGMA main.journal_mode } {wal} +do_execsql_test walmode-8.12 { PRAGMA journal_mode } {wal} + +# Change to WAL mode on test2.db and make sure (in the tests that follow) +# that this mode change persists. +do_test walmode-8.x1 { + execsql { + PRAGMA two.journal_mode=WAL; + PRAGMA two.journal_mode; + } +} {wal wal} + +db close +sqlite3 db test.db +do_execsql_test walmode-8.13 { PRAGMA journal_mode = WAL } {wal} +do_execsql_test walmode-8.14 { ATTACH 'test.db2' AS two } {} +do_execsql_test walmode-8.15 { PRAGMA main.journal_mode } {wal} +do_execsql_test walmode-8.16 { PRAGMA two.journal_mode } {wal} +do_execsql_test walmode-8.17 { INSERT INTO two.t2 DEFAULT VALUES } {} +do_execsql_test walmode-8.18 { PRAGMA two.journal_mode } {wal} + +sqlite3 db2 test.db2 +do_test walmode-8.19 { execsql { PRAGMA main.journal_mode } db2 } {wal} +db2 close + +finish_test diff --git a/test/walslow.test b/test/walslow.test new file mode 100644 index 0000000..d726952 --- /dev/null +++ b/test/walslow.test @@ -0,0 +1,73 @@ +# 2010 March 17 +# +# The author disclaims copyright to this source code. 
In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the operation of the library in +# "PRAGMA journal_mode=WAL" mode. The tests in this file use +# brute force methods, so may take a while to run. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !wal {finish_test ; return } + +proc reopen_db {} { + catch { db close } + file delete -force test.db test.db-wal + sqlite3 db test.db + execsql { PRAGMA journal_mode = wal } +} + +db close +save_prng_state +for {set seed 1} {$seed<10} {incr seed} { + expr srand($seed) + restore_prng_state + reopen_db + do_test walslow-1.seed=$seed.0 { + execsql { CREATE TABLE t1(a, b) } + execsql { CREATE INDEX i1 ON t1(a) } + execsql { CREATE INDEX i2 ON t1(b) } + } {} + + for {set iTest 1} {$iTest < 100} {incr iTest} { + + do_test walslow-1.seed=$seed.$iTest.1 { + set w [expr int(rand()*2000)] + set x [expr int(rand()*2000)] + execsql { INSERT INTO t1 VALUES(randomblob($w), randomblob($x)) } + execsql { PRAGMA integrity_check } + } {ok} + + do_test walslow-1.seed=$seed.$iTest.2 { + execsql "PRAGMA wal_checkpoint;" + execsql { PRAGMA integrity_check } + } {ok} + + do_test walslow-1.seed=$seed.$iTest.3 { + file delete -force testX.db testX.db-wal + file copy test.db testX.db + file copy test.db-wal testX.db-wal + + sqlite3 db2 testX.db + execsql { PRAGMA journal_mode = WAL } db2 + execsql { PRAGMA integrity_check } db2 + } {ok} + + do_test walslow-1.seed=$seed.$iTest.4 { + execsql { SELECT count(*) FROM t1 WHERE a!=b } db2 + } [execsql { SELECT count(*) FROM t1 WHERE a!=b }] + db2 close + } +} + + +finish_test diff --git a/test/walthread.test b/test/walthread.test new file mode 100644 index 0000000..9817c0e --- /dev/null +++ b/test/walthread.test @@ -0,0 +1,527 @@ +# 2010 April 13 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the operation of the library in +# "PRAGMA journal_mode=WAL" mode with multiple threads. +# + +set testdir [file dirname $argv0] + +source $testdir/tester.tcl +source $testdir/lock_common.tcl +if {[run_thread_tests]==0} { finish_test ; return } +ifcapable !wal { finish_test ; return } + +set sqlite_walsummary_mmap_incr 64 + +# How long, in seconds, to run each test for. If a test is set to run for +# 0 seconds, it is omitted entirely. +# +unset -nocomplain seconds +set seconds(walthread-1) 20 +set seconds(walthread-2) 20 +set seconds(walthread-3) 20 +set seconds(walthread-4) 20 +set seconds(walthread-5) 1 + +# The parameter is the name of a variable in the callers context. The +# variable may or may not exist when this command is invoked. +# +# If the variable does exist, its value is returned. Otherwise, this +# command uses [vwait] to wait until it is set, then returns the value. +# In other words, this is a version of the [set VARNAME] command that +# blocks until a variable exists. 
+# +proc wait_for_var {varname} { + if {0==[uplevel [list info exists $varname]]} { + uplevel [list vwait $varname] + } + uplevel [list set $varname] +} + +# The argument is the name of a list variable in the callers context. The +# first element of the list is removed and returned. For example: +# +# set L {a b c} +# set x [lshift L] +# assert { $x == "a" && $L == "b c" } +# +proc lshift {lvar} { + upvar $lvar L + set ret [lindex $L 0] + set L [lrange $L 1 end] + return $ret +} + + +#------------------------------------------------------------------------- +# do_thread_test TESTNAME OPTIONS... +# +# where OPTIONS are: +# +# -seconds SECONDS How many seconds to run the test for +# -init SCRIPT Script to run before test. +# -thread NAME COUNT SCRIPT Scripts to run in threads (or processes). +# -processes BOOLEAN True to use processes instead of threads. +# -check SCRIPT Script to run after test. +# +proc do_thread_test {args} { + + set A $args + + set P(testname) [lshift A] + set P(seconds) 5 + set P(init) "" + set P(threads) [list] + set P(processes) 0 + set P(check) { + set ic [db eval "PRAGMA integrity_check"] + if {$ic != "ok"} { error $ic } + } + + unset -nocomplain ::done + + while {[llength $A]>0} { + set a [lshift A] + switch -glob -- $a { + -seconds { + set P(seconds) [lshift A] + } + + -init { + set P(init) [lshift A] + } + + -processes { + set P(processes) [lshift A] + } + + -check { + set P(check) [lshift A] + } + + -thread { + set name [lshift A] + set count [lshift A] + set prg [lshift A] + lappend P(threads) [list $name $count $prg] + } + + default { + error "Unknown option: $a" + } + } + } + + if {$P(seconds) == 0} { + puts "Skipping $P(testname)" + return + } + + puts "Running $P(testname) for $P(seconds) seconds..." + + catch { db close } + file delete -force test.db test.db-journal test.db-wal + + sqlite3 db test.db + eval $P(init) + catch { db close } + + foreach T $P(threads) { + set name [lindex $T 0] + set count [lindex $T 1] + set prg [lindex $T 2] + + for {set i 1} {$i <= $count} {incr i} { + set vars " + set E(pid) $i + set E(nthread) $count + set E(seconds) $P(seconds) + " + set program [string map [list %TEST% $prg %VARS% $vars] { + + %VARS% + + proc usleep {ms} { + set ::usleep 0 + after $ms {set ::usleep 1} + vwait ::usleep + } + + proc integrity_check {{db db}} { + set ic [$db eval {PRAGMA integrity_check}] + if {$ic != "ok"} {error $ic} + } + + proc busyhandler {n} { usleep 10 ; return 0 } + + sqlite3 db test.db + db busy busyhandler + db eval { SELECT randomblob($E(pid)*5) } + + set ::finished 0 + after [expr $E(seconds) * 1000] {set ::finished 1} + proc tt_continue {} { update ; expr ($::finished==0) } + + set rc [catch { %TEST% } msg] + + catch { db close } + list $rc $msg + }] + + if {$P(processes)==0} { + sqlthread spawn ::done($name,$i) $program + } else { + testfixture_nb ::done($name,$i) $program + } + } + } + + set report " Results:" + foreach T $P(threads) { + set name [lindex $T 0] + set count [lindex $T 1] + set prg [lindex $T 2] + + set reslist [list] + for {set i 1} {$i <= $count} {incr i} { + set res [wait_for_var ::done($name,$i)] + lappend reslist [lindex $res 1] + do_test $P(testname).$name.$i [list lindex $res 0] 0 + } + + append report " $name $reslist" + } + puts $report + + sqlite3 db test.db + set res "" + if {[catch $P(check) msg]} { set res $msg } + do_test $P(testname).check [list set {} $res] "" +} + +# A wrapper around [do_thread_test] which runs the specified test twice. +# Once using processes, once using threads. 
This command takes the same +# arguments as [do_thread_test], except specifying the -processes switch +# is illegal. +# +proc do_thread_test2 {args} { + set name [lindex $args 0] + if {[lsearch $args -processes]>=0} { error "bad option: -processes"} + uplevel [lreplace $args 0 0 do_thread_test "$name-threads" -processes 0] + uplevel [lreplace $args 0 0 do_thread_test "$name-processes" -processes 1] +} + +#-------------------------------------------------------------------------- +# Start 10 threads. Each thread performs both read and write +# transactions. Each read transaction consists of: +# +# 1) Reading the md5sum of all but the last table row, +# 2) Running integrity check. +# 3) Reading the value stored in the last table row, +# 4) Check that the values read in steps 1 and 3 are the same, and that +# the md5sum of all but the last table row has not changed. +# +# Each write transaction consists of: +# +# 1) Modifying the contents of t1 (inserting, updating, deleting rows). +# 2) Appending a new row to the table containing the md5sum() of all +# rows in the table. +# +# Each of the N threads runs N read transactions followed by a single write +# transaction in a loop as fast as possible. +# +# There is also a single checkpointer thread. It runs the following loop: +# +# 1) Execute "PRAGMA wal_checkpoint" +# 2) Sleep for 500 ms. +# +do_thread_test2 walthread-1 -seconds $seconds(walthread-1) -init { + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE t1(x PRIMARY KEY); + PRAGMA lock_status; + INSERT INTO t1 VALUES(randomblob(100)); + INSERT INTO t1 VALUES(randomblob(100)); + INSERT INTO t1 SELECT md5sum(x) FROM t1; + } +} -thread main 10 { + + proc read_transaction {} { + set results [db eval { + BEGIN; + PRAGMA integrity_check; + SELECT md5sum(x) FROM t1 WHERE rowid != (SELECT max(rowid) FROM t1); + SELECT x FROM t1 WHERE rowid = (SELECT max(rowid) FROM t1); + SELECT md5sum(x) FROM t1 WHERE rowid != (SELECT max(rowid) FROM t1); + COMMIT; + }] + + if {[llength $results]!=4 + || [lindex $results 0] != "ok" + || [lindex $results 1] != [lindex $results 2] + || [lindex $results 2] != [lindex $results 3] + } { + error "Failed read transaction: $results" + } + } + + proc write_transaction {} { + db eval { + BEGIN; + INSERT INTO t1 VALUES(randomblob(100)); + INSERT INTO t1 VALUES(randomblob(100)); + INSERT INTO t1 SELECT md5sum(x) FROM t1; + COMMIT; + } + } + + # Turn off auto-checkpoint. Otherwise, an auto-checkpoint run by a + # writer may cause the dedicated checkpoint thread to return an + # SQLITE_BUSY error. + # + db eval { PRAGMA wal_autocheckpoint = 0 } + + set nRun 0 + while {[tt_continue]} { + read_transaction + write_transaction + incr nRun + } + set nRun + +} -thread ckpt 1 { + set nRun 0 + while {[tt_continue]} { + db eval "PRAGMA wal_checkpoint" + usleep 500 + incr nRun + } + set nRun +} + +#-------------------------------------------------------------------------- +# This test has clients run the following procedure as fast as possible +# in a loop: +# +# 1. Open a database handle. +# 2. Execute a read-only transaction on the db. +# 3. Do "PRAGMA journal_mode = XXX", where XXX is one of WAL or DELETE. +# Ignore any SQLITE_BUSY error. +# 4. Execute a write transaction to insert a row into the db. +# 5. Run "PRAGMA integrity_check" +# +# At present, there are 4 clients in total. 2 do "journal_mode = WAL", and +# two do "journal_mode = DELETE". 
+# +# Each client returns a string of the form "W w, R r", where W is the +# number of write-transactions performed using a WAL journal, and D is +# the number of write-transactions performed using a rollback journal. +# For example, "192 w, 185 r". +# +do_thread_test2 walthread-2 -seconds $seconds(walthread-2) -init { + execsql { CREATE TABLE t1(x INTEGER PRIMARY KEY, y UNIQUE) } +} -thread RB 2 { + + db close + set nRun 0 + set nDel 0 + while {[tt_continue]} { + sqlite3 db test.db + db busy busyhandler + db eval { SELECT * FROM sqlite_master } + catch { db eval { PRAGMA journal_mode = DELETE } } + db eval { + BEGIN; + INSERT INTO t1 VALUES(NULL, randomblob(100+$E(pid))); + } + incr nRun 1 + incr nDel [file exists test.db-journal] + if {[file exists test.db-journal] + [file exists test.db-wal] != 1} { + error "File-system looks bad..." + } + db eval COMMIT + + integrity_check + db close + } + list $nRun $nDel + set {} "[expr $nRun-$nDel] w, $nDel r" + +} -thread WAL 2 { + db close + set nRun 0 + set nDel 0 + while {[tt_continue]} { + sqlite3 db test.db + db busy busyhandler + db eval { SELECT * FROM sqlite_master } + catch { db eval { PRAGMA journal_mode = WAL } } + db eval { + BEGIN; + INSERT INTO t1 VALUES(NULL, randomblob(110+$E(pid))); + } + incr nRun 1 + incr nDel [file exists test.db-journal] + if {[file exists test.db-journal] + [file exists test.db-wal] != 1} { + error "File-system looks bad..." + } + db eval COMMIT + + integrity_check + db close + } + set {} "[expr $nRun-$nDel] w, $nDel r" +} + +do_thread_test walthread-3 -seconds $seconds(walthread-3) -init { + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE t1(cnt PRIMARY KEY, sum1, sum2); + CREATE INDEX i1 ON t1(sum1); + CREATE INDEX i2 ON t1(sum2); + INSERT INTO t1 VALUES(0, 0, 0); + } +} -thread t 10 { + + set nextwrite $E(pid) + + proc wal_hook {zDb nEntry} { + if {$nEntry>10} { + set rc [catch { db eval {PRAGMA wal_checkpoint} } msg] + if {$rc && $msg != "database is locked"} { error $msg } + } + return 0 + } + db wal_hook wal_hook + + while {[tt_continue]} { + set max 0 + while { $max != ($nextwrite-1) && [tt_continue] } { + set max [db eval { SELECT max(cnt) FROM t1 }] + } + + if {[tt_continue]} { + set sum1 [db eval { SELECT sum(cnt) FROM t1 }] + set sum2 [db eval { SELECT sum(sum1) FROM t1 }] + db eval { INSERT INTO t1 VALUES($nextwrite, $sum1, $sum2) } + incr nextwrite $E(nthread) + integrity_check + } + } + + set {} ok +} -check { + puts " Final db contains [db eval {SELECT count(*) FROM t1}] rows" + puts " Final integrity-check says: [db eval {PRAGMA integrity_check}]" + + # Check that the contents of the database are Ok. + set c 0 + set s1 0 + set s2 0 + db eval { SELECT cnt, sum1, sum2 FROM t1 ORDER BY cnt } { + if {$c != $cnt || $s1 != $sum1 || $s2 != $sum2} { + error "database content is invalid" + } + incr s2 $s1 + incr s1 $c + incr c 1 + } +} + +do_thread_test2 walthread-4 -seconds $seconds(walthread-4) -init { + execsql { + PRAGMA journal_mode = WAL; + CREATE TABLE t1(a INTEGER PRIMARY KEY, b UNIQUE); + } +} -thread r 1 { + # This connection only ever reads the database. Therefore the + # busy-handler is not required. Disable it to check that this is true. + # + # UPDATE: That is no longer entirely true - as we don't use a blocking + # lock to enter RECOVER state. Which means there is a small chance a + # reader can see an SQLITE_BUSY. 
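For reference, a hedged C counterpart of the [busyhandler] proc defined in the thread template earlier in this file. In the C interface a non-zero return from the callback asks SQLite to retry the locked operation, while returning zero gives up and lets SQLITE_BUSY propagate to the caller, which is exactly what a reader hitting the brief recovery window described above would then observe.

#include <sqlite3.h>

/* Sleep 10 ms per attempt and give up after roughly one second. */
static int busy_cb(void *pArg, int nPrior){
  (void)pArg;
  if( nPrior>=100 ) return 0;   /* let SQLITE_BUSY through */
  sqlite3_sleep(10);
  return 1;                     /* ask SQLite to retry */
}

static void install_busy_handler(sqlite3 *db){
  sqlite3_busy_handler(db, busy_cb, 0);
}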
+ # + while {[tt_continue]} { + integrity_check + } + set {} ok +} -thread w 1 { + + proc wal_hook {zDb nEntry} { + if {$nEntry>15} {db eval {PRAGMA wal_checkpoint}} + return 0 + } + db wal_hook wal_hook + set row 1 + while {[tt_continue]} { + db eval { REPLACE INTO t1 VALUES($row, randomblob(300)) } + incr row + if {$row == 10} { set row 1 } + } + + set {} ok +} + + +# This test case attempts to provoke a deadlock condition that existed in +# the unix VFS at one point. The problem occurred only while recovering a +# very large wal file (one that requires a wal-index larger than the +# initial default allocation of 64KB). +# +do_thread_test walthread-5 -seconds $seconds(walthread-5) -init { + + proc log_file_size {nFrame pgsz} { + expr {12 + ($pgsz+16)*$nFrame} + } + + execsql { + PRAGMA page_size = 1024; + PRAGMA journal_mode = WAL; + CREATE TABLE t1(x); + BEGIN; + INSERT INTO t1 VALUES(randomblob(900)); + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 2 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 4 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 8 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 16 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 32 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 64 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 128 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 256 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 512 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 1024 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 2048 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 4096 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 8192 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 16384 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 32768 */ + INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 65536 */ + COMMIT; + } + + file copy -force test.db-wal bak.db-wal + file copy -force test.db bak.db + db close + + file copy -force bak.db-wal test.db-wal + file copy -force bak.db test.db + + if {[file size test.db-wal] < [log_file_size [expr 64*1024] 1024]} { + error "Somehow failed to create a large log file" + } + puts "Database with large log file recovered. Now running clients..." +} -thread T 5 { + db eval { SELECT count(*) FROM t1 } +} +unset -nocomplain seconds + +finish_test diff --git a/test/where2.test b/test/where2.test index 5682ed4..d61c089 100644 --- a/test/where2.test +++ b/test/where2.test @@ -620,4 +620,35 @@ ifcapable or_opt&&tclvar { } } +# Indices with redundant columns +# +do_test where2-11.1 { + execsql { + CREATE TABLE t11(a,b,c,d); + CREATE INDEX i11aba ON t11(a,b,a,c); -- column A occurs twice. 
+ INSERT INTO t11 VALUES(1,2,3,4); + INSERT INTO t11 VALUES(5,6,7,8); + INSERT INTO t11 VALUES(1,2,9,10); + INSERT INTO t11 VALUES(5,11,12,13); + SELECT c FROM t11 WHERE a=1 AND b=2 ORDER BY c; + } +} {3 9} +do_test where2-11.2 { + execsql { + CREATE INDEX i11cccccccc ON t11(c,c,c,c,c,c,c,c); -- repeated column + SELECT d FROM t11 WHERE c=9; + } +} {10} +do_test where2-11.3 { + execsql { + SELECT d FROM t11 WHERE c IN (1,2,3,4,5); + } +} {4} +do_test where2-11.4 { + execsql { + SELECT d FROM t11 WHERE c=7 OR (a=1 AND b=2) ORDER BY d; + } +} {4 8 10} + + finish_test diff --git a/test/where3.test b/test/where3.test index c395d0a..13f9473 100644 --- a/test/where3.test +++ b/test/where3.test @@ -199,13 +199,13 @@ do_test where3-2.5 { WHERE cpk=ax AND bpk=cx } } {tA {} tC * tB * tD *} -do_test where3-2.5 { +do_test where3-2.6 { queryplan { SELECT * FROM tA, tB, tC LEFT JOIN tD ON dpk=cx WHERE bpk=cx AND apk=bx } } {tC {} tB * tA * tD *} -do_test where3-2.6 { +do_test where3-2.7 { queryplan { SELECT * FROM tA, tB, tC LEFT JOIN tD ON dpk=cx WHERE cpk=bx AND apk=cx diff --git a/test/where7.test b/test/where7.test index 10a94c1..dbb9bc6 100644 --- a/test/where7.test +++ b/test/where7.test @@ -90,12 +90,10 @@ do_test where7-1.9 { } } {2 4 5 scan 0 sort 0} do_test where7-1.10 { -breakpoint count_steps { SELECT a FROM t1 WHERE (b=3 OR c>=10 OR c=4 OR b>10) } } {2 4 5 scan 0 sort 0} -breakpoint do_test where7-1.11 { count_steps { SELECT a FROM t1 WHERE (d=5 AND b=3) OR c==100 ORDER BY a; @@ -106,13 +104,7 @@ do_test where7-1.12 { SELECT a FROM t1 WHERE (b BETWEEN 2 AND 4) OR c=100 ORDER BY a } } {1 2 3 5 scan 0 sort 1} -do_test where7-1.13.1 { - count_steps { - SELECT a FROM t1 WHERE (b BETWEEN 0 AND 2) OR (c BETWEEN 9 AND 999) - ORDER BY a DESC - } -} {5 4 1 scan 4 sort 0} -do_test where7-1.13.2 { +do_test where7-1.13 { count_steps { SELECT a FROM t1 WHERE (b BETWEEN 0 AND 2) OR (c BETWEEN 9 AND 999) ORDER BY +a DESC diff --git a/test/where8.test b/test/where8.test index 04657c0..a7d5edb 100644 --- a/test/where8.test +++ b/test/where8.test @@ -286,8 +286,9 @@ do_test where8-3.15 { SELECT c FROM t1, t2 WHERE a BETWEEN 1 AND 2 OR a = ( SELECT sum(e IS NULL) FROM t2 AS inner WHERE t2.d>inner.d ) + ORDER BY c } -} {I II I II I II I II I II I II III I II III I II III I II III I II III 9 0} +} {I I I I I I I I I I II II II II II II II II II II III III III III III 9 1} #----------------------------------------------------------------------- # The following tests - where8-4.* - verify that adding or removing diff --git a/tool/lemon.c b/tool/lemon.c index 49db0e6..70d7c1c 100644 --- a/tool/lemon.c +++ b/tool/lemon.c @@ -34,6 +34,7 @@ extern int access(); #define MAXRHS 1000 #endif +static int showPrecedenceConflict = 0; static const char **made_files = NULL; static int made_files_count = 0; static int successful_exit = 0; @@ -1084,7 +1085,7 @@ static int resolve_conflict( /* Not enough precedence information. 
*/ apy->type = SRCONFLICT; errcnt++; - }else if( spx->prec>spy->prec ){ /* Lower precedence wins */ + }else if( spx->prec>spy->prec ){ /* higher precedence wins */ apy->type = RD_RESOLVED; }else if( spx->precprec ){ apx->type = SH_RESOLVED; @@ -1404,6 +1405,7 @@ int main(int argc, char **argv) static int statistics = 0; static int mhflag = 0; static int nolinenosflag = 0; + static int noResort = 0; static struct s_options options[] = { {OPT_FLAG, "b", (char*)&basisflag, "Print only the basis in report."}, {OPT_FLAG, "c", (char*)&compress, "Don't compress the action table."}, @@ -1412,7 +1414,10 @@ int main(int argc, char **argv) {OPT_FLAG, "g", (char*)&rpflag, "Print grammar without actions."}, {OPT_FLAG, "m", (char*)&mhflag, "Output a makeheaders compatible file."}, {OPT_FLAG, "l", (char*)&nolinenosflag, "Do not print #line statements."}, + {OPT_FLAG, "p", (char*)&showPrecedenceConflict, + "Show conflicts resolved by precedence rules"}, {OPT_FLAG, "q", (char*)&quiet, "(Quiet) Don't print the report file."}, + {OPT_FLAG, "r", (char*)&noResort, "Do not sort or renumber states"}, {OPT_FLAG, "s", (char*)&statistics, "Print parser stats to standard output."}, {OPT_FLAG, "x", (char*)&version, "Print the version number."}, @@ -1499,8 +1504,9 @@ int main(int argc, char **argv) if( compress==0 ) CompressTables(&lem); /* Reorder and renumber the states so that states with fewer choices - ** occur at the end. */ - ResortStates(&lem); + ** occur at the end. This is an optimization that helps make the + ** generated parser tables smaller. */ + if( noResort==0 ) ResortStates(&lem); /* Generate a report of the parser generated. (the "y.output" file) */ if( !quiet ) ReportOutput(&lem); @@ -2896,11 +2902,25 @@ int PrintAction(struct action *ap, FILE *fp, int indent){ indent,ap->sp->name,ap->x.rp->index); break; case SSCONFLICT: - fprintf(fp,"%*s shift %d ** Parsing conflict **", + fprintf(fp,"%*s shift %-3d ** Parsing conflict **", indent,ap->sp->name,ap->x.stp->statenum); break; case SH_RESOLVED: + if( showPrecedenceConflict ){ + fprintf(fp,"%*s shift %-3d -- dropped by precedence", + indent,ap->sp->name,ap->x.stp->statenum); + }else{ + result = 0; + } + break; case RD_RESOLVED: + if( showPrecedenceConflict ){ + fprintf(fp,"%*s reduce %-3d -- dropped by precedence", + indent,ap->sp->name,ap->x.rp->index); + }else{ + result = 0; + } + break; case NOT_USED: result = 0; break; diff --git a/tool/mksqlite3c.tcl b/tool/mksqlite3c.tcl index 38cee50..da31306 100644 --- a/tool/mksqlite3c.tcl +++ b/tool/mksqlite3c.tcl @@ -109,6 +109,7 @@ foreach hdr { sqliteLimit.h vdbe.h vdbeInt.h + wal.h } { set available_hdr($hdr) 1 } @@ -244,6 +245,7 @@ foreach file { pcache1.c rowset.c pager.c + wal.c btmutex.c btree.c diff --git a/tool/shell1.test b/tool/shell1.test index 5d625b2..dcdac75 100644 --- a/tool/shell1.test +++ b/tool/shell1.test @@ -195,7 +195,7 @@ do_test shell1-1.15.3 { # -version show SQLite version do_test shell1-1.16.1 { catchcmd "-version test.db" "" -} {0 3.6.22} +} {0 3.7.0} #---------------------------------------------------------------------------- # Test cases shell1-2.*: Basic "dot" command token parsing. @@ -369,18 +369,6 @@ do_test shell1-3.7.4 { catchcmd "test.db" ".explain OFF BAD" } {1 {Error: unknown command or invalid arguments: "explain". Enter ".help" for help}} -# .genfkey ?OPTIONS? Options are: -# --no-drop: Do not drop old fkey triggers. 
-# --ignore-errors: Ignore tables with fkey errors -# --exec: Execute generated SQL immediately -# See file tool/genfkey.README in the source -# distribution for further information. -do_test shell1-3.8.1 { - catchcmd "test.db" ".genfkey" -} {0 {}} -do_test shell1-3.8.2 { - catchcmd "test.db" ".genfkey FOO" -} {1 {unknown option: FOO}} # .header(s) ON|OFF Turn display of headers on or off do_test shell1-3.9.1 { @@ -702,5 +690,4 @@ do_test shell1-3.27.4 { catchcmd "test.db" ".timer OFF BAD" } {1 {Error: unknown command or invalid arguments: "timer". Enter ".help" for help}} - -# +puts "CLI tests completed successfully" diff --git a/tool/shell2.test b/tool/shell2.test index 0ecb01d..b63fafc 100644 --- a/tool/shell2.test +++ b/tool/shell2.test @@ -218,3 +218,5 @@ SELECT * FROM foo2; b 1 2}} + +puts "CLI tests completed successfully" diff --git a/tool/shell3.test b/tool/shell3.test index 4ee065b..d37adff 100644 --- a/tool/shell3.test +++ b/tool/shell3.test @@ -121,3 +121,4 @@ do_test shell3-2.7 { } {1 {Error: incomplete SQL: CREATE TABLE}} +puts "CLI tests completed successfully" diff --git a/tool/showdb.c b/tool/showdb.c index 26e7ea2..a54eea8 100644 --- a/tool/showdb.c +++ b/tool/showdb.c @@ -8,41 +8,247 @@ #include #include #include +#include -static int pagesize = 1024; -static int db = -1; -static int mxPage = 0; -static int perLine = 32; +static int pagesize = 1024; /* Size of a database page */ +static int db = -1; /* File descriptor for reading the DB */ +static int mxPage = 0; /* Last page number */ +static int perLine = 16; /* HEX elements to print per line */ +typedef long long int i64; /* Datatype for 64-bit integers */ + + +/* +** Convert the var-int format into i64. Return the number of bytes +** in the var-int. Write the var-int value into *pVal. +*/ +static int decodeVarint(const unsigned char *z, i64 *pVal){ + i64 v = 0; + int i; + for(i=0; i<8; i++){ + v = (v<<7) + (z[i]&0x7f); + if( (z[i]&0x80)==0 ){ *pVal = v; return i+1; } + } + v = (v<<8) + (z[i]&0xff); + *pVal = v; + return 9; +} + +/* Report an out-of-memory error and die. +*/ static void out_of_memory(void){ fprintf(stderr,"Out of memory...\n"); exit(1); } -static print_page(int iPg){ +/* +** Read content from the file. +** +** Space to hold the content is obtained from malloc() and needs to be +** freed by the caller. +*/ +static unsigned char *getContent(int ofst, int nByte){ + unsigned char *aData; + aData = malloc(nByte); + if( aData==0 ) out_of_memory(); + lseek(db, ofst, SEEK_SET); + read(db, aData, nByte); + return aData; +} + +/* +** Print a range of bytes as hex and as ascii. +*/ +static unsigned char *print_byte_range( + int ofst, /* First byte in the range of bytes to print */ + int nByte, /* Number of bytes to print */ + int printOfst /* Add this amount to the index on the left column */ +){ unsigned char *aData; int i, j; - aData = malloc(pagesize); - if( aData==0 ) out_of_memory(); - lseek(db, (iPg-1)*(long long int)pagesize, SEEK_SET); - read(db, aData, pagesize); - fprintf(stdout, "Page %d:\n", iPg); - for(i=0; inByte ){ + fprintf(stdout, " "); + }else{ + fprintf(stdout,"%02x ", aData[i+j]); + } } for(j=0; jnByte ){ + fprintf(stdout, " "); + }else{ + fprintf(stdout,"%c", isprint(aData[i+j]) ? 
aData[i+j] : '.'); + } } fprintf(stdout,"\n"); } + return aData; +} + +/* +** Print an entire page of content as hex +*/ +static print_page(int iPg){ + int iStart; + unsigned char *aData; + iStart = (iPg-1)*pagesize; + fprintf(stdout, "Page %d: (offsets 0x%x..0x%x)\n", + iPg, iStart, iStart+pagesize-1); + aData = print_byte_range(iStart, pagesize, 0); free(aData); } +/* Print a line of decode output showing a 4-byte integer. +*/ +static print_decode_line( + unsigned char *aData, /* Content being decoded */ + int ofst, int nByte, /* Start and size of decode */ + const char *zMsg /* Message to append */ +){ + int i, j; + int val = aData[ofst]; + char zBuf[100]; + sprintf(zBuf, " %03x: %02x", ofst, aData[ofst]); + i = strlen(zBuf); + for(j=1; j<4; j++){ + if( j>=nByte ){ + sprintf(&zBuf[i], " "); + }else{ + sprintf(&zBuf[i], " %02x", aData[ofst+j]); + val = val*256 + aData[ofst+j]; + } + i += strlen(&zBuf[i]); + } + sprintf(&zBuf[i], " %9d", val); + printf("%s %s\n", zBuf, zMsg); +} + +/* +** Decode the database header. +*/ +static void print_db_header(void){ + unsigned char *aData; + aData = print_byte_range(0, 100, 0); + printf("Decoded:\n"); + print_decode_line(aData, 16, 2, "Database page size"); + print_decode_line(aData, 18, 1, "File format write version"); + print_decode_line(aData, 19, 1, "File format read version"); + print_decode_line(aData, 20, 1, "Reserved space at end of page"); + print_decode_line(aData, 24, 4, "File change counter"); + print_decode_line(aData, 28, 4, "Size of database in pages"); + print_decode_line(aData, 32, 4, "Page number of first freelist page"); + print_decode_line(aData, 36, 4, "Number of freelist pages"); + print_decode_line(aData, 40, 4, "Schema cookie"); + print_decode_line(aData, 44, 4, "Schema format version"); + print_decode_line(aData, 48, 4, "Default page cache size"); + print_decode_line(aData, 52, 4, "Largest auto-vac root page"); + print_decode_line(aData, 56, 4, "Text encoding"); + print_decode_line(aData, 60, 4, "User version"); + print_decode_line(aData, 64, 4, "Incremental-vacuum mode"); + print_decode_line(aData, 68, 4, "meta[7]"); + print_decode_line(aData, 72, 4, "meta[8]"); + print_decode_line(aData, 76, 4, "meta[9]"); + print_decode_line(aData, 80, 4, "meta[10]"); + print_decode_line(aData, 84, 4, "meta[11]"); + print_decode_line(aData, 88, 4, "meta[12]"); + print_decode_line(aData, 92, 4, "Change counter for version number"); + print_decode_line(aData, 96, 4, "SQLite version number"); +} + +/* +** Create a description for a single cell. 
+*/ +static int describeCell(unsigned char cType, unsigned char *a, char **pzDesc){ + int i; + int nDesc = 0; + int n = 0; + int leftChild; + i64 nPayload; + i64 rowid; + static char zDesc[100]; + i = 0; + if( cType<=5 ){ + leftChild = ((a[0]*256 + a[1])*256 + a[2])*256 + a[3]; + a += 4; + n += 4; + sprintf(zDesc, "left-child: %d ", leftChild); + nDesc = strlen(zDesc); + } + if( cType!=5 ){ + i = decodeVarint(a, &nPayload); + a += i; + n += i; + sprintf(&zDesc[nDesc], "sz: %lld ", nPayload); + nDesc += strlen(&zDesc[nDesc]); + } + if( cType==5 || cType==13 ){ + i = decodeVarint(a, &rowid); + a += i; + n += i; + sprintf(&zDesc[nDesc], "rowid: %lld ", rowid); + nDesc += strlen(&zDesc[nDesc]); + } + *pzDesc = zDesc; + return n; +} + +/* +** Decode a btree page +*/ +static void decode_btree_page(unsigned char *a, int pgno, int hdrSize){ + const char *zType = "unknown"; + int nCell; + int i; + int iCellPtr; + switch( a[0] ){ + case 2: zType = "index interior node"; break; + case 5: zType = "table interior node"; break; + case 10: zType = "index leaf"; break; + case 13: zType = "table leaf"; break; + } + printf("Decode of btree page %d:\n", pgno); + print_decode_line(a, 0, 1, zType); + print_decode_line(a, 1, 2, "Offset to first freeblock"); + print_decode_line(a, 3, 2, "Number of cells on this page"); + nCell = a[3]*256 + a[4]; + print_decode_line(a, 5, 2, "Offset to cell content area"); + print_decode_line(a, 7, 1, "Fragmented byte count"); + if( a[0]==2 || a[0]==5 ){ + print_decode_line(a, 8, 4, "Right child"); + iCellPtr = 12; + }else{ + iCellPtr = 8; + } + for(i=0; i +#include +#include +#include +#include +#include +#include +#include + + +static int pagesize = 1024; /* Size of a database page */ +static int fd = -1; /* File descriptor for reading the WAL file */ +static int mxFrame = 0; /* Last frame */ +static int perLine = 16; /* HEX elements to print per line */ + +typedef long long int i64; /* Datatype for 64-bit integers */ + + +/* +** Convert the var-int format into i64. Return the number of bytes +** in the var-int. Write the var-int value into *pVal. +*/ +static int decodeVarint(const unsigned char *z, i64 *pVal){ + i64 v = 0; + int i; + for(i=0; i<8; i++){ + v = (v<<7) + (z[i]&0x7f); + if( (z[i]&0x80)==0 ){ *pVal = v; return i+1; } + } + v = (v<<8) + (z[i]&0xff); + *pVal = v; + return 9; +} + +/* Report an out-of-memory error and die. +*/ +static void out_of_memory(void){ + fprintf(stderr,"Out of memory...\n"); + exit(1); +} + +/* +** Read content from the file. +** +** Space to hold the content is obtained from malloc() and needs to be +** freed by the caller. +*/ +static unsigned char *getContent(int ofst, int nByte){ + unsigned char *aData; + aData = malloc(nByte); + if( aData==0 ) out_of_memory(); + lseek(fd, ofst, SEEK_SET); + read(fd, aData, nByte); + return aData; +} + +/* +** Print a range of bytes as hex and as ascii. 
+*/ +static void print_byte_range( + int ofst, /* First byte in the range of bytes to print */ + int nByte, /* Number of bytes to print */ + unsigned char *aData, /* Content to print */ + int printOfst /* Add this amount to the index on the left column */ +){ + int i, j; + const char *zOfstFmt; + + if( ((printOfst+nByte)&~0xfff)==0 ){ + zOfstFmt = " %03x: "; + }else if( ((printOfst+nByte)&~0xffff)==0 ){ + zOfstFmt = " %04x: "; + }else if( ((printOfst+nByte)&~0xfffff)==0 ){ + zOfstFmt = " %05x: "; + }else if( ((printOfst+nByte)&~0xffffff)==0 ){ + zOfstFmt = " %06x: "; + }else{ + zOfstFmt = " %08x: "; + } + + for(i=0; inByte ){ + fprintf(stdout, " "); + }else{ + fprintf(stdout,"%02x ", aData[i+j]); + } + } + for(j=0; jnByte ){ + fprintf(stdout, " "); + }else{ + fprintf(stdout,"%c", isprint(aData[i+j]) ? aData[i+j] : '.'); + } + } + fprintf(stdout,"\n"); + } +} + +/* Print a line of decode output showing a 4-byte integer. +*/ +static void print_decode_line( + unsigned char *aData, /* Content being decoded */ + int ofst, int nByte, /* Start and size of decode */ + int asHex, /* If true, output value as hex */ + const char *zMsg /* Message to append */ +){ + int i, j; + int val = aData[ofst]; + char zBuf[100]; + sprintf(zBuf, " %03x: %02x", ofst, aData[ofst]); + i = strlen(zBuf); + for(j=1; j<4; j++){ + if( j>=nByte ){ + sprintf(&zBuf[i], " "); + }else{ + sprintf(&zBuf[i], " %02x", aData[ofst+j]); + val = val*256 + aData[ofst+j]; + } + i += strlen(&zBuf[i]); + } + if( asHex ){ + sprintf(&zBuf[i], " 0x%08x", val); + }else{ + sprintf(&zBuf[i], " %9d", val); + } + printf("%s %s\n", zBuf, zMsg); +} + +/* +** Print an entire page of content as hex +*/ +static void print_frame(int iFrame){ + int iStart; + unsigned char *aData; + iStart = 32 + (iFrame-1)*(pagesize+24); + fprintf(stdout, "Frame %d: (offsets 0x%x..0x%x)\n", + iFrame, iStart, iStart+pagesize+24); + aData = getContent(iStart, pagesize+24); + print_decode_line(aData, 0, 4, 0, "Page number"); + print_decode_line(aData, 4, 4, 0, "DB size, or 0 for non-commit"); + print_decode_line(aData, 8, 4, 1, "Salt-1"); + print_decode_line(aData,12, 4, 1, "Salt-2"); + print_decode_line(aData,16, 4, 1, "Checksum-1"); + print_decode_line(aData,20, 4, 1, "Checksum-2"); + print_byte_range(iStart+24, pagesize, aData+24, 0); + free(aData); +} + +/* +** extract a 32-bit big-endian integer +*/ +static unsigned int getInt32(const unsigned char *a){ + unsigned int x = (a[0]<<24) + (a[1]<<16) + (a[2]<<8) + a[3]; + return x; +} + +/* +** Print an entire page of content as hex +*/ +static void print_oneline_frame(int iFrame){ + int iStart; + unsigned char *aData; + iStart = 32 + (iFrame-1)*(pagesize+24); + aData = getContent(iStart, 24); + fprintf(stdout, "Frame %4d: %6d %6d 0x%08x 0x%08x 0x%08x 0x%08x\n", + iFrame, + getInt32(aData), + getInt32(aData+4), + getInt32(aData+8), + getInt32(aData+12), + getInt32(aData+16), + getInt32(aData+20) + ); + free(aData); +} + +/* +** Decode the WAL header. +*/ +static void print_wal_header(void){ + unsigned char *aData; + aData = getContent(0, 32); + printf("WAL Header:\n"); + print_decode_line(aData, 0, 4,1,"Magic. 
0x377f0682 (le) or 0x377f0683 (be)"); + print_decode_line(aData, 4, 4, 0, "File format"); + print_decode_line(aData, 8, 4, 0, "Database page size"); + print_decode_line(aData, 12,4, 0, "Checkpoint sequence number"); + print_decode_line(aData, 16,4, 1, "Salt-1"); + print_decode_line(aData, 20,4, 1, "Salt-2"); + print_decode_line(aData, 24,4, 1, "Checksum-1"); + print_decode_line(aData, 28,4, 1, "Checksum-2"); + free(aData); +} + +/* +** Create a description for a single cell. +*/ +static int describeCell(unsigned char cType, unsigned char *a, char **pzDesc){ + int i; + int nDesc = 0; + int n = 0; + int leftChild; + i64 nPayload; + i64 rowid; + static char zDesc[100]; + i = 0; + if( cType<=5 ){ + leftChild = ((a[0]*256 + a[1])*256 + a[2])*256 + a[3]; + a += 4; + n += 4; + sprintf(zDesc, "left-child: %d ", leftChild); + nDesc = strlen(zDesc); + } + if( cType!=5 ){ + i = decodeVarint(a, &nPayload); + a += i; + n += i; + sprintf(&zDesc[nDesc], "sz: %lld ", nPayload); + nDesc += strlen(&zDesc[nDesc]); + } + if( cType==5 || cType==13 ){ + i = decodeVarint(a, &rowid); + a += i; + n += i; + sprintf(&zDesc[nDesc], "rowid: %lld ", rowid); + nDesc += strlen(&zDesc[nDesc]); + } + *pzDesc = zDesc; + return n; +} + +/* +** Decode a btree page +*/ +static void decode_btree_page(unsigned char *a, int pgno, int hdrSize){ + const char *zType = "unknown"; + int nCell; + int i; + int iCellPtr; + switch( a[0] ){ + case 2: zType = "index interior node"; break; + case 5: zType = "table interior node"; break; + case 10: zType = "index leaf"; break; + case 13: zType = "table leaf"; break; + } + printf("Decode of btree page %d:\n", pgno); + print_decode_line(a, 0, 1, 0, zType); + print_decode_line(a, 1, 2, 0, "Offset to first freeblock"); + print_decode_line(a, 3, 2, 0, "Number of cells on this page"); + nCell = a[3]*256 + a[4]; + print_decode_line(a, 5, 2, 0, "Offset to cell content area"); + print_decode_line(a, 7, 1, 0, "Fragmented byte count"); + if( a[0]==2 || a[0]==5 ){ + print_decode_line(a, 8, 4, 0, "Right child"); + iCellPtr = 12; + }else{ + iCellPtr = 8; + } + for(i=0; imxFrame ){ + fprintf(stderr, + "Page argument should be LOWER?..UPPER?. Range 1 to %d\n", + mxFrame); + exit(1); + } + while( iStart<=iEnd ){ + print_frame(iStart); + iStart++; + } + } + } + close(fd); + return 0; +} diff --git a/tool/spaceanal.tcl b/tool/spaceanal.tcl index 3718357..bf6244e 100644 --- a/tool/spaceanal.tcl +++ b/tool/spaceanal.tcl @@ -26,14 +26,14 @@ if {[file size $file_to_analyze]<512} { exit 1 } -# Maximum distance between pages before we consider it a "gap" -# -set MAXGAP 3 - # Open the database # sqlite3 db [lindex $argv 0] -set DB [btree_open [lindex $argv 0] 1000 0] +register_dbstat_vtab db + +set pageSize [db one {PRAGMA page_size}] + +#set DB [btree_open [lindex $argv 0] 1000 0] # In-memory database for collecting statistics. This script loops through # the tables and indices in the database being analyzed, adding a row for each @@ -62,6 +62,87 @@ set tabledef\ );} mem eval $tabledef +# Create a temporary "dbstat" virtual table. 
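The rewritten script gathers its statistics from the dbstat virtual table instead of walking btrees directly. As a hedged illustration (it assumes the dbstat module has already been registered on the connection, which spaceanal.tcl arranges through the test-only [register_dbstat_vtab] command, and the table name 't1' is arbitrary), the same per-page information can be read from C:

#include <stdio.h>
#include <sqlite3.h>

/* Print each result row as a single space-separated line. */
static int print_row(void *pArg, int nCol, char **azVal, char **azCol){
  int i;
  (void)pArg; (void)azCol;
  for(i=0; i<nCol; i++){
    printf("%s%s", azVal[i] ? azVal[i] : "NULL", i==nCol-1 ? "\n" : " ");
  }
  return 0;
}

static void dump_pages(sqlite3 *db){
  char *zErr = 0;
  sqlite3_exec(db,
    "CREATE VIRTUAL TABLE temp.stat USING dbstat;"
    "SELECT name, pageno, pagetype, ncell, payload, unused"
    "  FROM temp.stat WHERE name='t1' ORDER BY path;",
    print_row, 0, &zErr);
  if( zErr ){ fprintf(stderr, "%s\n", zErr); sqlite3_free(zErr); }
}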
+# +db eval { + CREATE VIRTUAL TABLE temp.stat USING dbstat; + CREATE TEMP TABLE dbstat AS SELECT * FROM temp.stat ORDER BY name, path; + DROP TABLE temp.stat; +} + +proc isleaf {pagetype is_index} { + return [expr {$pagetype == "leaf" || ($pagetype == "internal" && $is_index)}] +} +proc isoverflow {pagetype is_index} { + return [expr {$pagetype == "overflow"}] +} +proc isinternal {pagetype is_index} { + return [expr {$pagetype == "internal" && $is_index==0}] +} + +db func isleaf isleaf +db func isinternal isinternal +db func isoverflow isoverflow + +set sql { SELECT name, tbl_name FROM sqlite_master WHERE rootpage>0 } +foreach {name tblname} [concat sqlite_master sqlite_master [db eval $sql]] { + + set is_index [expr {$name!=$tblname}] + db eval { + SELECT + sum(ncell) AS nentry, + sum(isleaf(pagetype, $is_index) * ncell) AS leaf_entries, + sum(payload) AS payload, + sum(isoverflow(pagetype, $is_index) * payload) AS ovfl_payload, + sum(path LIKE '%+000000') AS ovfl_cnt, + max(mx_payload) AS mx_payload, + sum(isinternal(pagetype, $is_index)) AS int_pages, + sum(isleaf(pagetype, $is_index)) AS leaf_pages, + sum(isoverflow(pagetype, $is_index)) AS ovfl_pages, + sum(isinternal(pagetype, $is_index) * unused) AS int_unused, + sum(isleaf(pagetype, $is_index) * unused) AS leaf_unused, + sum(isoverflow(pagetype, $is_index) * unused) AS ovfl_unused + FROM temp.dbstat WHERE name = $name + } break + + # Column 'gap_cnt' is set to the number of non-contiguous entries in the + # list of pages visited if the b-tree structure is traversed in a top-down + # fashion (each node visited before its child-tree is passed). Any overflow + # chains present are traversed from start to finish before any child-tree + # is. + # + set gap_cnt 0 + set pglist [db eval { + SELECT pageno FROM temp.dbstat WHERE name = $name ORDER BY rowid + }] + set prev [lindex $pglist 0] + foreach pgno [lrange $pglist 1 end] { + if {$pgno != $prev+1} {incr gap_cnt} + set prev $pgno + } + + mem eval { + INSERT INTO space_used VALUES( + $name, + $tblname, + $is_index, + $nentry, + $leaf_entries, + $payload, + $ovfl_payload, + $ovfl_cnt, + $mx_payload, + $int_pages, + $leaf_pages, + $ovfl_pages, + $int_unused, + $leaf_unused, + $ovfl_unused, + $gap_cnt + ); + } +} + proc integerify {real} { if {[string is double -strict $real]} { return [expr {int($real)}] @@ -81,321 +162,6 @@ proc quote {txt} { return '$q' } -# This proc is a wrapper around the btree_cursor_info command. The -# second argument is an open btree cursor returned by [btree_cursor]. -# The first argument is the name of an array variable that exists in -# the scope of the caller. If the third argument is non-zero, then -# info is returned for the page that lies $up entries upwards in the -# tree-structure. (i.e. $up==1 returns the parent page, $up==2 the -# grandparent etc.) 
-# -# The following entries in that array are filled in with information retrieved -# using [btree_cursor_info]: -# -# $arrayvar(page_no) = The page number -# $arrayvar(entry_no) = The entry number -# $arrayvar(page_entries) = Total number of entries on this page -# $arrayvar(cell_size) = Cell size (local payload + header) -# $arrayvar(page_freebytes) = Number of free bytes on this page -# $arrayvar(page_freeblocks) = Number of free blocks on the page -# $arrayvar(payload_bytes) = Total payload size (local + overflow) -# $arrayvar(header_bytes) = Header size in bytes -# $arrayvar(local_payload_bytes) = Local payload size -# $arrayvar(parent) = Parent page number -# -proc cursor_info {arrayvar csr {up 0}} { - upvar $arrayvar a - foreach [list a(page_no) \ - a(entry_no) \ - a(page_entries) \ - a(cell_size) \ - a(page_freebytes) \ - a(page_freeblocks) \ - a(payload_bytes) \ - a(header_bytes) \ - a(local_payload_bytes) \ - a(parent) \ - a(first_ovfl) ] [btree_cursor_info $csr $up] break -} - -# Determine the page-size of the database. This global variable is used -# throughout the script. -# -set pageSize [db eval {PRAGMA page_size}] - -# Analyze every table in the database, one at a time. -# -# The following query returns the name and root-page of each table in the -# database, including the sqlite_master table. -# -set sql { - SELECT name, rootpage FROM sqlite_master - WHERE type='table' AND rootpage>0 - UNION ALL - SELECT 'sqlite_master', 1 - ORDER BY 1 -} -set wideZero [expr {10000000000 - 10000000000}] -foreach {name rootpage} [db eval $sql] { - puts stderr "Analyzing table $name..." - - # Code below traverses the table being analyzed (table name $name), using the - # btree cursor $cursor. Statistics related to table $name are accumulated in - # the following variables: - # - set total_payload $wideZero ;# Payload space used by all entries - set total_ovfl $wideZero ;# Payload space on overflow pages - set unused_int $wideZero ;# Unused space on interior nodes - set unused_leaf $wideZero ;# Unused space on leaf nodes - set unused_ovfl $wideZero ;# Unused space on overflow pages - set cnt_ovfl $wideZero ;# Number of entries that use overflows - set cnt_leaf_entry $wideZero ;# Number of leaf entries - set cnt_int_entry $wideZero ;# Number of interor entries - set mx_payload $wideZero ;# Maximum payload size - set ovfl_pages $wideZero ;# Number of overflow pages used - set leaf_pages $wideZero ;# Number of leaf pages - set int_pages $wideZero ;# Number of interior pages - set gap_cnt 0 ;# Number of holes in the page sequence - set prev_pgno 0 ;# Last page number seen - - # As the btree is traversed, the array variable $seen($pgno) is set to 1 - # the first time page $pgno is encountered. - # - catch {unset seen} - - # The following loop runs once for each entry in table $name. The table - # is traversed using the btree cursor stored in variable $csr - # - set csr [btree_cursor $DB $rootpage 0] - for {btree_first $csr} {![btree_eof $csr]} {btree_next $csr} { - incr cnt_leaf_entry - - # Retrieve information about the entry the btree-cursor points to into - # the array variable $ci (cursor info). - # - cursor_info ci $csr - - # Check if the payload of this entry is greater than the current - # $mx_payload statistic for the table. Also increase the $total_payload - # statistic. 
- # - if {$ci(payload_bytes)>$mx_payload} {set mx_payload $ci(payload_bytes)} - incr total_payload $ci(payload_bytes) - - # If this entry uses overflow pages, then update the $cnt_ovfl, - # $total_ovfl, $ovfl_pages and $unused_ovfl statistics. - # - set ovfl [expr {$ci(payload_bytes)-$ci(local_payload_bytes)}] - if {$ovfl} { - incr cnt_ovfl - incr total_ovfl $ovfl - set n [expr {int(ceil($ovfl/($pageSize-4.0)))}] - incr ovfl_pages $n - incr unused_ovfl [expr {$n*($pageSize-4) - $ovfl}] - set pglist [btree_ovfl_info $DB $csr] - } else { - set pglist {} - } - - # If this is the first table entry analyzed for the page, then update - # the page-related statistics $leaf_pages and $unused_leaf. Also, if - # this page has a parent page that has not been analyzed, retrieve - # info for the parent and update statistics for it too. - # - if {![info exists seen($ci(page_no))]} { - set seen($ci(page_no)) 1 - incr leaf_pages - incr unused_leaf $ci(page_freebytes) - set pglist "$ci(page_no) $pglist" - - # Now check if the page has a parent that has not been analyzed. If - # so, update the $int_pages, $cnt_int_entry and $unused_int statistics - # accordingly. Then check if the parent page has a parent that has - # not yet been analyzed etc. - # - # set parent $ci(parent_page_no) - for {set up 1} \ - {$ci(parent)!=0 && ![info exists seen($ci(parent))]} {incr up} \ - { - # Mark the parent as seen. - # - set seen($ci(parent)) 1 - - # Retrieve info for the parent and update statistics. - cursor_info ci $csr $up - incr int_pages - incr cnt_int_entry $ci(page_entries) - incr unused_int $ci(page_freebytes) - - # parent pages come before their first child - set pglist "$ci(page_no) $pglist" - } - } - - # Check the page list for fragmentation - # - foreach pg $pglist { - if {$pg!=$prev_pgno+1 && $prev_pgno>0} { - incr gap_cnt - } - set prev_pgno $pg - } - } - btree_close_cursor $csr - - # Handle the special case where a table contains no data. In this case - # all statistics are zero, except for the number of leaf pages (1) and - # the unused bytes on leaf pages ($pageSize - 8). - # - # An exception to the above is the sqlite_master table. If it is empty - # then all statistics are zero except for the number of leaf pages (1), - # and the number of unused bytes on leaf pages ($pageSize - 112). - # - if {[llength [array names seen]]==0} { - set leaf_pages 1 - if {$rootpage==1} { - set unused_leaf [expr {$pageSize-112}] - } else { - set unused_leaf [expr {$pageSize-8}] - } - } - - # Insert the statistics for the table analyzed into the in-memory database. - # - set sql "INSERT INTO space_used VALUES(" - append sql [quote $name] - append sql ",[quote $name]" - append sql ",0" - append sql ",[expr {$cnt_leaf_entry+$cnt_int_entry}]" - append sql ",$cnt_leaf_entry" - append sql ",$total_payload" - append sql ",$total_ovfl" - append sql ",$cnt_ovfl" - append sql ",$mx_payload" - append sql ",$int_pages" - append sql ",$leaf_pages" - append sql ",$ovfl_pages" - append sql ",$unused_int" - append sql ",$unused_leaf" - append sql ",$unused_ovfl" - append sql ",$gap_cnt" - append sql ); - mem eval $sql -} - -# Analyze every index in the database, one at a time. -# -# The query below returns the name, associated table and root-page number -# for every index in the database. -# -set sql { - SELECT name, tbl_name, rootpage FROM sqlite_master WHERE type='index' - ORDER BY 2, 1 -} -foreach {name tbl_name rootpage} [db eval $sql] { - puts stderr "Analyzing index $name of table $tbl_name..." 
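The overflow arithmetic in the removed loop treats each overflow page as carrying the page size minus a 4-byte next-page pointer of payload. A short worked sketch, assuming a 1024-byte page and a 2500-byte spilled payload (both values are purely illustrative):

    set pageSize 1024
    set ovfl 2500                                     ;# bytes spilled to overflow
    # Each overflow page carries ($pageSize - 4) payload bytes.
    set n [expr {int(ceil($ovfl/($pageSize-4.0)))}]   ;# ceil(2500/1020) = 3 pages
    set unused [expr {$n*($pageSize-4) - $ovfl}]      ;# 3*1020 - 2500 = 560 bytes
    puts "$n overflow pages, $unused bytes unused"
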
- - # Code below traverses the index being analyzed (index name $name), using the - # btree cursor $cursor. Statistics related to index $name are accumulated in - # the following variables: - # - set total_payload $wideZero ;# Payload space used by all entries - set total_ovfl $wideZero ;# Payload space on overflow pages - set unused_leaf $wideZero ;# Unused space on leaf nodes - set unused_ovfl $wideZero ;# Unused space on overflow pages - set cnt_ovfl $wideZero ;# Number of entries that use overflows - set cnt_leaf_entry $wideZero ;# Number of leaf entries - set mx_payload $wideZero ;# Maximum payload size - set ovfl_pages $wideZero ;# Number of overflow pages used - set leaf_pages $wideZero ;# Number of leaf pages - set gap_cnt 0 ;# Number of holes in the page sequence - set prev_pgno 0 ;# Last page number seen - - # As the btree is traversed, the array variable $seen($pgno) is set to 1 - # the first time page $pgno is encountered. - # - catch {unset seen} - - # The following loop runs once for each entry in index $name. The index - # is traversed using the btree cursor stored in variable $csr - # - set csr [btree_cursor $DB $rootpage 0] - for {btree_first $csr} {![btree_eof $csr]} {btree_next $csr} { - incr cnt_leaf_entry - - # Retrieve information about the entry the btree-cursor points to into - # the array variable $ci (cursor info). - # - cursor_info ci $csr - - # Check if the payload of this entry is greater than the current - # $mx_payload statistic for the table. Also increase the $total_payload - # statistic. - # - set payload [btree_keysize $csr] - if {$payload>$mx_payload} {set mx_payload $payload} - incr total_payload $payload - - # If this entry uses overflow pages, then update the $cnt_ovfl, - # $total_ovfl, $ovfl_pages and $unused_ovfl statistics. - # - set ovfl [expr {$payload-$ci(local_payload_bytes)}] - if {$ovfl} { - incr cnt_ovfl - incr total_ovfl $ovfl - set n [expr {int(ceil($ovfl/($pageSize-4.0)))}] - incr ovfl_pages $n - incr unused_ovfl [expr {$n*($pageSize-4) - $ovfl}] - } - - # If this is the first table entry analyzed for the page, then update - # the page-related statistics $leaf_pages and $unused_leaf. - # - if {![info exists seen($ci(page_no))]} { - set seen($ci(page_no)) 1 - incr leaf_pages - incr unused_leaf $ci(page_freebytes) - set pg $ci(page_no) - if {$prev_pgno>0 && $pg!=$prev_pgno+1} { - incr gap_cnt - } - set prev_pgno $ci(page_no) - } - } - btree_close_cursor $csr - - # Handle the special case where a index contains no data. In this case - # all statistics are zero, except for the number of leaf pages (1) and - # the unused bytes on leaf pages ($pageSize - 8). - # - if {[llength [array names seen]]==0} { - set leaf_pages 1 - set unused_leaf [expr {$pageSize-8}] - } - - # Insert the statistics for the index analyzed into the in-memory database. - # - set sql "INSERT INTO space_used VALUES(" - append sql [quote $name] - append sql ",[quote $tbl_name]" - append sql ",1" - append sql ",$cnt_leaf_entry" - append sql ",$cnt_leaf_entry" - append sql ",$total_payload" - append sql ",$total_ovfl" - append sql ",$cnt_ovfl" - append sql ",$mx_payload" - append sql ",0" - append sql ",$leaf_pages" - append sql ",$ovfl_pages" - append sql ",0" - append sql ",$unused_leaf" - append sql ",$unused_ovfl" - append sql ",$gap_cnt" - append sql ); - mem eval $sql -} - # Generate a single line of output in the statistics section of the # report. # @@ -548,15 +314,12 @@ proc subreport {title where} { # pages and the page size used by the database (in bytes). 
proc autovacuum_overhead {filePages pageSize} { - # Read the value of meta 4. If non-zero, then the database supports - # auto-vacuum. It would be possible to use "PRAGMA auto_vacuum" instead, - # but that would not work if the SQLITE_OMIT_PRAGMA macro was defined - # when the library was built. - set meta4 [lindex [btree_get_meta $::DB] 4] + # Set $autovacuum to non-zero for databases that support auto-vacuum. + set autovacuum [db one {PRAGMA auto_vacuum}] # If the database is not an auto-vacuum database or the file consists # of one page only then there is no overhead for auto-vacuum. Return zero. - if {0==$meta4 || $filePages==1} { + if {0==$autovacuum || $filePages==1} { return 0 } @@ -606,7 +369,7 @@ set inuse_percent [percent $inuse_pgcnt $file_pgcnt] set free_pgcnt [expr $file_pgcnt-$inuse_pgcnt-$av_pgcnt] set free_percent [percent $free_pgcnt $file_pgcnt] -set free_pgcnt2 [lindex [btree_get_meta $DB] 0] +set free_pgcnt2 [db one {PRAGMA freelist_count}] set free_percent2 [percent $free_pgcnt2 $file_pgcnt] set file_pgcnt2 [expr {$inuse_pgcnt+$free_pgcnt2+$av_pgcnt}]
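
Reading these figures through pragmas, as the hunks above now do, means the same numbers are reachable from any Tcl script using the sqlite3 package, without access to btree_get_meta. A minimal sketch, assuming a database file named test.db (the path is only an example):

    package require sqlite3
    sqlite3 db test.db

    # Non-zero when the database uses auto-vacuum (pointer-map pages add overhead).
    set autovacuum [db onecolumn {PRAGMA auto_vacuum}]

    # Pages currently on the freelist; replaces reading meta value 0 directly.
    set free_pgcnt2 [db onecolumn {PRAGMA freelist_count}]

    puts "auto_vacuum=$autovacuum freelist=$free_pgcnt2"
    db close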