update go deps

Danny van Kooten 2018-11-14 12:02:41 +01:00
parent f5ddb54828
commit 00d265764a
69 changed files with 16382 additions and 7447 deletions


@ -35,6 +35,7 @@ Hanno Braun <mail at hannobraun.com>
Henri Yandell <flamefew at gmail.com> Henri Yandell <flamefew at gmail.com>
Hirotaka Yamamoto <ymmt2005 at gmail.com> Hirotaka Yamamoto <ymmt2005 at gmail.com>
ICHINOSE Shogo <shogo82148 at gmail.com> ICHINOSE Shogo <shogo82148 at gmail.com>
Ilia Cimpoes <ichimpoesh at gmail.com>
INADA Naoki <songofacandy at gmail.com> INADA Naoki <songofacandy at gmail.com>
Jacek Szwec <szwec.jacek at gmail.com> Jacek Szwec <szwec.jacek at gmail.com>
James Harr <james.harr at gmail.com> James Harr <james.harr at gmail.com>
@ -73,6 +74,7 @@ Soroush Pour <me at soroushjp.com>
Stan Putrya <root.vagner at gmail.com> Stan Putrya <root.vagner at gmail.com>
Stanley Gunawan <gunawan.stanley at gmail.com> Stanley Gunawan <gunawan.stanley at gmail.com>
Thomas Wodarek <wodarekwebpage at gmail.com> Thomas Wodarek <wodarekwebpage at gmail.com>
Tom Jenkinson <tom at tjenkinson.me>
Xiangyu Hu <xiangyu.hu at outlook.com> Xiangyu Hu <xiangyu.hu at outlook.com>
Xiaobing Jiang <s7v7nislands at gmail.com> Xiaobing Jiang <s7v7nislands at gmail.com>
Xiuming Chen <cc at cxm.cc> Xiuming Chen <cc at cxm.cc>


@ -234,64 +234,64 @@ func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) erro
if err != nil { if err != nil {
return err return err
} }
return mc.writeAuthSwitchPacket(enc, false) return mc.writeAuthSwitchPacket(enc)
} }
func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, bool, error) { func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) {
switch plugin { switch plugin {
case "caching_sha2_password": case "caching_sha2_password":
authResp := scrambleSHA256Password(authData, mc.cfg.Passwd) authResp := scrambleSHA256Password(authData, mc.cfg.Passwd)
return authResp, false, nil return authResp, nil
case "mysql_old_password": case "mysql_old_password":
if !mc.cfg.AllowOldPasswords { if !mc.cfg.AllowOldPasswords {
return nil, false, ErrOldPassword return nil, ErrOldPassword
} }
// Note: there are edge cases where this should work but doesn't; // Note: there are edge cases where this should work but doesn't;
// this is currently "wontfix": // this is currently "wontfix":
// https://github.com/go-sql-driver/mysql/issues/184 // https://github.com/go-sql-driver/mysql/issues/184
authResp := scrambleOldPassword(authData[:8], mc.cfg.Passwd) authResp := append(scrambleOldPassword(authData[:8], mc.cfg.Passwd), 0)
return authResp, true, nil return authResp, nil
case "mysql_clear_password": case "mysql_clear_password":
if !mc.cfg.AllowCleartextPasswords { if !mc.cfg.AllowCleartextPasswords {
return nil, false, ErrCleartextPassword return nil, ErrCleartextPassword
} }
// http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
// http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
return []byte(mc.cfg.Passwd), true, nil return append([]byte(mc.cfg.Passwd), 0), nil
case "mysql_native_password": case "mysql_native_password":
if !mc.cfg.AllowNativePasswords { if !mc.cfg.AllowNativePasswords {
return nil, false, ErrNativePassword return nil, ErrNativePassword
} }
// https://dev.mysql.com/doc/internals/en/secure-password-authentication.html // https://dev.mysql.com/doc/internals/en/secure-password-authentication.html
// Native password authentication only need and will need 20-byte challenge. // Native password authentication only need and will need 20-byte challenge.
authResp := scramblePassword(authData[:20], mc.cfg.Passwd) authResp := scramblePassword(authData[:20], mc.cfg.Passwd)
return authResp, false, nil return authResp, nil
case "sha256_password": case "sha256_password":
if len(mc.cfg.Passwd) == 0 { if len(mc.cfg.Passwd) == 0 {
return nil, true, nil return []byte{0}, nil
} }
if mc.cfg.tls != nil || mc.cfg.Net == "unix" { if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
// write cleartext auth packet // write cleartext auth packet
return []byte(mc.cfg.Passwd), true, nil return append([]byte(mc.cfg.Passwd), 0), nil
} }
pubKey := mc.cfg.pubKey pubKey := mc.cfg.pubKey
if pubKey == nil { if pubKey == nil {
// request public key from server // request public key from server
return []byte{1}, false, nil return []byte{1}, nil
} }
// encrypted password // encrypted password
enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey) enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey)
return enc, false, err return enc, err
default: default:
errLog.Print("unknown auth plugin:", plugin) errLog.Print("unknown auth plugin:", plugin)
return nil, false, ErrUnknownPlugin return nil, ErrUnknownPlugin
} }
} }
@ -315,11 +315,11 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
plugin = newPlugin plugin = newPlugin
authResp, addNUL, err := mc.auth(authData, plugin) authResp, err := mc.auth(authData, plugin)
if err != nil { if err != nil {
return err return err
} }
if err = mc.writeAuthSwitchPacket(authResp, addNUL); err != nil { if err = mc.writeAuthSwitchPacket(authResp); err != nil {
return err return err
} }
@ -352,7 +352,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
case cachingSha2PasswordPerformFullAuthentication: case cachingSha2PasswordPerformFullAuthentication:
if mc.cfg.tls != nil || mc.cfg.Net == "unix" { if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
// write cleartext auth packet // write cleartext auth packet
err = mc.writeAuthSwitchPacket([]byte(mc.cfg.Passwd), true) err = mc.writeAuthSwitchPacket(append([]byte(mc.cfg.Passwd), 0))
if err != nil { if err != nil {
return err return err
} }


@ -19,16 +19,6 @@ import (
"time" "time"
) )
// a copy of context.Context for Go 1.7 and earlier
type mysqlContext interface {
Done() <-chan struct{}
Err() error
// defined in context.Context, but not used in this driver:
// Deadline() (deadline time.Time, ok bool)
// Value(key interface{}) interface{}
}
type mysqlConn struct { type mysqlConn struct {
buf buffer buf buffer
netConn net.Conn netConn net.Conn
@ -45,7 +35,7 @@ type mysqlConn struct {
// for context support (Go 1.8+) // for context support (Go 1.8+)
watching bool watching bool
watcher chan<- mysqlContext watcher chan<- context.Context
closech chan struct{} closech chan struct{}
finished chan<- struct{} finished chan<- struct{}
canceled atomicError // set non-nil if conn is canceled canceled atomicError // set non-nil if conn is canceled
@ -475,7 +465,7 @@ func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
defer mc.finish() defer mc.finish()
if err = mc.writeCommandPacket(comPing); err != nil { if err = mc.writeCommandPacket(comPing); err != nil {
return return mc.markBadConn(err)
} }
return mc.readResultOK() return mc.readResultOK()
@ -595,33 +585,32 @@ func (mc *mysqlConn) watchCancel(ctx context.Context) error {
mc.cleanup() mc.cleanup()
return nil return nil
} }
// When ctx is already cancelled, don't watch it.
if err := ctx.Err(); err != nil {
return err
}
// When ctx is not cancellable, don't watch it.
if ctx.Done() == nil { if ctx.Done() == nil {
return nil return nil
} }
// When watcher is not alive, can't watch it.
mc.watching = true
select {
default:
case <-ctx.Done():
return ctx.Err()
}
if mc.watcher == nil { if mc.watcher == nil {
return nil return nil
} }
mc.watching = true
mc.watcher <- ctx mc.watcher <- ctx
return nil return nil
} }
func (mc *mysqlConn) startWatcher() { func (mc *mysqlConn) startWatcher() {
watcher := make(chan mysqlContext, 1) watcher := make(chan context.Context, 1)
mc.watcher = watcher mc.watcher = watcher
finished := make(chan struct{}) finished := make(chan struct{})
mc.finished = finished mc.finished = finished
go func() { go func() {
for { for {
var ctx mysqlContext var ctx context.Context
select { select {
case ctx = <-watcher: case ctx = <-watcher:
case <-mc.closech: case <-mc.closech:


@ -77,6 +77,10 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
mc.netConn, err = nd.Dial(mc.cfg.Net, mc.cfg.Addr) mc.netConn, err = nd.Dial(mc.cfg.Net, mc.cfg.Addr)
} }
if err != nil { if err != nil {
if nerr, ok := err.(net.Error); ok && nerr.Temporary() {
errLog.Print("net.Error from Dial()': ", nerr.Error())
return nil, driver.ErrBadConn
}
return nil, err return nil, err
} }
@ -110,18 +114,18 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
} }
// Send Client Authentication Packet // Send Client Authentication Packet
authResp, addNUL, err := mc.auth(authData, plugin) authResp, err := mc.auth(authData, plugin)
if err != nil { if err != nil {
// try the default auth plugin, if using the requested plugin failed // try the default auth plugin, if using the requested plugin failed
errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error()) errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
plugin = defaultAuthPlugin plugin = defaultAuthPlugin
authResp, addNUL, err = mc.auth(authData, plugin) authResp, err = mc.auth(authData, plugin)
if err != nil { if err != nil {
mc.cleanup() mc.cleanup()
return nil, err return nil, err
} }
} }
if err = mc.writeHandshakeResponsePacket(authResp, addNUL, plugin); err != nil { if err = mc.writeHandshakeResponsePacket(authResp, plugin); err != nil {
mc.cleanup() mc.cleanup()
return nil, err return nil, err
} }


@ -243,7 +243,7 @@ func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err erro
// Client Authentication Packet // Client Authentication Packet
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse
func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, addNUL bool, plugin string) error { func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string) error {
// Adjust client flags based on server support // Adjust client flags based on server support
clientFlags := clientProtocol41 | clientFlags := clientProtocol41 |
clientSecureConn | clientSecureConn |
@ -269,7 +269,8 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, addNUL bool,
// encode length of the auth plugin data // encode length of the auth plugin data
var authRespLEIBuf [9]byte var authRespLEIBuf [9]byte
authRespLEI := appendLengthEncodedInteger(authRespLEIBuf[:0], uint64(len(authResp))) authRespLen := len(authResp)
authRespLEI := appendLengthEncodedInteger(authRespLEIBuf[:0], uint64(authRespLen))
if len(authRespLEI) > 1 { if len(authRespLEI) > 1 {
// if the length can not be written in 1 byte, it must be written as a // if the length can not be written in 1 byte, it must be written as a
// length encoded integer // length encoded integer
@ -277,9 +278,6 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, addNUL bool,
} }
pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + len(authRespLEI) + len(authResp) + 21 + 1 pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + len(authRespLEI) + len(authResp) + 21 + 1
if addNUL {
pktLen++
}
// To specify a db name // To specify a db name
if n := len(mc.cfg.DBName); n > 0 { if n := len(mc.cfg.DBName); n > 0 {
@ -350,10 +348,6 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, addNUL bool,
// Auth Data [length encoded integer] // Auth Data [length encoded integer]
pos += copy(data[pos:], authRespLEI) pos += copy(data[pos:], authRespLEI)
pos += copy(data[pos:], authResp) pos += copy(data[pos:], authResp)
if addNUL {
data[pos] = 0x00
pos++
}
// Databasename [null terminated string] // Databasename [null terminated string]
if len(mc.cfg.DBName) > 0 { if len(mc.cfg.DBName) > 0 {
@ -364,17 +358,15 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, addNUL bool,
pos += copy(data[pos:], plugin) pos += copy(data[pos:], plugin)
data[pos] = 0x00 data[pos] = 0x00
pos++
// Send Auth packet // Send Auth packet
return mc.writePacket(data) return mc.writePacket(data[:pos])
} }
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte, addNUL bool) error { func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error {
pktLen := 4 + len(authData) pktLen := 4 + len(authData)
if addNUL {
pktLen++
}
data := mc.buf.takeSmallBuffer(pktLen) data := mc.buf.takeSmallBuffer(pktLen)
if data == nil { if data == nil {
// cannot take the buffer. Something must be wrong with the connection // cannot take the buffer. Something must be wrong with the connection
@ -384,10 +376,6 @@ func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte, addNUL bool) error {
// Add the auth data [EOF] // Add the auth data [EOF]
copy(data[4:], authData) copy(data[4:], authData)
if addNUL {
data[pktLen-1] = 0x00
}
return mc.writePacket(data) return mc.writePacket(data)
} }
@ -479,7 +467,7 @@ func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
return data[1:], "", err return data[1:], "", err
case iEOF: case iEOF:
if len(data) < 1 { if len(data) == 1 {
// https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest
return nil, "mysql_old_password", nil return nil, "mysql_old_password", nil
} }

8
vendor/github.com/gobuffalo/envy/LICENSE.txt generated vendored Normal file

@ -0,0 +1,8 @@
The MIT License (MIT)
Copyright (c) 2018 Mark Bates
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

46
vendor/github.com/gobuffalo/envy/Makefile generated vendored Normal file

@ -0,0 +1,46 @@
TAGS ?= "sqlite"
GO_BIN ?= go
install:
packr
$(GO_BIN) install -v .
deps:
$(GO_BIN) get github.com/gobuffalo/release
$(GO_BIN) get github.com/gobuffalo/packr/packr
$(GO_BIN) get -tags ${TAGS} -t ./...
ifeq ($(GO111MODULE),on)
$(GO_BIN) mod tidy
endif
build:
packr
$(GO_BIN) build -v .
test:
packr
$(GO_BIN) test -tags ${TAGS} ./...
ci-test:
$(GO_BIN) test -tags ${TAGS} -race ./...
lint:
gometalinter --vendor ./... --deadline=1m --skip=internal
update:
$(GO_BIN) get -u -tags ${TAGS}
ifeq ($(GO111MODULE),on)
$(GO_BIN) mod tidy
endif
packr
make test
make install
ifeq ($(GO111MODULE),on)
$(GO_BIN) mod tidy
endif
release-test:
$(GO_BIN) test -tags ${TAGS} -race ./...
release:
release -y -f version.go

93
vendor/github.com/gobuffalo/envy/README.md generated vendored Normal file

@ -0,0 +1,93 @@
# envy
[![Build Status](https://travis-ci.org/gobuffalo/envy.svg?branch=master)](https://travis-ci.org/gobuffalo/envy)
Envy makes working with ENV variables in Go trivial.
* Get ENV variables with default values.
* Set ENV variables safely without affecting the underlying system.
* Temporarily change ENV vars; useful for testing.
* Map all of the key/values in the ENV.
* Loads .env files (by using [godotenv](https://github.com/joho/godotenv/))
* More!
## Installation
```text
$ go get -u github.com/gobuffalo/envy
```
## Usage
```go
func Test_Get(t *testing.T) {
r := require.New(t)
r.NotZero(os.Getenv("GOPATH"))
r.Equal(os.Getenv("GOPATH"), envy.Get("GOPATH", "foo"))
r.Equal("bar", envy.Get("IDONTEXIST", "bar"))
}
func Test_MustGet(t *testing.T) {
r := require.New(t)
r.NotZero(os.Getenv("GOPATH"))
v, err := envy.MustGet("GOPATH")
r.NoError(err)
r.Equal(os.Getenv("GOPATH"), v)
_, err = envy.MustGet("IDONTEXIST")
r.Error(err)
}
func Test_Set(t *testing.T) {
r := require.New(t)
_, err := envy.MustGet("FOO")
r.Error(err)
envy.Set("FOO", "foo")
r.Equal("foo", envy.Get("FOO", "bar"))
}
func Test_Temp(t *testing.T) {
r := require.New(t)
_, err := envy.MustGet("BAR")
r.Error(err)
envy.Temp(func() {
envy.Set("BAR", "foo")
r.Equal("foo", envy.Get("BAR", "bar"))
_, err = envy.MustGet("BAR")
r.NoError(err)
})
_, err = envy.MustGet("BAR")
r.Error(err)
}
```
## .env files support
Envy now supports loading `.env` files by using the [godotenv library](https://github.com/joho/godotenv/).
That means one can use and define multiple `.env` files which will be loaded on-demand. By default, no env files will be loaded. To load one or more, you need to call the `envy.Load` function in one of the following ways:
```go
envy.Load() // 1
envy.Load("MY_ENV_FILE") // 2
envy.Load(".env", ".env.prod") // 3
envy.Load(".env", "NON_EXISTING_FILE") // 4
// 5
envy.Load(".env")
envy.Load("NON_EXISTING_FILE")
// 6
envy.Load(".env", "NON_EXISTING_FILE", ".env.prod")
```
1. Will load the default `.env` file
2. Will load the file `MY_ENV_FILE`, **but not** `.env`
3. Will load the file `.env`, and after that will load the `.env.prod` file. If any variable is redefined in `.env.prod` it will be overwritten (it will contain the `.env.prod` value)
4. Will load the `.env` file and return an error as the second file does not exist. The values in `.env` will be loaded and available.
5. Same as 4
6. Will load the `.env` file and return an error as the second file does not exist. The values in `.env` will be loaded and available, **but the ones in** `.env.prod` **won't**.
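As a quick, hedged sketch of the two calls together (the `.env` file name and the `PORT` key below are only examples, not part of envy):
```go
package main

import (
	"fmt"
	"log"

	"github.com/gobuffalo/envy"
)

func main() {
	// Load returns an error if any listed file is missing or unreadable;
	// files loaded before the failure keep their values.
	if err := envy.Load(".env"); err != nil {
		log.Fatal(err)
	}

	// Get falls back to the given default when the key is not set.
	port := envy.Get("PORT", "8080")
	fmt.Println("listening on", port)
}
```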

232
vendor/github.com/gobuffalo/envy/envy.go generated vendored Normal file

@ -0,0 +1,232 @@
/*
package envy makes working with ENV variables in Go trivial.
* Get ENV variables with default values.
* Set ENV variables safely without affecting the underlying system.
* Temporarily change ENV vars; useful for testing.
* Map all of the key/values in the ENV.
* Loads .env files (by using [godotenv](https://github.com/joho/godotenv/))
* More!
*/
package envy
import (
"flag"
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"sync"
"github.com/joho/godotenv"
)
var gil = &sync.RWMutex{}
var env = map[string]string{}
// GO111MODULE is ENV for turning mods on/off
const GO111MODULE = "GO111MODULE"
func init() {
Load()
loadEnv()
}
// Load the ENV variables to the env map
func loadEnv() {
gil.Lock()
defer gil.Unlock()
if os.Getenv("GO_ENV") == "" {
// if the flag "test.v" is *defined*, we're running as a unit test. Note that we don't care
// about v.Value (verbose test mode); we just want to know if the test environment has defined
// it. It's also possible that the flags are not yet fully parsed (i.e. flag.Parsed() == false),
// so we could not depend on v.Value anyway.
//
if v := flag.Lookup("test.v"); v != nil {
env["GO_ENV"] = "test"
}
}
// set the GOPATH if using >= 1.8 and the GOPATH isn't set
if os.Getenv("GOPATH") == "" {
out, err := exec.Command("go", "env", "GOPATH").Output()
if err == nil {
gp := strings.TrimSpace(string(out))
os.Setenv("GOPATH", gp)
}
}
for _, e := range os.Environ() {
pair := strings.Split(e, "=")
env[pair[0]] = os.Getenv(pair[0])
}
}
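// Mods returns true when Go modules are enabled, i.e. GO111MODULE is set to "on".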
func Mods() bool {
return Get(GO111MODULE, "off") == "on"
}
// Reload the ENV variables. Useful if
// an external ENV manager has been used
func Reload() {
env = map[string]string{}
loadEnv()
}
// Load .env files. Files will be loaded in the same order they are received.
// Redefined vars will override previously existing values.
// e.g. envy.Load(".env", "test_env/.env") will result in DIR=test_env
// If no arg is passed, it will try to load a .env file.
func Load(files ...string) error {
// If no files received, load the default one
if len(files) == 0 {
err := godotenv.Overload()
if err == nil {
Reload()
}
return err
}
// We received a list of files
for _, file := range files {
// Check if it exists or we can access
if _, err := os.Stat(file); err != nil {
// It does not exist or we can not access.
// Return and stop loading
return err
}
// It exists and we have permission. Load it
if err := godotenv.Overload(file); err != nil {
return err
}
// Reload the env so all new changes are noticed
Reload()
}
return nil
}
// Get a value from the ENV. If it doesn't exist the
// default value will be returned.
func Get(key string, value string) string {
gil.RLock()
defer gil.RUnlock()
if v, ok := env[key]; ok {
return v
}
return value
}
// Get a value from the ENV. If it doesn't exist
// an error will be returned
func MustGet(key string) (string, error) {
gil.RLock()
defer gil.RUnlock()
if v, ok := env[key]; ok {
return v, nil
}
return "", fmt.Errorf("could not find ENV var with %s", key)
}
// Set a value into the ENV. This is NOT permanent. It will
// only affect values accessed through envy.
func Set(key string, value string) {
gil.Lock()
defer gil.Unlock()
env[key] = value
}
// MustSet the value into the underlying ENV, as well as envy.
// This may return an error if there is a problem setting the
// underlying ENV value.
func MustSet(key string, value string) error {
gil.Lock()
defer gil.Unlock()
err := os.Setenv(key, value)
if err != nil {
return err
}
env[key] = value
return nil
}
// Map all of the keys/values set in envy.
func Map() map[string]string {
gil.RLock()
defer gil.RUnlock()
cp := map[string]string{}
for k, v := range env {
cp[k] = v
}
return cp
}
// Temp makes a copy of the values and allows operation on
// those values temporarily during the run of the function.
// At the end of the function run the copy is discarded and
// the original values are replaced. This is useful for testing.
// Warning: This function is NOT safe to use from a goroutine or
// from code which may access any Get or Set function from a goroutine
func Temp(f func()) {
oenv := env
env = map[string]string{}
for k, v := range oenv {
env[k] = v
}
defer func() { env = oenv }()
f()
}
func GoPath() string {
return Get("GOPATH", "")
}
func GoBin() string {
return Get("GO_BIN", "go")
}
// GoPaths returns all possible GOPATHS that are set.
func GoPaths() []string {
gp := Get("GOPATH", "")
if runtime.GOOS == "windows" {
return strings.Split(gp, ";") // Windows uses a different separator
}
return strings.Split(gp, ":")
}
func importPath(path string) string {
for _, gopath := range GoPaths() {
srcpath := filepath.Join(gopath, "src")
rel, err := filepath.Rel(srcpath, path)
if err == nil {
return filepath.ToSlash(rel)
}
}
// fallback to trim
rel := strings.TrimPrefix(path, filepath.Join(GoPath(), "src"))
rel = strings.TrimPrefix(rel, string(filepath.Separator))
return filepath.ToSlash(rel)
}
func CurrentPackage() string {
pwd, _ := os.Getwd()
return importPath(pwd)
}
func Environ() []string {
gil.RLock()
defer gil.RUnlock()
var e []string
for k, v := range env {
e = append(e, fmt.Sprintf("%s=%s", k, v))
}
return e
}

8
vendor/github.com/gobuffalo/envy/go.mod generated vendored Normal file

@ -0,0 +1,8 @@
module github.com/gobuffalo/envy
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/joho/godotenv v1.3.0
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/stretchr/testify v1.2.2
)

8
vendor/github.com/gobuffalo/envy/go.sum generated vendored Normal file

@ -0,0 +1,8 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=

10
vendor/github.com/gobuffalo/envy/shoulders.md generated vendored Normal file

@ -0,0 +1,10 @@
# github.com/gobuffalo/envy Stands on the Shoulders of Giants
github.com/gobuffalo/envy does not try to reinvent the wheel! Instead, it uses the already great wheels developed by the Go community and puts them all together in the best way possible. Without these giants this project would not be possible. Please make sure to check them out and thank them for all of their hard work.
Thank you to the following **GIANTS**:
* [github.com/gobuffalo/envy](https://godoc.org/github.com/gobuffalo/envy)
* [github.com/joho/godotenv](https://godoc.org/github.com/joho/godotenv)

3
vendor/github.com/gobuffalo/envy/version.go generated vendored Normal file

@ -0,0 +1,3 @@
package envy
const Version = "v1.6.7"

21
vendor/github.com/gobuffalo/packd/LICENSE generated vendored Normal file

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2018 Mark Bates
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

57
vendor/github.com/gobuffalo/packd/Makefile generated vendored Normal file

@ -0,0 +1,57 @@
TAGS ?= "sqlite"
GO_BIN ?= go
install:
packr
$(GO_BIN) install -tags ${TAGS} -v .
make tidy
tidy:
ifeq ($(GO111MODULE),on)
$(GO_BIN) mod tidy
else
echo skipping go mod tidy
endif
deps:
$(GO_BIN) get github.com/gobuffalo/release
$(GO_BIN) get github.com/gobuffalo/packr/packr
$(GO_BIN) get -tags ${TAGS} -t ./...
make tidy
build:
packr
$(GO_BIN) build -v .
make tidy
test:
packr
$(GO_BIN) test -tags ${TAGS} ./...
make tidy
ci-deps:
$(GO_BIN) get -tags ${TAGS} -t ./...
ci-test:
$(GO_BIN) test -tags ${TAGS} -race ./...
lint:
gometalinter --vendor ./... --deadline=1m --skip=internal
make tidy
update:
$(GO_BIN) get -u -tags ${TAGS}
make tidy
packr
make test
make install
make tidy
release-test:
$(GO_BIN) test -tags ${TAGS} -race ./...
make tidy
release:
make tidy
release -y -f version.go
make tidy

24
vendor/github.com/gobuffalo/packd/README.md generated vendored Normal file

@ -0,0 +1,24 @@
<p align="center"><img src="https://github.com/gobuffalo/buffalo/blob/master/logo.svg" width="360"></p>
<p align="center">
<a href="https://godoc.org/github.com/gobuffalo/packd"><img src="https://godoc.org/github.com/gobuffalo/packd?status.svg" alt="GoDoc" /></a>
<a href="https://travis-ci.org/gobuffalo/packd"><img src="https://travis-ci.org/gobuffalo/packd.svg?branch=master" alt="Build Status" /></a>
<a href="https://goreportcard.com/report/github.com/gobuffalo/packd"><img src="https://goreportcard.com/badge/github.com/gobuffalo/packd" alt="Go Report Card" /></a>
</p>
# github.com/gobuffalo/packd
This is a collection of interfaces designed to make using [github.com/gobuffalo/packr](https://github.com/gobuffalo/packr) easier, and to make the transition between v1 and v2 as seamless as possible.
They can, and should, be used for testing, alternate Box implementations, etc...
## Installation
```bash
$ go get -u -v github.com/gobuffalo/packd
```
## Memory Box
The [`packd#MemoryBox`](https://godoc.org/github.com/gobuffalo/packd#MemoryBox) is a complete, thread-safe, implementation of [`packd#Box`](https://godoc.org/github.com/gobuffalo/packd#Box)
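As a rough usage sketch (the paths and contents below are made up, not part of packd):
```go
package main

import (
	"fmt"
	"log"

	"github.com/gobuffalo/packd"
)

func main() {
	box := packd.NewMemoryBox()

	// AddString stores the contents under the given path.
	if err := box.AddString("templates/index.html", "<h1>hi</h1>"); err != nil {
		log.Fatal(err)
	}

	// FindString returns the stored contents, or an error for unknown paths.
	s, err := box.FindString("templates/index.html")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(s)

	// List returns the sorted paths currently held by the box.
	fmt.Println(box.List())
}
```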

112
vendor/github.com/gobuffalo/packd/file.go generated vendored Normal file

@ -0,0 +1,112 @@
package packd
import (
"bytes"
"fmt"
"io"
"os"
"time"
"github.com/pkg/errors"
)
var _ File = &virtualFile{}
var _ io.Reader = &virtualFile{}
var _ io.Writer = &virtualFile{}
var _ fmt.Stringer = &virtualFile{}
type virtualFile struct {
buf *bytes.Buffer
name string
info fileInfo
original []byte
}
func (f virtualFile) Name() string {
return f.name
}
func (f virtualFile) Seek(offset int64, whence int) (int64, error) {
return -1, nil
}
func (f virtualFile) FileInfo() (os.FileInfo, error) {
return f.info, nil
}
func (f *virtualFile) Close() error {
return nil
}
func (f virtualFile) Readdir(count int) ([]os.FileInfo, error) {
return []os.FileInfo{f.info}, nil
}
func (f virtualFile) Stat() (os.FileInfo, error) {
return f.info, nil
}
func (f virtualFile) String() string {
return string(f.original)
}
func (s *virtualFile) Read(p []byte) (int, error) {
i, err := s.buf.Read(p)
if i == 0 || err == io.EOF {
s.buf = bytes.NewBuffer(s.original)
}
return i, err
}
func (s *virtualFile) Write(p []byte) (int, error) {
bb := &bytes.Buffer{}
i, err := bb.Write(p)
if err != nil {
return i, errors.WithStack(err)
}
s.buf = bb
s.original = bb.Bytes()
s.info = fileInfo{
Path: s.name,
Contents: bb.Bytes(),
size: int64(bb.Len()),
modTime: time.Now(),
}
return i, nil
}
// NewFile returns a new "virtual" file
func NewFile(name string, r io.Reader) (File, error) {
bb := &bytes.Buffer{}
if r != nil {
io.Copy(bb, r)
}
return &virtualFile{
buf: bb,
name: name,
original: bb.Bytes(),
info: fileInfo{
Path: name,
Contents: bb.Bytes(),
size: int64(bb.Len()),
modTime: time.Now(),
},
}, nil
}
// NewDir returns a new "virtual" directory
func NewDir(name string) (File, error) {
bb := &bytes.Buffer{}
return &virtualFile{
buf: bb,
name: name,
info: fileInfo{
Path: name,
Contents: bb.Bytes(),
size: int64(bb.Len()),
modTime: time.Now(),
isDir: true,
},
}, nil
}


@ -1,10 +1,12 @@
package packr package packd
import ( import (
"os" "os"
"time" "time"
) )
var _ os.FileInfo = fileInfo{}
type fileInfo struct { type fileInfo struct {
Path string Path string
Contents []byte Contents []byte

8
vendor/github.com/gobuffalo/packd/go.mod generated vendored Normal file

@ -0,0 +1,8 @@
module github.com/gobuffalo/packd
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/pkg/errors v0.8.0
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/stretchr/testify v1.2.2
)

8
vendor/github.com/gobuffalo/packd/go.sum generated vendored Normal file

@ -0,0 +1,8 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=

83
vendor/github.com/gobuffalo/packd/interfaces.go generated vendored Normal file

@ -0,0 +1,83 @@
package packd
import (
"fmt"
"io"
"net/http"
"os"
)
type WalkFunc func(string, File) error
// Box represents the entirety of the necessary
// interfaces to form a "full" box.
// github.com/gobuffalo/packr#Box is an example of this interface.
type Box interface {
HTTPBox
Lister
Addable
Finder
Walkable
Haser
}
type Haser interface {
Has(string) bool
}
type Walker interface {
Walk(wf WalkFunc) error
}
type Walkable interface {
Walker
WalkPrefix(prefix string, wf WalkFunc) error
}
type Finder interface {
Find(string) ([]byte, error)
FindString(name string) (string, error)
}
type HTTPBox interface {
Open(name string) (http.File, error)
}
type Lister interface {
List() []string
}
type Addable interface {
AddString(path string, t string) error
AddBytes(path string, t []byte) error
}
type SimpleFile interface {
fmt.Stringer
io.Reader
io.Writer
Name() string
}
type HTTPFile interface {
SimpleFile
io.Closer
io.Seeker
Readdir(count int) ([]os.FileInfo, error)
Stat() (os.FileInfo, error)
}
type File interface {
HTTPFile
FileInfo() (os.FileInfo, error)
}
// LegacyBox represents deprecated methods
// that older Box implementations might have had.
// github.com/gobuffalo/packr v1 is an example of a LegacyBox.
type LegacyBox interface {
String(name string) string
MustString(name string) (string, error)
Bytes(name string) []byte
MustBytes(name string) ([]byte, error)
}

178
vendor/github.com/gobuffalo/packd/memory_box.go generated vendored Normal file

@ -0,0 +1,178 @@
package packd
import (
"bytes"
"fmt"
"net/http"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"github.com/pkg/errors"
)
var _ Addable = NewMemoryBox()
var _ Finder = NewMemoryBox()
var _ Lister = NewMemoryBox()
var _ HTTPBox = NewMemoryBox()
var _ Haser = NewMemoryBox()
var _ Walkable = NewMemoryBox()
var _ Box = NewMemoryBox()
// MemoryBox is a thread-safe, in-memory, implementation of the Box interface.
type MemoryBox struct {
files *sync.Map
}
func (m *MemoryBox) Has(path string) bool {
_, ok := m.files.Load(path)
return ok
}
func (m *MemoryBox) List() []string {
var names []string
m.files.Range(func(key interface{}, value interface{}) bool {
if s, ok := key.(string); ok {
names = append(names, s)
}
return true
})
sort.Strings(names)
return names
}
func (m *MemoryBox) Open(path string) (http.File, error) {
cpath := strings.TrimPrefix(path, "/")
if filepath.Ext(cpath) == "" {
// it's a directory
return NewDir(path)
}
if len(cpath) == 0 {
cpath = "index.html"
}
b, err := m.Find(cpath)
if err != nil {
return nil, err
}
cpath = filepath.FromSlash(cpath)
f, err := NewFile(cpath, bytes.NewReader(b))
if err != nil {
return nil, err
}
return f, nil
}
func (m *MemoryBox) FindString(path string) (string, error) {
bb, err := m.Find(path)
return string(bb), err
}
func (m *MemoryBox) Find(path string) ([]byte, error) {
res, ok := m.files.Load(path)
if !ok {
var b []byte
lpath := strings.ToLower(path)
err := m.Walk(func(p string, file File) error {
lp := strings.ToLower(p)
if lp != lpath {
return nil
}
res := file.String()
b = []byte(res)
m.AddString(lp, res)
return nil
})
if err != nil {
return b, os.ErrNotExist
}
if len(b) == 0 {
return b, os.ErrNotExist
}
return b, nil
}
b, ok := res.([]byte)
if !ok {
return nil, fmt.Errorf("expected []byte got %T", res)
}
return b, nil
}
func (m *MemoryBox) AddString(path string, t string) error {
return m.AddBytes(path, []byte(t))
}
func (m *MemoryBox) AddBytes(path string, t []byte) error {
m.files.Store(path, t)
return nil
}
func (m *MemoryBox) Walk(wf WalkFunc) error {
var err error
m.files.Range(func(key interface{}, res interface{}) bool {
path, ok := key.(string)
if !ok {
err = fmt.Errorf("expected string got %T", key)
return false
}
b, ok := res.([]byte)
if !ok {
err = fmt.Errorf("expected []byte got %T", res)
return false
}
var f File
f, err = NewFile(path, bytes.NewReader(b))
if err != nil {
return false
}
err = wf(path, f)
if err != nil {
if errors.Cause(err) == filepath.SkipDir {
err = nil
return true
}
return false
}
return true
})
if errors.Cause(err) == filepath.SkipDir {
return nil
}
return err
}
func (m *MemoryBox) WalkPrefix(pre string, wf WalkFunc) error {
return m.Walk(func(path string, file File) error {
if strings.HasPrefix(path, pre) {
return wf(path, file)
}
return nil
})
}
func (m *MemoryBox) Remove(path string) {
m.files.Delete(path)
m.files.Delete(strings.ToLower(path))
}
// NewMemoryBox returns a configured *MemoryBox
func NewMemoryBox() *MemoryBox {
return &MemoryBox{
files: &sync.Map{},
}
}

45
vendor/github.com/gobuffalo/packd/skip_walker.go generated vendored Normal file

@ -0,0 +1,45 @@
package packd
import (
"path/filepath"
"strings"
"github.com/pkg/errors"
)
var CommonSkipPrefixes = []string{".", "_", "node_modules", "vendor"}
// SkipWalker will walk the Walker and call the WalkFunc for files whose directories
// do not match any of the skipPrefixes. If no skipPrefixes are passed, then
// CommonSkipPrefixes is used.
func SkipWalker(walker Walker, skipPrefixes []string, wf WalkFunc) error {
if len(skipPrefixes) == 0 {
skipPrefixes = append(skipPrefixes, CommonSkipPrefixes...)
}
return walker.Walk(func(path string, file File) error {
fi, err := file.FileInfo()
if err != nil {
return errors.WithStack(err)
}
path = strings.Replace(path, "\\", "/", -1)
parts := strings.Split(path, "/")
if !fi.IsDir() {
parts = parts[:len(parts)-1]
}
for _, base := range parts {
if base != "." {
for _, skip := range skipPrefixes {
skip = strings.ToLower(skip)
lbase := strings.ToLower(base)
if strings.HasPrefix(lbase, skip) {
return filepath.SkipDir
}
}
}
}
return wf(path, file)
})
}

4
vendor/github.com/gobuffalo/packd/version.go generated vendored Normal file

@ -0,0 +1,4 @@
package packd
// Version of packd
const Version = "v0.0.1"

36
vendor/github.com/gobuffalo/packr/Makefile generated vendored Normal file

@ -0,0 +1,36 @@
TAGS ?= "sqlite"
GO_BIN ?= go
install: deps
packr
$(GO_BIN) install -v .
deps:
$(GO_BIN) get github.com/gobuffalo/packr/packr
$(GO_BIN) get -tags ${TAGS} -t ./...
build: deps
packr
$(GO_BIN) build -v .
test:
packr
$(GO_BIN) test -tags ${TAGS} ./...
ci-test: deps
$(GO_BIN) test -tags ${TAGS} -race ./...
lint:
gometalinter --vendor ./... --deadline=1m --skip=internal
update:
$(GO_BIN) get -u
$(GO_BIN) mod tidy
packr
make test
release-test:
$(GO_BIN) test -tags ${TAGS} -race ./...
release:
release -y -f version.go


@ -10,8 +10,16 @@ To get an idea of the what and why of packr, please enjoy this short video: [htt
## Installation ## Installation
To install the Packr utility
```text ```text
$ go get -u github.com/gobuffalo/packr/... $ go get -u github.com/gobuffalo/packr/packr
```
To get the dependency
```text
$ go get -u github.com/gobuffalo/packr
``` ```
## Usage ## Usage
@ -24,17 +32,11 @@ The first step in using Packr is to create a new box. A box represents a folder
// set up a new box by giving it a (relative) path to a folder on disk: // set up a new box by giving it a (relative) path to a folder on disk:
box := packr.NewBox("./templates") box := packr.NewBox("./templates")
// Get the string representation of a file:
html := box.String("index.html")
// Get the string representation of a file, or an error if it doesn't exist: // Get the string representation of a file, or an error if it doesn't exist:
html, err := box.MustString("index.html") html, err := box.FindString("index.html")
// Get the []byte representation of a file:
html := box.Bytes("index.html")
// Get the []byte representation of a file, or an error if it doesn't exist: // Get the []byte representation of a file, or an error if it doesn't exist:
html, err := box.MustBytes("index.html") html, err := box.FindBytes("index.html")
``` ```
### What is a Box? ### What is a Box?
@ -85,7 +87,7 @@ Packr uses the following resolution rules when looking for a file:
Because Packr knows how to fall through to the file system, developers don't need to worry about constantly compiling their static files into a binary. They can work unimpeded. Because Packr knows how to fall through to the file system, developers don't need to worry about constantly compiling their static files into a binary. They can work unimpeded.
Packr takes file resolution a step further. When declaring a new box you use a relative path, `./templates`. When Packr recieves this call it calculates out the absolute path to that directory. By doing this it means you can be guaranteed that Packr can find your files correctly, even if you're not running in the directory that the box was created in. This helps with the problem of testing, where Go changes the `pwd` for each package, making relative paths difficult to work with. This is not a problem when using Packr. Packr takes file resolution a step further. When declaring a new box you use a relative path, `./templates`. When Packr receives this call it calculates out the absolute path to that directory. By doing this it means you can be guaranteed that Packr can find your files correctly, even if you're not running in the directory that the box was created in. This helps with the problem of testing, where Go changes the `pwd` for each package, making relative paths difficult to work with. This is not a problem when using Packr.
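A minimal sketch of that fallback in practice (the `./templates` folder and `index.html` file are assumptions for illustration, not part of packr itself):
```go
package main

import (
	"fmt"
	"log"

	"github.com/gobuffalo/packr"
)

func main() {
	// The relative path is resolved against the file that declares the box,
	// so this works whether the files are packed into the binary or still on disk.
	box := packr.NewBox("./templates")

	html, err := box.FindString("index.html")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(html)
}
```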
--- ---
@ -140,6 +142,57 @@ Why do you want to do this? Packr first looks to the information stored in these
--- ---
## Building/Moving a portable release
When it comes to building multiple releases you typically want that release to be built in a specific directory.
For example: `./releases`
However, because passing a `.go` file requires absolute paths, we must compile the release in the appropriate absolute path.
```bash
GOOS=linux GOARCH=amd64 packr build
```
Now your `project_name` binary will be built at the root of your project dir. Great!
All that is left to do is to move that binary to your release dir:
Linux/macOS/Windows (bash)
```bash
mv ./project_name ./releases
```
Windows (cmd):
```cmd
move ./project_name ./releases
```
Powershell:
```powershell
Move-Item -Path .\project_name -Destination .\releases\
```
If you _target_ Windows when building, don't forget that it's `project_name.exe`
Now you can make multiple releases and all of your needed static files will be available!
#### Summing it up:
Example Script for building to 3 common targets:
```bash
GOOS=darwin GOARCH=amd64 packr build && mv ./project_name ./releases/darwin-project_name \
&& GOOS=linux GOARCH=amd64 packr build && mv ./project_name ./releases/linux-project_name \
&& GOOS=windows GOARCH=386 packr build && mv ./project_name.exe ./releases/project_name.exe \
&& packr clean
```
---
## Debugging ## Debugging
The `packr` command passes all arguments down to the underlying `go` command, this includes the `-v` flag to print out `go build` information. Packr looks for the `-v` flag, and will turn on its own verbose logging. This is very useful for trying to understand what the `packr` command is doing when it is run. The `packr` command passes all arguments down to the underlying `go` command, this includes the `-v` flag to print out `go build` information. Packr looks for the `-v` flag, and will turn on its own verbose logging. This is very useful for trying to understand what the `packr` command is doing when it is run.


@ -11,13 +11,24 @@ import (
"runtime" "runtime"
"strings" "strings"
"github.com/gobuffalo/packd"
"github.com/markbates/oncer"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
var ( var (
// ErrResOutsideBox gets returned in case of the requested resources being outside the box
ErrResOutsideBox = errors.New("Can't find a resource outside the box") ErrResOutsideBox = errors.New("Can't find a resource outside the box")
) )
var _ packd.Box = Box{}
var _ packd.HTTPBox = Box{}
var _ packd.Lister = Box{}
var _ packd.Addable = Box{}
var _ packd.Walkable = Box{}
var _ packd.Finder = Box{}
var _ packd.LegacyBox = Box{}
// NewBox returns a Box that can be used to // NewBox returns a Box that can be used to
// retrieve files from either disk or the embedded // retrieve files from either disk or the embedded
// binary. // binary.
@ -51,35 +62,54 @@ type Box struct {
directories map[string]bool directories map[string]bool
} }
func (b Box) AddString(path string, t string) { // AddString converts t to a byteslice and delegates to AddBytes to add to b.data
func (b Box) AddString(path string, t string) error {
b.AddBytes(path, []byte(t)) b.AddBytes(path, []byte(t))
return nil
} }
func (b Box) AddBytes(path string, t []byte) { // AddBytes sets t in b.data by the given path
func (b Box) AddBytes(path string, t []byte) error {
b.data[path] = t b.data[path] = t
return nil
} }
// String of the file asked for or an empty string. // String is deprecated. Use FindString instead
func (b Box) String(name string) string { func (b Box) String(name string) string {
return string(b.Bytes(name)) oncer.Deprecate(0, "github.com/gobuffalo/packr#Box.String", "Use github.com/gobuffalo/packr#Box.FindString instead.")
} bb, _ := b.FindString(name)
// MustString returns either the string of the requested
// file or an error if it can not be found.
func (b Box) MustString(name string) (string, error) {
bb, err := b.MustBytes(name)
return string(bb), err
}
// Bytes of the file asked for or an empty byte slice.
func (b Box) Bytes(name string) []byte {
bb, _ := b.MustBytes(name)
return bb return bb
} }
// MustBytes returns either the byte slice of the requested // MustString is deprecated. Use FindString instead
// file or an error if it can not be found. func (b Box) MustString(name string) (string, error) {
oncer.Deprecate(0, "github.com/gobuffalo/packr#Box.MustString", "Use github.com/gobuffalo/packr#Box.FindString instead.")
return b.FindString(name)
}
// Bytes is deprecated. Use Find instead
func (b Box) Bytes(name string) []byte {
oncer.Deprecate(0, "github.com/gobuffalo/packr#Box.Bytes", "Use github.com/gobuffalo/packr#Box.Find instead.")
bb, _ := b.Find(name)
return bb
}
// MustBytes is deprecated. Use Find instead
func (b Box) MustBytes(name string) ([]byte, error) { func (b Box) MustBytes(name string) ([]byte, error) {
oncer.Deprecate(0, "github.com/gobuffalo/packr#Box.MustBytes", "Use github.com/gobuffalo/packr#Box.Find instead.")
return b.Find(name)
}
// FindString returns either the string of the requested
// file or an error if it can not be found.
func (b Box) FindString(name string) (string, error) {
bb, err := b.Find(name)
return string(bb), err
}
// Find returns either the byte slice of the requested
// file or an error if it can not be found.
func (b Box) Find(name string) ([]byte, error) {
f, err := b.find(name) f, err := b.find(name)
if err == nil { if err == nil {
bb := &bytes.Buffer{} bb := &bytes.Buffer{}
@ -112,8 +142,9 @@ func (b Box) decompress(bb []byte) []byte {
func (b Box) find(name string) (File, error) { func (b Box) find(name string) (File, error) {
if bb, ok := b.data[name]; ok { if bb, ok := b.data[name]; ok {
return newVirtualFile(name, bb), nil return packd.NewFile(name, bytes.NewReader(bb))
} }
if b.directories == nil { if b.directories == nil {
b.indexDirectories() b.indexDirectories()
} }
@ -126,20 +157,19 @@ func (b Box) find(name string) (File, error) {
// Absolute name is considered as relative to the box root // Absolute name is considered as relative to the box root
cleanName = strings.TrimPrefix(cleanName, "/") cleanName = strings.TrimPrefix(cleanName, "/")
// Try to get the resource from the box
if _, ok := data[b.Path]; ok { if _, ok := data[b.Path]; ok {
if bb, ok := data[b.Path][cleanName]; ok { if bb, ok := data[b.Path][cleanName]; ok {
bb = b.decompress(bb) bb = b.decompress(bb)
return newVirtualFile(cleanName, bb), nil return packd.NewFile(cleanName, bytes.NewReader(bb))
}
if _, ok := b.directories[cleanName]; ok {
return packd.NewDir(cleanName)
} }
if filepath.Ext(cleanName) != "" { if filepath.Ext(cleanName) != "" {
// The Handler created by http.FileSystem checks for those errors and // The Handler created by http.FileSystem checks for those errors and
// returns http.StatusNotFound instead of http.StatusInternalServerError. // returns http.StatusNotFound instead of http.StatusInternalServerError.
return nil, os.ErrNotExist return nil, os.ErrNotExist
} }
if _, ok := b.directories[cleanName]; ok {
return newVirtualDir(cleanName), nil
}
return nil, os.ErrNotExist return nil, os.ErrNotExist
} }
@ -149,46 +179,6 @@ func (b Box) find(name string) (File, error) {
return fileFor(p, cleanName) return fileFor(p, cleanName)
} }
type WalkFunc func(string, File) error
func (b Box) Walk(wf WalkFunc) error {
if data[b.Path] == nil {
base, err := filepath.EvalSymlinks(filepath.Join(b.callingDir, b.Path))
if err != nil {
return errors.WithStack(err)
}
return filepath.Walk(base, func(path string, info os.FileInfo, err error) error {
cleanName, err := filepath.Rel(base, path)
if err != nil {
cleanName = strings.TrimPrefix(path, base)
}
cleanName = filepath.ToSlash(filepath.Clean(cleanName))
cleanName = strings.TrimPrefix(cleanName, "/")
cleanName = filepath.FromSlash(cleanName)
if info == nil || info.IsDir() {
return nil
}
file, err := fileFor(path, cleanName)
if err != nil {
return err
}
return wf(cleanName, file)
})
}
for n := range data[b.Path] {
f, err := b.find(n)
if err != nil {
return err
}
err = wf(n, f)
if err != nil {
return err
}
}
return nil
}
// Open returns a File using the http.File interface // Open returns a File using the http.File interface
func (b Box) Open(name string) (http.File, error) { func (b Box) Open(name string) (http.File, error) {
return b.find(name) return b.find(name)
@ -232,10 +222,10 @@ func fileFor(p string, name string) (File, error) {
return nil, err return nil, err
} }
if fi.IsDir() { if fi.IsDir() {
return newVirtualDir(p), nil return packd.NewDir(p)
} }
if bb, err := ioutil.ReadFile(p); err == nil { if bb, err := ioutil.ReadFile(p); err == nil {
return newVirtualFile(name, bb), nil return packd.NewFile(name, bytes.NewReader(bb))
} }
return nil, os.ErrNotExist return nil, os.ErrNotExist
} }


@ -1,27 +1,13 @@
package packr package packr
import ( import (
"go/build" "github.com/gobuffalo/envy"
"os"
"strings"
) )
// GoPath returns the current GOPATH env var // GoPath returns the current GOPATH env var
// or if it's missing, the default. // or if it's missing, the default.
func GoPath() string { var GoPath = envy.GoPath
go_path := strings.Split(os.Getenv("GOPATH"), string(os.PathListSeparator))
if len(go_path) == 0 || go_path[0] == "" {
return build.Default.GOPATH
}
return go_path[0]
}
// GoBin returns the current GO_BIN env var // GoBin returns the current GO_BIN env var
// or if it's missing, a default of "go" // or if it's missing, a default of "go"
func GoBin() string { var GoBin = envy.GoBin
go_bin := os.Getenv("GO_BIN")
if go_bin == "" {
return "go"
}
return go_bin
}


@ -1,15 +1,5 @@
package packr package packr
import ( import "github.com/gobuffalo/packd"
"io"
"os"
)
type File interface { type File = packd.File
io.ReadCloser
io.Writer
FileInfo() (os.FileInfo, error)
Readdir(count int) ([]os.FileInfo, error)
Seek(offset int64, whence int) (int64, error)
Stat() (os.FileInfo, error)
}


@ -1,3 +1,13 @@
module github.com/gobuffalo/packr module github.com/gobuffalo/packr
require github.com/pkg/errors v0.8.0 require (
github.com/gobuffalo/envy v1.6.8
github.com/gobuffalo/packd v0.0.0-20181111195323-b2e760a5f0ff
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/markbates/oncer v0.0.0-20181014194634-05fccaae8fc4
github.com/pkg/errors v0.8.0
github.com/spf13/cobra v0.0.3
github.com/spf13/pflag v1.0.3 // indirect
github.com/stretchr/testify v1.2.2
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f
)

24
vendor/github.com/gobuffalo/packr/go.sum generated vendored Normal file

@ -0,0 +1,24 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/gobuffalo/envy v1.6.8 h1:ExvxBMO2VoANkwLkQcY8yTB73YkkIOfi9CyinoE+vyk=
github.com/gobuffalo/envy v1.6.8/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ=
github.com/gobuffalo/packd v0.0.0-20181111195323-b2e760a5f0ff h1:FFjrU4aPGxtiWlhmLdeqEGFcs17YJfJ/i3Zm+cO5fkQ=
github.com/gobuffalo/packd v0.0.0-20181111195323-b2e760a5f0ff/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/markbates/oncer v0.0.0-20181014194634-05fccaae8fc4 h1:Mlji5gkcpzkqTROyE4ZxZ8hN7osunMb2RuGVrbvMvCc=
github.com/markbates/oncer v0.0.0-20181014194634-05fccaae8fc4/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=


@ -4,6 +4,8 @@ import (
"bytes" "bytes"
"compress/gzip" "compress/gzip"
"encoding/json" "encoding/json"
"runtime"
"strings"
"sync" "sync"
) )
@ -53,3 +55,20 @@ func UnpackBytes(box string) {
defer gil.Unlock() defer gil.Unlock()
delete(data, box) delete(data, box)
} }
func osPaths(paths ...string) []string {
if runtime.GOOS == "windows" {
for i, path := range paths {
paths[i] = strings.Replace(path, "/", "\\", -1)
}
}
return paths
}
func osPath(path string) string {
if runtime.GOOS == "windows" {
return strings.Replace(path, "/", "\\", -1)
}
return path
}


@ -1,13 +0,0 @@
package packr
import "os"
var _ File = physicalFile{}
type physicalFile struct {
*os.File
}
func (p physicalFile) FileInfo() (os.FileInfo, error) {
return os.Stat(p.Name())
}

18
vendor/github.com/gobuffalo/packr/shoulders.md generated vendored Normal file

@ -0,0 +1,18 @@
# github.com/gobuffalo/packr Stands on the Shoulders of Giants
github.com/gobuffalo/packr does not try to reinvent the wheel! Instead, it uses the already great wheels developed by the Go community and puts them all together in the best way possible. Without these giants this project would not be possible. Please make sure to check them out and thank them for all of their hard work.
Thank you to the following **GIANTS**:
* [github.com/gobuffalo/envy](https://godoc.org/github.com/gobuffalo/envy)
* [github.com/gobuffalo/packd](https://godoc.org/github.com/gobuffalo/packd)
* [github.com/gobuffalo/packr](https://godoc.org/github.com/gobuffalo/packr)
* [github.com/joho/godotenv](https://godoc.org/github.com/joho/godotenv)
* [github.com/markbates/oncer](https://godoc.org/github.com/markbates/oncer)
* [github.com/pkg/errors](https://godoc.org/github.com/pkg/errors)

3
vendor/github.com/gobuffalo/packr/version.go generated vendored Normal file

@ -0,0 +1,3 @@
package packr
const Version = "v1.20.0"


@ -1,57 +0,0 @@
package packr
import (
"bytes"
"fmt"
"os"
"time"
)
var virtualFileModTime = time.Now()
var _ File = virtualFile{}
type virtualFile struct {
*bytes.Reader
Name string
info fileInfo
}
func (f virtualFile) FileInfo() (os.FileInfo, error) {
return f.info, nil
}
func (f virtualFile) Close() error {
return nil
}
func (f virtualFile) Write(p []byte) (n int, err error) {
return 0, fmt.Errorf("not implemented")
}
func (f virtualFile) Readdir(count int) ([]os.FileInfo, error) {
return []os.FileInfo{f.info}, nil
}
func (f virtualFile) Stat() (os.FileInfo, error) {
return f.info, nil
}
func newVirtualFile(name string, b []byte) File {
return virtualFile{
Reader: bytes.NewReader(b),
Name: name,
info: fileInfo{
Path: name,
Contents: b,
size: int64(len(b)),
modTime: virtualFileModTime,
},
}
}
func newVirtualDir(name string) File {
var b []byte
v := newVirtualFile(name, b).(virtualFile)
v.info.isDir = true
return v
}

64 vendor/github.com/gobuffalo/packr/walk.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
package packr
import (
"os"
"path/filepath"
"strings"
"github.com/gobuffalo/packd"
"github.com/pkg/errors"
)
type WalkFunc = packd.WalkFunc
// Walk will traverse the box and call the WalkFunc for each file in the box/folder.
func (b Box) Walk(wf WalkFunc) error {
if data[b.Path] == nil {
base, err := filepath.EvalSymlinks(filepath.Join(b.callingDir, b.Path))
if err != nil {
return errors.WithStack(err)
}
return filepath.Walk(base, func(path string, info os.FileInfo, err error) error {
cleanName, err := filepath.Rel(base, path)
if err != nil {
cleanName = strings.TrimPrefix(path, base)
}
cleanName = filepath.ToSlash(filepath.Clean(cleanName))
cleanName = strings.TrimPrefix(cleanName, "/")
cleanName = filepath.FromSlash(cleanName)
if info == nil || info.IsDir() {
return nil
}
file, err := fileFor(path, cleanName)
if err != nil {
return err
}
return wf(cleanName, file)
})
}
for n := range data[b.Path] {
f, err := b.find(n)
if err != nil {
return err
}
err = wf(n, f)
if err != nil {
return err
}
}
return nil
}
// WalkPrefix will call box.Walk and call the WalkFunc when it finds paths that have a matching prefix
func (b Box) WalkPrefix(prefix string, wf WalkFunc) error {
opre := osPath(prefix)
return b.Walk(func(path string, f File) error {
if strings.HasPrefix(osPath(path), opre) {
if err := wf(path, f); err != nil {
return errors.WithStack(err)
}
}
return nil
})
}
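For orientation, a minimal usage sketch of the traversal helpers added above (not part of this commit): the box rooted at ./templates and the emails/ prefix are illustrative assumptions.

```go
package main

import (
	"fmt"
	"log"

	"github.com/gobuffalo/packd"
	"github.com/gobuffalo/packr"
)

func main() {
	// Hypothetical box; the folder layout is assumed for illustration only.
	box := packr.NewBox("./templates")

	// Walk visits every file in the box, whether embedded or on disk.
	if err := box.Walk(func(path string, f packd.File) error {
		fmt.Println("found:", path)
		return nil
	}); err != nil {
		log.Fatal(err)
	}

	// WalkPrefix restricts the traversal to paths under a prefix; the osPath
	// normalization added above keeps the comparison working with
	// Windows-style separators as well.
	if err := box.WalkPrefix("emails/", func(path string, f packd.File) error {
		fmt.Println("email template:", path)
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}
```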

View File

@ -20,6 +20,8 @@ explains how to use `database/sql` along with sqlx.
## Recent Changes ## Recent Changes
* The [introduction](https://github.com/jmoiron/sqlx/pull/387) of `sql.ColumnType` sets the required minimum Go version to 1.8.
* sqlx/types.JsonText has been renamed to JSONText to follow Go naming conventions. * sqlx/types.JsonText has been renamed to JSONText to follow Go naming conventions.
This breaks backwards compatibility, but it's in a way that is trivially fixable This breaks backwards compatibility, but it's in a way that is trivially fixable

View File

@ -2,6 +2,7 @@ package sqlx
import ( import (
"bytes" "bytes"
"database/sql/driver"
"errors" "errors"
"reflect" "reflect"
"strconv" "strconv"
@ -16,12 +17,13 @@ const (
QUESTION QUESTION
DOLLAR DOLLAR
NAMED NAMED
AT
) )
// BindType returns the bindtype for a given database given a drivername. // BindType returns the bindtype for a given database given a drivername.
func BindType(driverName string) int { func BindType(driverName string) int {
switch driverName { switch driverName {
case "postgres", "pgx", "pq-timeouts", "cloudsqlpostgres": case "postgres", "pgx", "pq-timeouts", "cloudsqlpostgres", "ql":
return DOLLAR return DOLLAR
case "mysql": case "mysql":
return QUESTION return QUESTION
@ -29,6 +31,8 @@ func BindType(driverName string) int {
return QUESTION return QUESTION
case "oci8", "ora", "goracle": case "oci8", "ora", "goracle":
return NAMED return NAMED
case "sqlserver":
return AT
} }
return UNKNOWN return UNKNOWN
} }
@ -56,6 +60,8 @@ func Rebind(bindType int, query string) string {
rqb = append(rqb, '$') rqb = append(rqb, '$')
case NAMED: case NAMED:
rqb = append(rqb, ':', 'a', 'r', 'g') rqb = append(rqb, ':', 'a', 'r', 'g')
case AT:
rqb = append(rqb, '@', 'p')
} }
j++ j++
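The new AT bind type above is what lets ?-style queries be rewritten into SQL Server's @pN placeholder form. A minimal sketch of the resulting behaviour, with an illustrative query that is not taken from this repository:

```go
package main

import (
	"fmt"

	"github.com/jmoiron/sqlx"
)

func main() {
	// BindType("sqlserver") now reports sqlx.AT, and Rebind rewrites each ?
	// into a numbered @pN placeholder.
	bt := sqlx.BindType("sqlserver")
	q := sqlx.Rebind(bt, "SELECT * FROM users WHERE id = ? AND state = ?")
	fmt.Println(q) // SELECT * FROM users WHERE id = @p1 AND state = @p2
}
```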
@ -110,6 +116,9 @@ func In(query string, args ...interface{}) (string, []interface{}, error) {
meta := make([]argMeta, len(args)) meta := make([]argMeta, len(args))
for i, arg := range args { for i, arg := range args {
if a, ok := arg.(driver.Valuer); ok {
arg, _ = a.Value()
}
v := reflect.ValueOf(arg) v := reflect.ValueOf(arg)
t := reflectx.Deref(v.Type()) t := reflectx.Deref(v.Type())
@ -137,7 +146,7 @@ func In(query string, args ...interface{}) (string, []interface{}, error) {
} }
newArgs := make([]interface{}, 0, flatArgsCount) newArgs := make([]interface{}, 0, flatArgsCount)
buf := bytes.NewBuffer(make([]byte, 0, len(query)+len(", ?")*flatArgsCount)) buf := make([]byte, 0, len(query)+len(", ?")*flatArgsCount)
var arg, offset int var arg, offset int
@ -163,10 +172,10 @@ func In(query string, args ...interface{}) (string, []interface{}, error) {
} }
// write everything up to and including our ? character // write everything up to and including our ? character
buf.WriteString(query[:offset+i+1]) buf = append(buf, query[:offset+i+1]...)
for si := 1; si < argMeta.length; si++ { for si := 1; si < argMeta.length; si++ {
buf.WriteString(", ?") buf = append(buf, ", ?"...)
} }
newArgs = appendReflectSlice(newArgs, argMeta.v, argMeta.length) newArgs = appendReflectSlice(newArgs, argMeta.v, argMeta.length)
@ -177,13 +186,13 @@ func In(query string, args ...interface{}) (string, []interface{}, error) {
offset = 0 offset = 0
} }
buf.WriteString(query) buf = append(buf, query...)
if arg < len(meta) { if arg < len(meta) {
return "", nil, errors.New("number of bindVars less than number arguments") return "", nil, errors.New("number of bindVars less than number arguments")
} }
return buf.String(), newArgs, nil return string(buf), newArgs, nil
} }
func appendReflectSlice(args []interface{}, v reflect.Value, vlen int) []interface{} { func appendReflectSlice(args []interface{}, v reflect.Value, vlen int) []interface{} {
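The driver.Valuer unwrapping added to In above means custom Valuer types can be mixed with slices when expanding IN clauses. A rough sketch, assuming a hypothetical Status type and query:

```go
package main

import (
	"database/sql/driver"
	"fmt"
	"log"

	"github.com/jmoiron/sqlx"
)

// Status is a hypothetical custom type implementing driver.Valuer.
type Status int

// Value satisfies driver.Valuer so Status can be passed straight to In.
func (s Status) Value() (driver.Value, error) { return int64(s), nil }

func main() {
	// In expands the slice into one ? per element; with the change above it
	// also unwraps Valuer arguments such as Status before inspecting them.
	query, args, err := sqlx.In(
		"SELECT * FROM jobs WHERE status = ? AND id IN (?)",
		Status(2), []int{10, 11, 12},
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(query) // SELECT * FROM jobs WHERE status = ? AND id IN (?, ?, ?)
	fmt.Println(args)  // [2 10 11 12]
}
```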

7 vendor/github.com/jmoiron/sqlx/go.mod generated vendored Normal file
View File

@ -0,0 +1,7 @@
module github.com/jmoiron/sqlx
require (
github.com/go-sql-driver/mysql v1.4.0
github.com/lib/pq v1.0.0
github.com/mattn/go-sqlite3 v1.9.0
)

6 vendor/github.com/jmoiron/sqlx/go.sum generated vendored Normal file
View File

@ -0,0 +1,6 @@
github.com/go-sql-driver/mysql v1.4.0 h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/mattn/go-sqlite3 v1.9.0 h1:pDRiWfl+++eC2FEFRy6jXmQlvp4Yh3z1MJKg4UeYM/4=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=

View File

@ -259,6 +259,10 @@ func compileNamedQuery(qs []byte, bindType int) (query string, names []string, e
} }
inName = true inName = true
name = []byte{} name = []byte{}
} else if inName && i > 0 && b == '=' {
rebound = append(rebound, ':', '=')
inName = false
continue
// if we're in a name, and this is an allowed character, continue // if we're in a name, and this is an allowed character, continue
} else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_' || b == '.') && i != last { } else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_' || b == '.') && i != last {
// append the byte to the name if we are in a name and not on the last byte // append the byte to the name if we are in a name and not on the last byte
@ -287,6 +291,12 @@ func compileNamedQuery(qs []byte, bindType int) (query string, names []string, e
rebound = append(rebound, byte(b)) rebound = append(rebound, byte(b))
} }
currentVar++ currentVar++
case AT:
rebound = append(rebound, '@', 'p')
for _, b := range strconv.Itoa(currentVar) {
rebound = append(rebound, byte(b))
}
currentVar++
} }
// add this byte to string unless it was not part of the name // add this byte to string unless it was not part of the name
if i != last { if i != last {
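The AT case added here carries the same @pN placeholders through named-query compilation. A small sketch using sqlx.BindNamed with an illustrative query and map argument:

```go
package main

import (
	"fmt"
	"log"

	"github.com/jmoiron/sqlx"
)

func main() {
	arg := map[string]interface{}{"first": "Ada", "last": "Lovelace"}

	// BindNamed compiles :name placeholders for the given bind type; with
	// sqlx.AT the compiled query uses the @pN form added above.
	query, args, err := sqlx.BindNamed(sqlx.AT,
		"SELECT * FROM person WHERE first_name = :first AND last_name = :last",
		arg)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(query) // SELECT * FROM person WHERE first_name = @p1 AND last_name = @p2
	fmt.Println(args)  // [Ada Lovelace]
}
```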

View File

@ -149,15 +149,15 @@ func isUnsafe(i interface{}) bool {
} }
func mapperFor(i interface{}) *reflectx.Mapper { func mapperFor(i interface{}) *reflectx.Mapper {
switch i.(type) { switch i := i.(type) {
case DB: case DB:
return i.(DB).Mapper return i.Mapper
case *DB: case *DB:
return i.(*DB).Mapper return i.Mapper
case Tx: case Tx:
return i.(Tx).Mapper return i.Mapper
case *Tx: case *Tx:
return i.(*Tx).Mapper return i.Mapper
default: default:
return mapper() return mapper()
} }
@ -471,8 +471,6 @@ func (tx *Tx) Stmtx(stmt interface{}) *Stmt {
s = v.Stmt s = v.Stmt
case *Stmt: case *Stmt:
s = v.Stmt s = v.Stmt
case sql.Stmt:
s = &v
case *sql.Stmt: case *sql.Stmt:
s = v s = v
default: default:

View File

@ -217,8 +217,6 @@ func (tx *Tx) StmtxContext(ctx context.Context, stmt interface{}) *Stmt {
s = v.Stmt s = v.Stmt
case *Stmt: case *Stmt:
s = v.Stmt s = v.Stmt
case sql.Stmt:
s = &v
case *sql.Stmt: case *sql.Stmt:
s = v s = v
default: default:

2 vendor/github.com/lib/pq/README.md generated vendored
View File

@ -10,7 +10,7 @@
## Docs ## Docs
For detailed documentation and basic usage examples, please see the package For detailed documentation and basic usage examples, please see the package
documentation at <http://godoc.org/github.com/lib/pq>. documentation at <https://godoc.org/github.com/lib/pq>.
## Tests ## Tests

View File

@ -1,5 +1,3 @@
// +build go1.8
package pq package pq
import ( import (

2 vendor/github.com/lib/pq/doc.go generated vendored
View File

@ -239,7 +239,7 @@ for more information). Note that the channel name will be truncated to 63
bytes by the PostgreSQL server. bytes by the PostgreSQL server.
You can find a complete, working example of Listener usage at You can find a complete, working example of Listener usage at
http://godoc.org/github.com/lib/pq/example/listen. https://godoc.org/github.com/lib/pq/example/listen.
*/ */
package pq package pq

1 vendor/github.com/lib/pq/go.mod generated vendored Normal file
View File

@ -0,0 +1 @@
module github.com/lib/pq

3 vendor/github.com/lib/pq/notify.go generated vendored
View File

@ -725,6 +725,9 @@ func (l *Listener) Close() error {
} }
l.isClosed = true l.isClosed = true
// Unblock calls to Listen()
l.reconnectCond.Broadcast()
return nil return nil
} }

8 vendor/github.com/lib/pq/ssl.go generated vendored
View File

@ -58,7 +58,13 @@ func ssl(o values) (func(net.Conn) (net.Conn, error), error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
sslRenegotiation(&tlsConf)
// Accept renegotiation requests initiated by the backend.
//
// Renegotiation was deprecated then removed from PostgreSQL 9.5, but
// the default configuration of older versions has it enabled. Redshift
// also initiates renegotiations and cannot be reconfigured.
tlsConf.Renegotiation = tls.RenegotiateFreelyAsClient
return func(conn net.Conn) (net.Conn, error) { return func(conn net.Conn) (net.Conn, error) {
client := tls.Client(conn, &tlsConf) client := tls.Client(conn, &tlsConf)

View File

@ -1,14 +0,0 @@
// +build go1.7
package pq
import "crypto/tls"
// Accept renegotiation requests initiated by the backend.
//
// Renegotiation was deprecated then removed from PostgreSQL 9.5, but
// the default configuration of older versions has it enabled. Redshift
// also initiates renegotiations and cannot be reconfigured.
func sslRenegotiation(conf *tls.Config) {
conf.Renegotiation = tls.RenegotiateFreelyAsClient
}

View File

@ -1,8 +0,0 @@
// +build !go1.7
package pq
import "crypto/tls"
// Renegotiation is not supported by crypto/tls until Go 1.7.
func sslRenegotiation(*tls.Config) {}

21 vendor/github.com/markbates/oncer/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2018 Mark Bates
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

40 vendor/github.com/markbates/oncer/Makefile generated vendored Normal file
View File

@ -0,0 +1,40 @@
TAGS ?= "sqlite"
GO_BIN ?= go
install:
packr
$(GO_BIN) install -v .
deps:
$(GO_BIN) get github.com/gobuffalo/release
$(GO_BIN) get github.com/gobuffalo/packr/packr
$(GO_BIN) get -tags ${TAGS} -t ./...
$(GO_BIN) mod tidy
build:
packr
$(GO_BIN) build -v .
test:
packr
$(GO_BIN) test -tags ${TAGS} ./...
ci-test: deps
$(GO_BIN) test -tags ${TAGS} -race ./...
lint:
gometalinter --vendor ./... --deadline=1m --skip=internal
update:
$(GO_BIN) get -u -tags ${TAGS}
$(GO_BIN) mod tidy
packr
make test
make install
$(GO_BIN) mod tidy
release-test:
$(GO_BIN) test -tags ${TAGS} -race ./...
release:
release -y -f version.go

20 vendor/github.com/markbates/oncer/deprecate.go generated vendored Normal file
View File

@ -0,0 +1,20 @@
package oncer
import (
"fmt"
"io"
"os"
)
const deprecated = "DEPRECATED"
var deprecationWriter io.Writer = os.Stdout
func Deprecate(depth int, name string, msg string) {
Do(deprecated+name, func() {
fmt.Fprintf(deprecationWriter, "[%s] %s has been deprecated.\n", deprecated, name)
if len(msg) > 0 {
fmt.Fprintf(deprecationWriter, "\t%s\n", msg)
}
})
}

7 vendor/github.com/markbates/oncer/go.mod generated vendored Normal file
View File

@ -0,0 +1,7 @@
module github.com/markbates/oncer
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/stretchr/testify v1.2.2
)

6 vendor/github.com/markbates/oncer/go.sum generated vendored Normal file
View File

@ -0,0 +1,6 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=

26 vendor/github.com/markbates/oncer/oncer.go generated vendored Normal file
View File

@ -0,0 +1,26 @@
package oncer
import (
"sync"
)
var onces = &sync.Map{}
func Do(name string, fn func()) {
o, _ := onces.LoadOrStore(name, &sync.Once{})
if once, ok := o.(*sync.Once); ok {
once.Do(fn)
}
}
func Reset(names ...string) {
if len(names) == 0 {
onces = &sync.Map{}
return
}
for _, n := range names {
onces.Delete(n)
onces.Delete(deprecated + n)
}
}
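As a quick orientation for this newly vendored package, a minimal usage sketch (the function and package names are illustrative, not part of this commit): Do runs a function once per name, and Deprecate from deprecate.go above prints a one-time notice.

```go
package main

import (
	"fmt"

	"github.com/markbates/oncer"
)

func loadConfig() {
	fmt.Println("loading config")
}

func main() {
	// Do runs the function only the first time this name is seen.
	oncer.Do("load-config", loadConfig)
	oncer.Do("load-config", loadConfig) // no-op on the second call

	// Deprecate prints a one-time deprecation notice for the named API.
	// mypkg.OldFunc / mypkg.NewFunc are hypothetical names.
	oncer.Deprecate(0, "mypkg.OldFunc", "Use mypkg.NewFunc instead.")
}
```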

View File

@ -10,9 +10,7 @@ go-sqlite3
sqlite3 driver conforming to the built-in database/sql interface sqlite3 driver conforming to the built-in database/sql interface
Supported Golang version: Supported Golang version: See .travis.yml
- 1.9.x
- 1.10.x
[This package follows the official Golang Release Policy.](https://golang.org/doc/devel/release.html#policy) [This package follows the official Golang Release Policy.](https://golang.org/doc/devel/release.html#policy)
@ -67,6 +65,7 @@ This is also known as a DSN string. (Data Source Name).
Options are append after the filename of the SQLite database. Options are append after the filename of the SQLite database.
The database filename and options are seperated by an `?` (Question Mark). The database filename and options are seperated by an `?` (Question Mark).
Options should be URL-encoded (see [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)).
This also applies when using an in-memory database instead of a file. This also applies when using an in-memory database instead of a file.
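A small sketch of what a URL-encoded DSN might look like; the file name is illustrative, and `_busy_timeout` / `cache=shared` are examples of options this driver accepts.

```go
package main

import (
	"database/sql"
	"log"
	"net/url"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	// Options follow the filename after a '?' and should be URL-encoded.
	params := url.Values{}
	params.Set("_busy_timeout", "5000")
	params.Set("cache", "shared")

	db, err := sql.Open("sqlite3", "file:app.db?"+params.Encode())
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```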
@ -198,7 +197,7 @@ Additional information:
# Google Cloud Platform # Google Cloud Platform
Building on GCP is not possible because `Google Cloud Platform does not allow `gcc` to be executed. Building on GCP is not possible because Google Cloud Platform does not allow `gcc` to be executed.
Please work only with compiled final binaries. Please work only with compiled final binaries.
@ -290,7 +289,7 @@ For example the TDM-GCC Toolchain can be found [here](ttps://sourceforge.net/pro
When receiving a compile time error referencing recompile with `-FPIC` then you When receiving a compile time error referencing recompile with `-FPIC` then you
are probably using a hardend system. are probably using a hardend system.
You can copile the library on a hardend system with the following command. You can compile the library on a hardend system with the following command.
```bash ```bash
go build -ldflags '-extldflags=-fno-PIC' go build -ldflags '-extldflags=-fno-PIC'

View File

@ -77,6 +77,12 @@ func updateHookTrampoline(handle uintptr, op int, db *C.char, table *C.char, row
callback(op, C.GoString(db), C.GoString(table), rowid) callback(op, C.GoString(db), C.GoString(table), rowid)
} }
//export authorizerTrampoline
func authorizerTrampoline(handle uintptr, op int, arg1 *C.char, arg2 *C.char, arg3 *C.char) int {
callback := lookupHandle(handle).(func(int, string, string, string) int)
return callback(op, C.GoString(arg1), C.GoString(arg2), C.GoString(arg3))
}
// Use handles to avoid passing Go pointers to C. // Use handles to avoid passing Go pointers to C.
type handleVal struct { type handleVal struct {
@ -362,7 +368,7 @@ func callbackRet(typ reflect.Type) (callbackRetConverter, error) {
func callbackError(ctx *C.sqlite3_context, err error) { func callbackError(ctx *C.sqlite3_context, err error) {
cstr := C.CString(err.Error()) cstr := C.CString(err.Error())
defer C.free(unsafe.Pointer(cstr)) defer C.free(unsafe.Pointer(cstr))
C.sqlite3_result_error(ctx, cstr, -1) C.sqlite3_result_error(ctx, cstr, C.int(-1))
} }
// Test support code. Tests are not allowed to import "C", so we can't // Test support code. Tests are not allowed to import "C", so we can't

File diff suppressed because it is too large

View File

@ -124,9 +124,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()]. ** [sqlite_version()] and [sqlite_source_id()].
*/ */
#define SQLITE_VERSION "3.24.0" #define SQLITE_VERSION "3.25.2"
#define SQLITE_VERSION_NUMBER 3024000 #define SQLITE_VERSION_NUMBER 3025002
#define SQLITE_SOURCE_ID "2018-06-04 19:24:41 c7ee0833225bfd8c5ec2f9bf62b97c4e04d03bd9566366d5221ac8fb199a87ca" #define SQLITE_SOURCE_ID "2018-09-25 19:08:10 fb90e7189ae6d62e77ba3a308ca5d683f90bbe633cf681865365b8e92792d1c7"
/* /*
** CAPI3REF: Run-Time Library Version Numbers ** CAPI3REF: Run-Time Library Version Numbers
@ -473,6 +473,7 @@ SQLITE_API int sqlite3_exec(
*/ */
#define SQLITE_ERROR_MISSING_COLLSEQ (SQLITE_ERROR | (1<<8)) #define SQLITE_ERROR_MISSING_COLLSEQ (SQLITE_ERROR | (1<<8))
#define SQLITE_ERROR_RETRY (SQLITE_ERROR | (2<<8)) #define SQLITE_ERROR_RETRY (SQLITE_ERROR | (2<<8))
#define SQLITE_ERROR_SNAPSHOT (SQLITE_ERROR | (3<<8))
#define SQLITE_IOERR_READ (SQLITE_IOERR | (1<<8)) #define SQLITE_IOERR_READ (SQLITE_IOERR | (1<<8))
#define SQLITE_IOERR_SHORT_READ (SQLITE_IOERR | (2<<8)) #define SQLITE_IOERR_SHORT_READ (SQLITE_IOERR | (2<<8))
#define SQLITE_IOERR_WRITE (SQLITE_IOERR | (3<<8)) #define SQLITE_IOERR_WRITE (SQLITE_IOERR | (3<<8))
@ -512,6 +513,7 @@ SQLITE_API int sqlite3_exec(
#define SQLITE_CANTOPEN_ISDIR (SQLITE_CANTOPEN | (2<<8)) #define SQLITE_CANTOPEN_ISDIR (SQLITE_CANTOPEN | (2<<8))
#define SQLITE_CANTOPEN_FULLPATH (SQLITE_CANTOPEN | (3<<8)) #define SQLITE_CANTOPEN_FULLPATH (SQLITE_CANTOPEN | (3<<8))
#define SQLITE_CANTOPEN_CONVPATH (SQLITE_CANTOPEN | (4<<8)) #define SQLITE_CANTOPEN_CONVPATH (SQLITE_CANTOPEN | (4<<8))
#define SQLITE_CANTOPEN_DIRTYWAL (SQLITE_CANTOPEN | (5<<8)) /* Not Used */
#define SQLITE_CORRUPT_VTAB (SQLITE_CORRUPT | (1<<8)) #define SQLITE_CORRUPT_VTAB (SQLITE_CORRUPT | (1<<8))
#define SQLITE_CORRUPT_SEQUENCE (SQLITE_CORRUPT | (2<<8)) #define SQLITE_CORRUPT_SEQUENCE (SQLITE_CORRUPT | (2<<8))
#define SQLITE_READONLY_RECOVERY (SQLITE_READONLY | (1<<8)) #define SQLITE_READONLY_RECOVERY (SQLITE_READONLY | (1<<8))
@ -887,7 +889,8 @@ struct sqlite3_io_methods {
** <li>[[SQLITE_FCNTL_PERSIST_WAL]] ** <li>[[SQLITE_FCNTL_PERSIST_WAL]]
** ^The [SQLITE_FCNTL_PERSIST_WAL] opcode is used to set or query the ** ^The [SQLITE_FCNTL_PERSIST_WAL] opcode is used to set or query the
** persistent [WAL | Write Ahead Log] setting. By default, the auxiliary ** persistent [WAL | Write Ahead Log] setting. By default, the auxiliary
** write ahead log and shared memory files used for transaction control ** write ahead log ([WAL file]) and shared memory
** files used for transaction control
** are automatically deleted when the latest connection to the database ** are automatically deleted when the latest connection to the database
** closes. Setting persistent WAL mode causes those files to persist after ** closes. Setting persistent WAL mode causes those files to persist after
** close. Persisting the files is useful when other processes that do not ** close. Persisting the files is useful when other processes that do not
@ -1073,6 +1076,26 @@ struct sqlite3_io_methods {
** a file lock using the xLock or xShmLock methods of the VFS to wait ** a file lock using the xLock or xShmLock methods of the VFS to wait
** for up to M milliseconds before failing, where M is the single ** for up to M milliseconds before failing, where M is the single
** unsigned integer parameter. ** unsigned integer parameter.
**
** <li>[[SQLITE_FCNTL_DATA_VERSION]]
** The [SQLITE_FCNTL_DATA_VERSION] opcode is used to detect changes to
** a database file. The argument is a pointer to a 32-bit unsigned integer.
** The "data version" for the pager is written into the pointer. The
** "data version" changes whenever any change occurs to the corresponding
** database file, either through SQL statements on the same database
** connection or through transactions committed by separate database
** connections possibly in other processes. The [sqlite3_total_changes()]
** interface can be used to find if any database on the connection has changed,
** but that interface responds to changes on TEMP as well as MAIN and does
** not provide a mechanism to detect changes to MAIN only. Also, the
** [sqlite3_total_changes()] interface responds to internal changes only and
** omits changes made by other database connections. The
** [PRAGMA data_version] command provide a mechanism to detect changes to
** a single attached database that occur due to other database connections,
** but omits changes implemented by the database connection on which it is
** called. This file control is the only mechanism to detect changes that
** happen either internally or externally and that are associated with
** a particular attached database.
** </ul> ** </ul>
*/ */
#define SQLITE_FCNTL_LOCKSTATE 1 #define SQLITE_FCNTL_LOCKSTATE 1
@ -1108,6 +1131,7 @@ struct sqlite3_io_methods {
#define SQLITE_FCNTL_COMMIT_ATOMIC_WRITE 32 #define SQLITE_FCNTL_COMMIT_ATOMIC_WRITE 32
#define SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE 33 #define SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE 33
#define SQLITE_FCNTL_LOCK_TIMEOUT 34 #define SQLITE_FCNTL_LOCK_TIMEOUT 34
#define SQLITE_FCNTL_DATA_VERSION 35
/* deprecated names */ /* deprecated names */
#define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE #define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE
@ -2122,6 +2146,12 @@ struct sqlite3_mem_methods {
** with no schema and no content. The following process works even for ** with no schema and no content. The following process works even for
** a badly corrupted database file: ** a badly corrupted database file:
** <ol> ** <ol>
** <li> If the database connection is newly opened, make sure it has read the
** database schema by preparing then discarding some query against the
** database, or calling sqlite3_table_column_metadata(), ignoring any
** errors. This step is only necessary if the application desires to keep
** the database in WAL mode after the reset if it was in WAL mode before
** the reset.
** <li> sqlite3_db_config(db, SQLITE_DBCONFIG_RESET_DATABASE, 1, 0); ** <li> sqlite3_db_config(db, SQLITE_DBCONFIG_RESET_DATABASE, 1, 0);
** <li> [sqlite3_exec](db, "[VACUUM]", 0, 0, 0); ** <li> [sqlite3_exec](db, "[VACUUM]", 0, 0, 0);
** <li> sqlite3_db_config(db, SQLITE_DBCONFIG_RESET_DATABASE, 0, 0); ** <li> sqlite3_db_config(db, SQLITE_DBCONFIG_RESET_DATABASE, 0, 0);
@ -2270,12 +2300,17 @@ SQLITE_API void sqlite3_set_last_insert_rowid(sqlite3*,sqlite3_int64);
** program, the value returned reflects the number of rows modified by the ** program, the value returned reflects the number of rows modified by the
** previous INSERT, UPDATE or DELETE statement within the same trigger. ** previous INSERT, UPDATE or DELETE statement within the same trigger.
** **
** See also the [sqlite3_total_changes()] interface, the
** [count_changes pragma], and the [changes() SQL function].
**
** If a separate thread makes changes on the same database connection ** If a separate thread makes changes on the same database connection
** while [sqlite3_changes()] is running then the value returned ** while [sqlite3_changes()] is running then the value returned
** is unpredictable and not meaningful. ** is unpredictable and not meaningful.
**
** See also:
** <ul>
** <li> the [sqlite3_total_changes()] interface
** <li> the [count_changes pragma]
** <li> the [changes() SQL function]
** <li> the [data_version pragma]
** </ul>
*/ */
SQLITE_API int sqlite3_changes(sqlite3*); SQLITE_API int sqlite3_changes(sqlite3*);
@ -2293,13 +2328,26 @@ SQLITE_API int sqlite3_changes(sqlite3*);
** count, but those made as part of REPLACE constraint resolution are ** count, but those made as part of REPLACE constraint resolution are
** not. ^Changes to a view that are intercepted by INSTEAD OF triggers ** not. ^Changes to a view that are intercepted by INSTEAD OF triggers
** are not counted. ** are not counted.
**
** See also the [sqlite3_changes()] interface, the
** [count_changes pragma], and the [total_changes() SQL function].
** **
** This the [sqlite3_total_changes(D)] interface only reports the number
** of rows that changed due to SQL statement run against database
** connection D. Any changes by other database connections are ignored.
** To detect changes against a database file from other database
** connections use the [PRAGMA data_version] command or the
** [SQLITE_FCNTL_DATA_VERSION] [file control].
**
** If a separate thread makes changes on the same database connection ** If a separate thread makes changes on the same database connection
** while [sqlite3_total_changes()] is running then the value ** while [sqlite3_total_changes()] is running then the value
** returned is unpredictable and not meaningful. ** returned is unpredictable and not meaningful.
**
** See also:
** <ul>
** <li> the [sqlite3_changes()] interface
** <li> the [count_changes pragma]
** <li> the [changes() SQL function]
** <li> the [data_version pragma]
** <li> the [SQLITE_FCNTL_DATA_VERSION] [file control]
** </ul>
*/ */
SQLITE_API int sqlite3_total_changes(sqlite3*); SQLITE_API int sqlite3_total_changes(sqlite3*);
@ -3355,13 +3403,24 @@ SQLITE_API sqlite3_int64 sqlite3_uri_int64(const char*, const char*, sqlite3_int
** [database connection] D failed, then the sqlite3_errcode(D) interface ** [database connection] D failed, then the sqlite3_errcode(D) interface
** returns the numeric [result code] or [extended result code] for that ** returns the numeric [result code] or [extended result code] for that
** API call. ** API call.
** If the most recent API call was successful,
** then the return value from sqlite3_errcode() is undefined.
** ^The sqlite3_extended_errcode() ** ^The sqlite3_extended_errcode()
** interface is the same except that it always returns the ** interface is the same except that it always returns the
** [extended result code] even when extended result codes are ** [extended result code] even when extended result codes are
** disabled. ** disabled.
** **
** The values returned by sqlite3_errcode() and/or
** sqlite3_extended_errcode() might change with each API call.
** Except, there are some interfaces that are guaranteed to never
** change the value of the error code. The error-code preserving
** interfaces are:
**
** <ul>
** <li> sqlite3_errcode()
** <li> sqlite3_extended_errcode()
** <li> sqlite3_errmsg()
** <li> sqlite3_errmsg16()
** </ul>
**
** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language ** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language
** text that describes the error, as either UTF-8 or UTF-16 respectively. ** text that describes the error, as either UTF-8 or UTF-16 respectively.
** ^(Memory to hold the error message string is managed internally. ** ^(Memory to hold the error message string is managed internally.
@ -4515,11 +4574,25 @@ SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt);
** from [sqlite3_column_blob()], [sqlite3_column_text()], etc. into ** from [sqlite3_column_blob()], [sqlite3_column_text()], etc. into
** [sqlite3_free()]. ** [sqlite3_free()].
** **
** ^(If a memory allocation error occurs during the evaluation of any ** As long as the input parameters are correct, these routines will only
** of these routines, a default value is returned. The default value ** fail if an out-of-memory error occurs during a format conversion.
** is either the integer 0, the floating point number 0.0, or a NULL ** Only the following subset of interfaces are subject to out-of-memory
** pointer. Subsequent calls to [sqlite3_errcode()] will return ** errors:
** [SQLITE_NOMEM].)^ **
** <ul>
** <li> sqlite3_column_blob()
** <li> sqlite3_column_text()
** <li> sqlite3_column_text16()
** <li> sqlite3_column_bytes()
** <li> sqlite3_column_bytes16()
** </ul>
**
** If an out-of-memory error occurs, then the return value from these
** routines is the same as if the column had contained an SQL NULL value.
** Valid SQL NULL returns can be distinguished from out-of-memory errors
** by invoking the [sqlite3_errcode()] immediately after the suspect
** return value is obtained and before any
** other SQLite interface is called on the same [database connection].
*/ */
SQLITE_API const void *sqlite3_column_blob(sqlite3_stmt*, int iCol); SQLITE_API const void *sqlite3_column_blob(sqlite3_stmt*, int iCol);
SQLITE_API double sqlite3_column_double(sqlite3_stmt*, int iCol); SQLITE_API double sqlite3_column_double(sqlite3_stmt*, int iCol);
@ -4596,11 +4669,13 @@ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt);
** **
** ^These functions (collectively known as "function creation routines") ** ^These functions (collectively known as "function creation routines")
** are used to add SQL functions or aggregates or to redefine the behavior ** are used to add SQL functions or aggregates or to redefine the behavior
** of existing SQL functions or aggregates. The only differences between ** of existing SQL functions or aggregates. The only differences between
** these routines are the text encoding expected for ** the three "sqlite3_create_function*" routines are the text encoding
** the second parameter (the name of the function being created) ** expected for the second parameter (the name of the function being
** and the presence or absence of a destructor callback for ** created) and the presence or absence of a destructor callback for
** the application data pointer. ** the application data pointer. Function sqlite3_create_window_function()
** is similar, but allows the user to supply the extra callback functions
** needed by [aggregate window functions].
** **
** ^The first parameter is the [database connection] to which the SQL ** ^The first parameter is the [database connection] to which the SQL
** function is to be added. ^If an application uses more than one database ** function is to be added. ^If an application uses more than one database
@ -4646,7 +4721,8 @@ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt);
** ^(The fifth parameter is an arbitrary pointer. The implementation of the ** ^(The fifth parameter is an arbitrary pointer. The implementation of the
** function can gain access to this pointer using [sqlite3_user_data()].)^ ** function can gain access to this pointer using [sqlite3_user_data()].)^
** **
** ^The sixth, seventh and eighth parameters, xFunc, xStep and xFinal, are ** ^The sixth, seventh and eighth parameters passed to the three
** "sqlite3_create_function*" functions, xFunc, xStep and xFinal, are
** pointers to C-language functions that implement the SQL function or ** pointers to C-language functions that implement the SQL function or
** aggregate. ^A scalar SQL function requires an implementation of the xFunc ** aggregate. ^A scalar SQL function requires an implementation of the xFunc
** callback only; NULL pointers must be passed as the xStep and xFinal ** callback only; NULL pointers must be passed as the xStep and xFinal
@ -4655,15 +4731,24 @@ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt);
** SQL function or aggregate, pass NULL pointers for all three function ** SQL function or aggregate, pass NULL pointers for all three function
** callbacks. ** callbacks.
** **
** ^(If the ninth parameter to sqlite3_create_function_v2() is not NULL, ** ^The sixth, seventh, eighth and ninth parameters (xStep, xFinal, xValue
** then it is destructor for the application data pointer. ** and xInverse) passed to sqlite3_create_window_function are pointers to
** The destructor is invoked when the function is deleted, either by being ** C-language callbacks that implement the new function. xStep and xFinal
** overloaded or when the database connection closes.)^ ** must both be non-NULL. xValue and xInverse may either both be NULL, in
** ^The destructor is also invoked if the call to ** which case a regular aggregate function is created, or must both be
** sqlite3_create_function_v2() fails. ** non-NULL, in which case the new function may be used as either an aggregate
** ^When the destructor callback of the tenth parameter is invoked, it ** or aggregate window function. More details regarding the implementation
** is passed a single argument which is a copy of the application data ** of aggregate window functions are
** pointer which was the fifth parameter to sqlite3_create_function_v2(). ** [user-defined window functions|available here].
**
** ^(If the final parameter to sqlite3_create_function_v2() or
** sqlite3_create_window_function() is not NULL, then it is destructor for
** the application data pointer. The destructor is invoked when the function
** is deleted, either by being overloaded or when the database connection
** closes.)^ ^The destructor is also invoked if the call to
** sqlite3_create_function_v2() fails. ^When the destructor callback is
** invoked, it is passed a single argument which is a copy of the application
** data pointer which was the fifth parameter to sqlite3_create_function_v2().
** **
** ^It is permitted to register multiple implementations of the same ** ^It is permitted to register multiple implementations of the same
** functions with the same name but with either differing numbers of ** functions with the same name but with either differing numbers of
@ -4716,6 +4801,18 @@ SQLITE_API int sqlite3_create_function_v2(
void (*xFinal)(sqlite3_context*), void (*xFinal)(sqlite3_context*),
void(*xDestroy)(void*) void(*xDestroy)(void*)
); );
SQLITE_API int sqlite3_create_window_function(
sqlite3 *db,
const char *zFunctionName,
int nArg,
int eTextRep,
void *pApp,
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
void (*xFinal)(sqlite3_context*),
void (*xValue)(sqlite3_context*),
void (*xInverse)(sqlite3_context*,int,sqlite3_value**),
void(*xDestroy)(void*)
);
/* /*
** CAPI3REF: Text Encodings ** CAPI3REF: Text Encodings
@ -4858,6 +4955,28 @@ SQLITE_API SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int6
** **
** These routines must be called from the same thread as ** These routines must be called from the same thread as
** the SQL function that supplied the [sqlite3_value*] parameters. ** the SQL function that supplied the [sqlite3_value*] parameters.
**
** As long as the input parameter is correct, these routines can only
** fail if an out-of-memory error occurs during a format conversion.
** Only the following subset of interfaces are subject to out-of-memory
** errors:
**
** <ul>
** <li> sqlite3_value_blob()
** <li> sqlite3_value_text()
** <li> sqlite3_value_text16()
** <li> sqlite3_value_text16le()
** <li> sqlite3_value_text16be()
** <li> sqlite3_value_bytes()
** <li> sqlite3_value_bytes16()
** </ul>
**
** If an out-of-memory error occurs, then the return value from these
** routines is the same as if the column had contained an SQL NULL value.
** Valid SQL NULL returns can be distinguished from out-of-memory errors
** by invoking the [sqlite3_errcode()] immediately after the suspect
** return value is obtained and before any
** other SQLite interface is called on the same [database connection].
*/ */
SQLITE_API const void *sqlite3_value_blob(sqlite3_value*); SQLITE_API const void *sqlite3_value_blob(sqlite3_value*);
SQLITE_API double sqlite3_value_double(sqlite3_value*); SQLITE_API double sqlite3_value_double(sqlite3_value*);
@ -6324,6 +6443,7 @@ struct sqlite3_index_info {
#define SQLITE_INDEX_CONSTRAINT_ISNOTNULL 70 #define SQLITE_INDEX_CONSTRAINT_ISNOTNULL 70
#define SQLITE_INDEX_CONSTRAINT_ISNULL 71 #define SQLITE_INDEX_CONSTRAINT_ISNULL 71
#define SQLITE_INDEX_CONSTRAINT_IS 72 #define SQLITE_INDEX_CONSTRAINT_IS 72
#define SQLITE_INDEX_CONSTRAINT_FUNCTION 150
/* /*
** CAPI3REF: Register A Virtual Table Implementation ** CAPI3REF: Register A Virtual Table Implementation
@ -7000,6 +7120,7 @@ SQLITE_API sqlite3_mutex *sqlite3_db_mutex(sqlite3*);
/* /*
** CAPI3REF: Low-Level Control Of Database Files ** CAPI3REF: Low-Level Control Of Database Files
** METHOD: sqlite3 ** METHOD: sqlite3
** KEYWORDS: {file control}
** **
** ^The [sqlite3_file_control()] interface makes a direct call to the ** ^The [sqlite3_file_control()] interface makes a direct call to the
** xFileControl method for the [sqlite3_io_methods] object associated ** xFileControl method for the [sqlite3_io_methods] object associated
@ -7014,11 +7135,18 @@ SQLITE_API sqlite3_mutex *sqlite3_db_mutex(sqlite3*);
** the xFileControl method. ^The return value of the xFileControl ** the xFileControl method. ^The return value of the xFileControl
** method becomes the return value of this routine. ** method becomes the return value of this routine.
** **
** A few opcodes for [sqlite3_file_control()] are handled directly
** by the SQLite core and never invoke the
** sqlite3_io_methods.xFileControl method.
** ^The [SQLITE_FCNTL_FILE_POINTER] value for the op parameter causes ** ^The [SQLITE_FCNTL_FILE_POINTER] value for the op parameter causes
** a pointer to the underlying [sqlite3_file] object to be written into ** a pointer to the underlying [sqlite3_file] object to be written into
** the space pointed to by the 4th parameter. ^The [SQLITE_FCNTL_FILE_POINTER] ** the space pointed to by the 4th parameter. The
** case is a short-circuit path which does not actually invoke the ** [SQLITE_FCNTL_JOURNAL_POINTER] works similarly except that it returns
** underlying sqlite3_io_methods.xFileControl method. ** the [sqlite3_file] object associated with the journal file instead of
** the main database. The [SQLITE_FCNTL_VFS_POINTER] opcode returns
** a pointer to the underlying [sqlite3_vfs] object for the file.
** The [SQLITE_FCNTL_DATA_VERSION] returns the data version counter
** from the pager.
** **
** ^If the second parameter (zDbName) does not match the name of any ** ^If the second parameter (zDbName) does not match the name of any
** open database file, then SQLITE_ERROR is returned. ^This error ** open database file, then SQLITE_ERROR is returned. ^This error
@ -8837,7 +8965,6 @@ SQLITE_API int sqlite3_system_errno(sqlite3*);
/* /*
** CAPI3REF: Database Snapshot ** CAPI3REF: Database Snapshot
** KEYWORDS: {snapshot} {sqlite3_snapshot} ** KEYWORDS: {snapshot} {sqlite3_snapshot}
** EXPERIMENTAL
** **
** An instance of the snapshot object records the state of a [WAL mode] ** An instance of the snapshot object records the state of a [WAL mode]
** database for some specific point in history. ** database for some specific point in history.
@ -8854,11 +8981,6 @@ SQLITE_API int sqlite3_system_errno(sqlite3*);
** version of the database file so that it is possible to later open a new read ** version of the database file so that it is possible to later open a new read
** transaction that sees that historical version of the database rather than ** transaction that sees that historical version of the database rather than
** the most recent version. ** the most recent version.
**
** The constructor for this object is [sqlite3_snapshot_get()]. The
** [sqlite3_snapshot_open()] method causes a fresh read transaction to refer
** to an historical snapshot (if possible). The destructor for
** sqlite3_snapshot objects is [sqlite3_snapshot_free()].
*/ */
typedef struct sqlite3_snapshot { typedef struct sqlite3_snapshot {
unsigned char hidden[48]; unsigned char hidden[48];
@ -8866,7 +8988,7 @@ typedef struct sqlite3_snapshot {
/* /*
** CAPI3REF: Record A Database Snapshot ** CAPI3REF: Record A Database Snapshot
** EXPERIMENTAL ** CONSTRUCTOR: sqlite3_snapshot
** **
** ^The [sqlite3_snapshot_get(D,S,P)] interface attempts to make a ** ^The [sqlite3_snapshot_get(D,S,P)] interface attempts to make a
** new [sqlite3_snapshot] object that records the current state of ** new [sqlite3_snapshot] object that records the current state of
@ -8882,7 +9004,7 @@ typedef struct sqlite3_snapshot {
** in this case. ** in this case.
** **
** <ul> ** <ul>
** <li> The database handle must be in [autocommit mode]. ** <li> The database handle must not be in [autocommit mode].
** **
** <li> Schema S of [database connection] D must be a [WAL mode] database. ** <li> Schema S of [database connection] D must be a [WAL mode] database.
** **
@ -8905,7 +9027,7 @@ typedef struct sqlite3_snapshot {
** to avoid a memory leak. ** to avoid a memory leak.
** **
** The [sqlite3_snapshot_get()] interface is only available when the ** The [sqlite3_snapshot_get()] interface is only available when the
** SQLITE_ENABLE_SNAPSHOT compile-time option is used. ** [SQLITE_ENABLE_SNAPSHOT] compile-time option is used.
*/ */
SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_get( SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_get(
sqlite3 *db, sqlite3 *db,
@ -8915,24 +9037,35 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_get(
/* /*
** CAPI3REF: Start a read transaction on an historical snapshot ** CAPI3REF: Start a read transaction on an historical snapshot
** EXPERIMENTAL ** METHOD: sqlite3_snapshot
** **
** ^The [sqlite3_snapshot_open(D,S,P)] interface starts a ** ^The [sqlite3_snapshot_open(D,S,P)] interface either starts a new read
** read transaction for schema S of ** transaction or upgrades an existing one for schema S of
** [database connection] D such that the read transaction ** [database connection] D such that the read transaction refers to
** refers to historical [snapshot] P, rather than the most ** historical [snapshot] P, rather than the most recent change to the
** recent change to the database. ** database. ^The [sqlite3_snapshot_open()] interface returns SQLITE_OK
** ^The [sqlite3_snapshot_open()] interface returns SQLITE_OK on success ** on success or an appropriate [error code] if it fails.
** or an appropriate [error code] if it fails. **
** ^In order to succeed, the database connection must not be in
** [autocommit mode] when [sqlite3_snapshot_open(D,S,P)] is called. If there
** is already a read transaction open on schema S, then the database handle
** must have no active statements (SELECT statements that have been passed
** to sqlite3_step() but not sqlite3_reset() or sqlite3_finalize()).
** SQLITE_ERROR is returned if either of these conditions is violated, or
** if schema S does not exist, or if the snapshot object is invalid.
**
** ^A call to sqlite3_snapshot_open() will fail to open if the specified
** snapshot has been overwritten by a [checkpoint]. In this case
** SQLITE_ERROR_SNAPSHOT is returned.
**
** If there is already a read transaction open when this function is
** invoked, then the same read transaction remains open (on the same
** database snapshot) if SQLITE_ERROR, SQLITE_BUSY or SQLITE_ERROR_SNAPSHOT
** is returned. If another error code - for example SQLITE_PROTOCOL or an
** SQLITE_IOERR error code - is returned, then the final state of the
** read transaction is undefined. If SQLITE_OK is returned, then the
** read transaction is now open on database snapshot P.
** **
** ^In order to succeed, a call to [sqlite3_snapshot_open(D,S,P)] must be
** the first operation following the [BEGIN] that takes the schema S
** out of [autocommit mode].
** ^In other words, schema S must not currently be in
** a transaction for [sqlite3_snapshot_open(D,S,P)] to work, but the
** database connection D must be out of [autocommit mode].
** ^A [snapshot] will fail to open if it has been overwritten by a
** [checkpoint].
** ^(A call to [sqlite3_snapshot_open(D,S,P)] will fail if the ** ^(A call to [sqlite3_snapshot_open(D,S,P)] will fail if the
** database connection D does not know that the database file for ** database connection D does not know that the database file for
** schema S is in [WAL mode]. A database connection might not know ** schema S is in [WAL mode]. A database connection might not know
@ -8943,7 +9076,7 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_get(
** database connection in order to make it ready to use snapshots.) ** database connection in order to make it ready to use snapshots.)
** **
** The [sqlite3_snapshot_open()] interface is only available when the ** The [sqlite3_snapshot_open()] interface is only available when the
** SQLITE_ENABLE_SNAPSHOT compile-time option is used. ** [SQLITE_ENABLE_SNAPSHOT] compile-time option is used.
*/ */
SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_open( SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_open(
sqlite3 *db, sqlite3 *db,
@ -8953,20 +9086,20 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_open(
/* /*
** CAPI3REF: Destroy a snapshot ** CAPI3REF: Destroy a snapshot
** EXPERIMENTAL ** DESTRUCTOR: sqlite3_snapshot
** **
** ^The [sqlite3_snapshot_free(P)] interface destroys [sqlite3_snapshot] P. ** ^The [sqlite3_snapshot_free(P)] interface destroys [sqlite3_snapshot] P.
** The application must eventually free every [sqlite3_snapshot] object ** The application must eventually free every [sqlite3_snapshot] object
** using this routine to avoid a memory leak. ** using this routine to avoid a memory leak.
** **
** The [sqlite3_snapshot_free()] interface is only available when the ** The [sqlite3_snapshot_free()] interface is only available when the
** SQLITE_ENABLE_SNAPSHOT compile-time option is used. ** [SQLITE_ENABLE_SNAPSHOT] compile-time option is used.
*/ */
SQLITE_API SQLITE_EXPERIMENTAL void sqlite3_snapshot_free(sqlite3_snapshot*); SQLITE_API SQLITE_EXPERIMENTAL void sqlite3_snapshot_free(sqlite3_snapshot*);
/* /*
** CAPI3REF: Compare the ages of two snapshot handles. ** CAPI3REF: Compare the ages of two snapshot handles.
** EXPERIMENTAL ** METHOD: sqlite3_snapshot
** **
** The sqlite3_snapshot_cmp(P1, P2) interface is used to compare the ages ** The sqlite3_snapshot_cmp(P1, P2) interface is used to compare the ages
** of two valid snapshot handles. ** of two valid snapshot handles.
@ -8985,6 +9118,9 @@ SQLITE_API SQLITE_EXPERIMENTAL void sqlite3_snapshot_free(sqlite3_snapshot*);
** Otherwise, this API returns a negative value if P1 refers to an older ** Otherwise, this API returns a negative value if P1 refers to an older
** snapshot than P2, zero if the two handles refer to the same database ** snapshot than P2, zero if the two handles refer to the same database
** snapshot, and a positive value if P1 is a newer snapshot than P2. ** snapshot, and a positive value if P1 is a newer snapshot than P2.
**
** This interface is only available if SQLite is compiled with the
** [SQLITE_ENABLE_SNAPSHOT] option.
*/ */
SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_cmp( SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_cmp(
sqlite3_snapshot *p1, sqlite3_snapshot *p1,
@ -8993,23 +9129,26 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_cmp(
/* /*
** CAPI3REF: Recover snapshots from a wal file ** CAPI3REF: Recover snapshots from a wal file
** EXPERIMENTAL ** METHOD: sqlite3_snapshot
** **
** If all connections disconnect from a database file but do not perform ** If a [WAL file] remains on disk after all database connections close
** a checkpoint, the existing wal file is opened along with the database ** (either through the use of the [SQLITE_FCNTL_PERSIST_WAL] [file control]
** file the next time the database is opened. At this point it is only ** or because the last process to have the database opened exited without
** possible to successfully call sqlite3_snapshot_open() to open the most ** calling [sqlite3_close()]) and a new connection is subsequently opened
** recent snapshot of the database (the one at the head of the wal file), ** on that database and [WAL file], the [sqlite3_snapshot_open()] interface
** even though the wal file may contain other valid snapshots for which ** will only be able to open the last transaction added to the WAL file
** clients have sqlite3_snapshot handles. ** even though the WAL file contains other valid transactions.
** **
** This function attempts to scan the wal file associated with database zDb ** This function attempts to scan the WAL file associated with database zDb
** of database handle db and make all valid snapshots available to ** of database handle db and make all valid snapshots available to
** sqlite3_snapshot_open(). It is an error if there is already a read ** sqlite3_snapshot_open(). It is an error if there is already a read
** transaction open on the database, or if the database is not a wal mode ** transaction open on the database, or if the database is not a WAL mode
** database. ** database.
** **
** SQLITE_OK is returned if successful, or an SQLite error code otherwise. ** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
**
** This interface is only available if SQLite is compiled with the
** [SQLITE_ENABLE_SNAPSHOT] option.
*/ */
SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb); SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb);
@ -9120,7 +9259,7 @@ SQLITE_API int sqlite3_deserialize(
** in the P argument is held in memory obtained from [sqlite3_malloc64()] ** in the P argument is held in memory obtained from [sqlite3_malloc64()]
** and that SQLite should take ownership of this memory and automatically ** and that SQLite should take ownership of this memory and automatically
** free it when it has finished using it. Without this flag, the caller ** free it when it has finished using it. Without this flag, the caller
** is resposible for freeing any dynamically allocated memory. ** is responsible for freeing any dynamically allocated memory.
** **
** The SQLITE_DESERIALIZE_RESIZEABLE flag means that SQLite is allowed to ** The SQLITE_DESERIALIZE_RESIZEABLE flag means that SQLite is allowed to
** grow the size of the database using calls to [sqlite3_realloc64()]. This ** grow the size of the database using calls to [sqlite3_realloc64()]. This
@ -11298,7 +11437,7 @@ struct Fts5ExtensionApi {
** This way, even if the tokenizer does not provide synonyms ** This way, even if the tokenizer does not provide synonyms
** when tokenizing query text (it should not - to do would be ** when tokenizing query text (it should not - to do would be
** inefficient), it doesn't matter if the user queries for ** inefficient), it doesn't matter if the user queries for
** 'first + place' or '1st + place', as there are entires in the ** 'first + place' or '1st + place', as there are entries in the
** FTS index corresponding to both forms of the first token. ** FTS index corresponding to both forms of the first token.
** </ol> ** </ol>
** **
@ -11326,7 +11465,7 @@ struct Fts5ExtensionApi {
** extra data to the FTS index or require FTS5 to query for multiple terms, ** extra data to the FTS index or require FTS5 to query for multiple terms,
** so it is efficient in terms of disk space and query speed. However, it ** so it is efficient in terms of disk space and query speed. However, it
** does not support prefix queries very well. If, as suggested above, the ** does not support prefix queries very well. If, as suggested above, the
** token "first" is subsituted for "1st" by the tokenizer, then the query: ** token "first" is substituted for "1st" by the tokenizer, then the query:
** **
** <codeblock> ** <codeblock>
** ... MATCH '1s*'</codeblock> ** ... MATCH '1s*'</codeblock>

View File

@ -78,8 +78,38 @@ _sqlite3_exec(sqlite3* db, const char* pcmd, long long* rowid, long long* change
return rv; return rv;
} }
#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY
extern int _sqlite3_step_blocking(sqlite3_stmt *stmt);
extern int _sqlite3_step_row_blocking(sqlite3_stmt* stmt, long long* rowid, long long* changes);
extern int _sqlite3_prepare_v2_blocking(sqlite3 *db, const char *zSql, int nBytes, sqlite3_stmt **ppStmt, const char **pzTail);
static int static int
_sqlite3_step(sqlite3_stmt* stmt, long long* rowid, long long* changes) _sqlite3_step_internal(sqlite3_stmt *stmt)
{
return _sqlite3_step_blocking(stmt);
}
static int
_sqlite3_step_row_internal(sqlite3_stmt* stmt, long long* rowid, long long* changes)
{
return _sqlite3_step_row_blocking(stmt, rowid, changes);
}
static int
_sqlite3_prepare_v2_internal(sqlite3 *db, const char *zSql, int nBytes, sqlite3_stmt **ppStmt, const char **pzTail)
{
return _sqlite3_prepare_v2_blocking(db, zSql, nBytes, ppStmt, pzTail);
}
#else
static int
_sqlite3_step_internal(sqlite3_stmt *stmt)
{
return sqlite3_step(stmt);
}
static int
_sqlite3_step_row_internal(sqlite3_stmt* stmt, long long* rowid, long long* changes)
{ {
int rv = sqlite3_step(stmt); int rv = sqlite3_step(stmt);
sqlite3* db = sqlite3_db_handle(stmt); sqlite3* db = sqlite3_db_handle(stmt);
@ -88,6 +118,13 @@ _sqlite3_step(sqlite3_stmt* stmt, long long* rowid, long long* changes)
return rv; return rv;
} }
static int
_sqlite3_prepare_v2_internal(sqlite3 *db, const char *zSql, int nBytes, sqlite3_stmt **ppStmt, const char **pzTail)
{
return sqlite3_prepare_v2(db, zSql, nBytes, ppStmt, pzTail);
}
#endif
void _sqlite3_result_text(sqlite3_context* ctx, const char* s) { void _sqlite3_result_text(sqlite3_context* ctx, const char* s) {
sqlite3_result_text(ctx, s, -1, &free); sqlite3_result_text(ctx, s, -1, &free);
} }
@ -119,6 +156,8 @@ int commitHookTrampoline(void*);
void rollbackHookTrampoline(void*); void rollbackHookTrampoline(void*);
void updateHookTrampoline(void*, int, char*, char*, sqlite3_int64); void updateHookTrampoline(void*, int, char*, char*, sqlite3_int64);
int authorizerTrampoline(void*, int, char*, char*, char*, char*);
#ifdef SQLITE_LIMIT_WORKER_THREADS #ifdef SQLITE_LIMIT_WORKER_THREADS
# define _SQLITE_HAS_LIMIT # define _SQLITE_HAS_LIMIT
# define SQLITE_LIMIT_LENGTH 0 # define SQLITE_LIMIT_LENGTH 0
@ -200,18 +239,57 @@ func Version() (libVersion string, libVersionNumber int, sourceID string) {
} }
const ( const (
// used by authorizer and pre_update_hook
SQLITE_DELETE = C.SQLITE_DELETE SQLITE_DELETE = C.SQLITE_DELETE
SQLITE_INSERT = C.SQLITE_INSERT SQLITE_INSERT = C.SQLITE_INSERT
SQLITE_UPDATE = C.SQLITE_UPDATE SQLITE_UPDATE = C.SQLITE_UPDATE
// used by authorizer - as return value
SQLITE_OK = C.SQLITE_OK
SQLITE_IGNORE = C.SQLITE_IGNORE
SQLITE_DENY = C.SQLITE_DENY
// different actions a query tries to perform - passed as an argument to the authorizer
SQLITE_CREATE_INDEX = C.SQLITE_CREATE_INDEX
SQLITE_CREATE_TABLE = C.SQLITE_CREATE_TABLE
SQLITE_CREATE_TEMP_INDEX = C.SQLITE_CREATE_TEMP_INDEX
SQLITE_CREATE_TEMP_TABLE = C.SQLITE_CREATE_TEMP_TABLE
SQLITE_CREATE_TEMP_TRIGGER = C.SQLITE_CREATE_TEMP_TRIGGER
SQLITE_CREATE_TEMP_VIEW = C.SQLITE_CREATE_TEMP_VIEW
SQLITE_CREATE_TRIGGER = C.SQLITE_CREATE_TRIGGER
SQLITE_CREATE_VIEW = C.SQLITE_CREATE_VIEW
SQLITE_CREATE_VTABLE = C.SQLITE_CREATE_VTABLE
SQLITE_DROP_INDEX = C.SQLITE_DROP_INDEX
SQLITE_DROP_TABLE = C.SQLITE_DROP_TABLE
SQLITE_DROP_TEMP_INDEX = C.SQLITE_DROP_TEMP_INDEX
SQLITE_DROP_TEMP_TABLE = C.SQLITE_DROP_TEMP_TABLE
SQLITE_DROP_TEMP_TRIGGER = C.SQLITE_DROP_TEMP_TRIGGER
SQLITE_DROP_TEMP_VIEW = C.SQLITE_DROP_TEMP_VIEW
SQLITE_DROP_TRIGGER = C.SQLITE_DROP_TRIGGER
SQLITE_DROP_VIEW = C.SQLITE_DROP_VIEW
SQLITE_DROP_VTABLE = C.SQLITE_DROP_VTABLE
SQLITE_PRAGMA = C.SQLITE_PRAGMA
SQLITE_READ = C.SQLITE_READ
SQLITE_SELECT = C.SQLITE_SELECT
SQLITE_TRANSACTION = C.SQLITE_TRANSACTION
SQLITE_ATTACH = C.SQLITE_ATTACH
SQLITE_DETACH = C.SQLITE_DETACH
SQLITE_ALTER_TABLE = C.SQLITE_ALTER_TABLE
SQLITE_REINDEX = C.SQLITE_REINDEX
SQLITE_ANALYZE = C.SQLITE_ANALYZE
SQLITE_FUNCTION = C.SQLITE_FUNCTION
SQLITE_SAVEPOINT = C.SQLITE_SAVEPOINT
SQLITE_COPY = C.SQLITE_COPY
/*SQLITE_RECURSIVE = C.SQLITE_RECURSIVE*/
)
-// SQLiteDriver implement sql.Driver.
+// SQLiteDriver implements driver.Driver.
type SQLiteDriver struct {
Extensions []string
ConnectHook func(*SQLiteConn) error
}
-// SQLiteConn implement sql.Conn.
+// SQLiteConn implements driver.Conn.
type SQLiteConn struct {
mu sync.Mutex
db *C.sqlite3
@@ -221,12 +299,12 @@ type SQLiteConn struct {
aggregators []*aggInfo
}
-// SQLiteTx implemen sql.Tx.
+// SQLiteTx implements driver.Tx.
type SQLiteTx struct {
c *SQLiteConn
}
-// SQLiteStmt implement sql.Stmt.
+// SQLiteStmt implements driver.Stmt.
type SQLiteStmt struct {
mu sync.Mutex
c *SQLiteConn
@@ -236,13 +314,13 @@ type SQLiteStmt struct {
cls bool
}
-// SQLiteResult implement sql.Result.
+// SQLiteResult implements sql.Result.
type SQLiteResult struct {
id int64
changes int64
}
-// SQLiteRows implement sql.Rows.
+// SQLiteRows implements driver.Rows.
type SQLiteRows struct {
s *SQLiteStmt
nc int
@@ -440,6 +518,20 @@ func (c *SQLiteConn) RegisterUpdateHook(callback func(int, string, string, int64
}
}
// RegisterAuthorizer sets the authorizer for the connection.
//
// The parameters to the callback are the operation (one of the constants
// SQLITE_INSERT, SQLITE_DELETE, or SQLITE_UPDATE), and 1 to 3 arguments,
// depending on the operation. For more details, see:
// https://www.sqlite.org/c3ref/c_alter_table.html
func (c *SQLiteConn) RegisterAuthorizer(callback func(int, string, string, string) int) {
if callback == nil {
C.sqlite3_set_authorizer(c.db, nil, nil)
} else {
C.sqlite3_set_authorizer(c.db, (*[0]byte)(C.authorizerTrampoline), unsafe.Pointer(newHandle(c, callback)))
}
}
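As a usage sketch for the authorizer hook added above (the driver name "sqlite3_with_auth", the database file, and the table name are illustrative, not part of the vendored change):

package main

import (
	"database/sql"
	"log"

	sqlite3 "github.com/mattn/go-sqlite3"
)

func main() {
	// Register a driver variant whose connections install an authorizer.
	sql.Register("sqlite3_with_auth", &sqlite3.SQLiteDriver{
		ConnectHook: func(conn *sqlite3.SQLiteConn) error {
			conn.RegisterAuthorizer(func(op int, arg1, arg2, arg3 string) int {
				if op == sqlite3.SQLITE_DROP_TABLE {
					return sqlite3.SQLITE_DENY // refuse DROP TABLE statements
				}
				return sqlite3.SQLITE_OK
			})
			return nil
		},
	})

	db, err := sql.Open("sqlite3_with_auth", "auth_demo.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec("CREATE TABLE IF NOT EXISTS t(id INTEGER)"); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec("DROP TABLE t"); err != nil {
		log.Println("drop rejected:", err) // expected: not authorized
	}
}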
// RegisterFunc makes a Go function available as a SQLite function.
//
// The Go function can have arguments of the following types: any
@@ -1582,7 +1674,7 @@ func (c *SQLiteConn) prepare(ctx context.Context, query string) (driver.Stmt, er
defer C.free(unsafe.Pointer(pquery))
var s *C.sqlite3_stmt
var tail *C.char
-rv := C.sqlite3_prepare_v2(c.db, pquery, -1, &s, &tail)
+rv := C._sqlite3_prepare_v2_internal(c.db, pquery, C.int(-1), &s, &tail)
if rv != C.SQLITE_OK {
return nil, c.lastError()
}
@@ -1626,7 +1718,7 @@ func (c *SQLiteConn) GetFilename(schemaName string) string {
// GetLimit returns the current value of a run-time limit.
// See: sqlite3_limit, http://www.sqlite.org/c3ref/limit.html
func (c *SQLiteConn) GetLimit(id int) int {
-return int(C._sqlite3_limit(c.db, C.int(id), -1))
+return int(C._sqlite3_limit(c.db, C.int(id), C.int(-1)))
}
// SetLimit changes the value of a run-time limit.
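For illustration, a hedged sketch of how GetLimit/SetLimit are typically reached through a ConnectHook; it assumes the package exports SQLITE_LIMIT_LENGTH and a SetLimit(id, newVal int) int method as in upstream go-sqlite3, and the driver name is made up:

package main

import (
	"database/sql"
	"log"

	sqlite3 "github.com/mattn/go-sqlite3"
)

func main() {
	sql.Register("sqlite3_limited", &sqlite3.SQLiteDriver{
		ConnectHook: func(conn *sqlite3.SQLiteConn) error {
			// Halve the maximum string/BLOB length on this connection.
			cur := conn.GetLimit(sqlite3.SQLITE_LIMIT_LENGTH)
			conn.SetLimit(sqlite3.SQLITE_LIMIT_LENGTH, cur/2)
			return nil
		},
	})

	db, err := sql.Open("sqlite3_limited", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil { // forces a real connection, running the hook
		log.Fatal(err)
	}
}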
@@ -1816,7 +1908,7 @@ func (s *SQLiteStmt) exec(ctx context.Context, args []namedValue) (driver.Result
}
var rowid, changes C.longlong
-rv := C._sqlite3_step(s.s, &rowid, &changes)
+rv := C._sqlite3_step_row_internal(s.s, &rowid, &changes)
if rv != C.SQLITE_ROW && rv != C.SQLITE_OK && rv != C.SQLITE_DONE {
err := s.c.lastError()
C.sqlite3_reset(s.s)
@@ -1888,7 +1980,7 @@ func (rc *SQLiteRows) Next(dest []driver.Value) error {
if rc.s.closed {
return io.EOF
}
-rv := C.sqlite3_step(rc.s.s)
+rv := C._sqlite3_step_internal(rc.s.s)
if rv == C.SQLITE_DONE {
return io.EOF
}


@@ -83,13 +83,13 @@ func CryptEncoderSSHA256(salt string) func(pass []byte, hash interface{}) []byte
}
}
-// CryptEncoderSHA384 encodes a password with SHA256
+// CryptEncoderSHA384 encodes a password with SHA384
func CryptEncoderSHA384(pass []byte, hash interface{}) []byte {
h := sha512.Sum384(pass)
return h[:]
}
-// CryptEncoderSSHA384 encodes a password with SHA256
+// CryptEncoderSSHA384 encodes a password with SHA384
// with the configured salt
func CryptEncoderSSHA384(salt string) func(pass []byte, hash interface{}) []byte {
return func(pass []byte, hash interface{}) []byte {
@@ -100,13 +100,13 @@ func CryptEncoderSSHA384(salt string) func(pass []byte, hash interface{}) []byte
}
}
-// CryptEncoderSHA512 encodes a password with SHA256
+// CryptEncoderSHA512 encodes a password with SHA512
func CryptEncoderSHA512(pass []byte, hash interface{}) []byte {
h := sha512.Sum512(pass)
return h[:]
}
-// CryptEncoderSSHA512 encodes a password with SHA256
+// CryptEncoderSSHA512 encodes a password with SHA512
// with the configured salt
func CryptEncoderSSHA512(salt string) func(pass []byte, hash interface{}) []byte {
return func(pass []byte, hash interface{}) []byte {
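A small sketch calling the encoder constructors above directly; the salt and password values are purely illustrative:

package main

import (
	"encoding/hex"
	"fmt"

	sqlite3 "github.com/mattn/go-sqlite3"
)

func main() {
	// Plain SHA-384 digest of a password.
	plain := sqlite3.CryptEncoderSHA384([]byte("s3cret"), nil)

	// Salted SHA-384 digest using a fixed, illustrative salt.
	salted := sqlite3.CryptEncoderSSHA384("example-salt")([]byte("s3cret"), nil)

	fmt.Println(hex.EncodeToString(plain))
	fmt.Println(hex.EncodeToString(salted))
}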


@ -0,0 +1,85 @@
// Copyright (C) 2018 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY
#include <stdio.h>
#include <sqlite3-binding.h>
extern int unlock_notify_wait(sqlite3 *db);
int
_sqlite3_step_blocking(sqlite3_stmt *stmt)
{
int rv;
sqlite3* db;
db = sqlite3_db_handle(stmt);
for (;;) {
rv = sqlite3_step(stmt);
if (rv != SQLITE_LOCKED) {
break;
}
if (sqlite3_extended_errcode(db) != SQLITE_LOCKED_SHAREDCACHE) {
break;
}
rv = unlock_notify_wait(db);
if (rv != SQLITE_OK) {
break;
}
sqlite3_reset(stmt);
}
return rv;
}
int
_sqlite3_step_row_blocking(sqlite3_stmt* stmt, long long* rowid, long long* changes)
{
int rv;
sqlite3* db;
db = sqlite3_db_handle(stmt);
for (;;) {
rv = sqlite3_step(stmt);
if (rv!=SQLITE_LOCKED) {
break;
}
if (sqlite3_extended_errcode(db) != SQLITE_LOCKED_SHAREDCACHE) {
break;
}
rv = unlock_notify_wait(db);
if (rv != SQLITE_OK) {
break;
}
sqlite3_reset(stmt);
}
*rowid = (long long) sqlite3_last_insert_rowid(db);
*changes = (long long) sqlite3_changes(db);
return rv;
}
int
_sqlite3_prepare_v2_blocking(sqlite3 *db, const char *zSql, int nBytes, sqlite3_stmt **ppStmt, const char **pzTail)
{
int rv;
for (;;) {
rv = sqlite3_prepare_v2(db, zSql, nBytes, ppStmt, pzTail);
if (rv!=SQLITE_LOCKED) {
break;
}
if (sqlite3_extended_errcode(db) != SQLITE_LOCKED_SHAREDCACHE) {
break;
}
rv = unlock_notify_wait(db);
if (rv != SQLITE_OK) {
break;
}
}
return rv;
}
#endif


@ -0,0 +1,93 @@
// Copyright (C) 2018 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build cgo
// +build sqlite_unlock_notify
package sqlite3
/*
#cgo CFLAGS: -DSQLITE_ENABLE_UNLOCK_NOTIFY
#include <stdlib.h>
#include <sqlite3-binding.h>
extern void unlock_notify_callback(void *arg, int argc);
*/
import "C"
import (
"fmt"
"math"
"sync"
"unsafe"
)
type unlock_notify_table struct {
sync.Mutex
seqnum uint
table map[uint]chan struct{}
}
var unt unlock_notify_table = unlock_notify_table{table: make(map[uint]chan struct{})}
func (t *unlock_notify_table) add(c chan struct{}) uint {
t.Lock()
defer t.Unlock()
h := t.seqnum
t.table[h] = c
t.seqnum++
return h
}
func (t *unlock_notify_table) remove(h uint) {
t.Lock()
defer t.Unlock()
delete(t.table, h)
}
func (t *unlock_notify_table) get(h uint) chan struct{} {
t.Lock()
defer t.Unlock()
c, ok := t.table[h]
if !ok {
panic(fmt.Sprintf("Non-existent key for unlock-notify channel: %d", h))
}
return c
}
//export unlock_notify_callback
func unlock_notify_callback(argv unsafe.Pointer, argc C.int) {
for i := 0; i < int(argc); i++ {
parg := ((*(*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.uint)(nil))]*[1]uint)(argv))[i])
arg := *parg
h := arg[0]
c := unt.get(h)
c <- struct{}{}
}
}
//export unlock_notify_wait
func unlock_notify_wait(db *C.sqlite3) C.int {
// It has to be a buffered channel so we do not block in sqlite3_unlock_notify,
// as sqlite3_unlock_notify could invoke the callback before it returns.
c := make(chan struct{}, 1)
defer close(c)
h := unt.add(c)
defer unt.remove(h)
pargv := C.malloc(C.sizeof_uint)
defer C.free(pargv)
argv := (*[1]uint)(pargv)
argv[0] = h
if rv := C.sqlite3_unlock_notify(db, (*[0]byte)(C.unlock_notify_callback), unsafe.Pointer(pargv)); rv != C.SQLITE_OK {
return rv
}
<-c
return C.SQLITE_OK
}
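A hedged usage sketch for this unlock-notify support: the program below assumes it is built with "go build -tags sqlite_unlock_notify" so that the file above is compiled into the driver; the file and table names are illustrative.

package main

// Build with: go build -tags sqlite_unlock_notify
// so that the unlock-notify file above is compiled into the driver.

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	// Shared-cache mode is the setup in which SQLITE_LOCKED_SHAREDCACHE, and
	// therefore sqlite3_unlock_notify, comes into play between connections.
	db, err := sql.Open("sqlite3", "file:unlock_demo.db?cache=shared")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec("CREATE TABLE IF NOT EXISTS t(v TEXT)"); err != nil {
		log.Fatal(err)
	}
}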


@@ -311,6 +311,12 @@ struct sqlite3_api_routines {
int (*str_errcode)(sqlite3_str*);
int (*str_length)(sqlite3_str*);
char *(*str_value)(sqlite3_str*);
int (*create_window_function)(sqlite3*,const char*,int,int,void*,
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
void (*xFinal)(sqlite3_context*),
void (*xValue)(sqlite3_context*),
void (*xInv)(sqlite3_context*,int,sqlite3_value**),
void(*xDestroy)(void*));
};
/*
@@ -596,6 +602,8 @@ typedef int (*sqlite3_loadext_entry)(
#define sqlite3_str_errcode sqlite3_api->str_errcode
#define sqlite3_str_length sqlite3_api->str_length
#define sqlite3_str_value sqlite3_api->str_value
/* Version 3.25.0 and later */
#define sqlite3_create_window_function sqlite3_api->create_window_function
#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)


@@ -280,7 +280,7 @@ func (a AssetMigrationSource) FindMigrations() ([]*Migration, error) {
// packr.Box that we need.
type PackrBox interface {
List() []string
-Bytes(name string) []byte
+Find(name string) ([]byte, error)
}
// Migrations from a packr box.
@@ -313,7 +313,10 @@ func (p PackrMigrationSource) FindMigrations() ([]*Migration, error) {
}
if strings.HasSuffix(name, ".sql") {
-file := p.Box.Bytes(item)
+file, err := p.Box.Find(item)
if err != nil {
return nil, err
}
migration, err := ParseMigration(name, bytes.NewReader(file))
if err != nil {
@@ -647,7 +650,8 @@ func getMigrationDbMap(db *sql.DB, dialect string) (*gorp.DbMap, error) {
err := db.QueryRow("SELECT NOW()").Scan(&out)
if err != nil {
if err.Error() == "sql: Scan error on column index 0: unsupported driver -> Scan pair: []uint8 -> *time.Time" ||
-err.Error() == "sql: Scan error on column index 0: unsupported Scan, storing driver.Value type []uint8 into type *time.Time" {
+err.Error() == "sql: Scan error on column index 0: unsupported Scan, storing driver.Value type []uint8 into type *time.Time" ||
err.Error() == "sql: Scan error on column index 0, name \"NOW()\": unsupported Scan, storing driver.Value type []uint8 into type *time.Time" {
return nil, errors.New(`Cannot parse dates.
Make sure that the parseTime option is supplied to your database connection.
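Since PackrBox above now expects Find instead of Bytes, a box-like type has to return (data, error); here is a minimal sketch of a type that would satisfy the updated interface (the inMemoryBox type is invented for illustration and is not part of sql-migrate):

package main

import "fmt"

// inMemoryBox is a stand-in for a packr box, illustrative only.
type inMemoryBox struct {
	files map[string][]byte
}

func (b inMemoryBox) List() []string {
	names := make([]string, 0, len(b.files))
	for name := range b.files {
		names = append(names, name)
	}
	return names
}

// Find mirrors the updated PackrBox method: it returns an error for a
// missing entry instead of the silent nil slice that Bytes allowed.
func (b inMemoryBox) Find(name string) ([]byte, error) {
	data, ok := b.files[name]
	if !ok {
		return nil, fmt.Errorf("file %q not found", name)
	}
	return data, nil
}

func main() {
	box := inMemoryBox{files: map[string][]byte{
		"1_init.sql": []byte("-- +migrate Up\nCREATE TABLE demo(id INTEGER);\n"),
	}}
	data, err := box.Find("1_init.sql")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes\n", len(data))
}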

vendor/vendor.json

@@ -131,16 +131,28 @@
"revision": ""
},
{
-"checksumSHA1": "k9izYoyobR8Upb3Yjsw9htgE/Mk=",
+"checksumSHA1": "ANMYkuezaW7Pl/JJBiEs0KLzTOg=",
"path": "github.com/go-sql-driver/mysql",
-"revision": "99ff426eb706cffe92ff3d058e168b278cabf7c7",
+"revision": "369b5d6e5e8e108ed4ae2f2b1607d444b3807dfb",
-"revisionTime": "2018-07-19T07:19:42Z"
+"revisionTime": "2018-11-13T02:38:49Z"
},
{
-"checksumSHA1": "miq6rtrLoaiB6tqIWAp+gmaPVGk=",
+"checksumSHA1": "neHnlLCthYNy8yvSWjeFA4JNzrE=",
"path": "github.com/gobuffalo/envy",
"revision": "910ef88c9d32c6e779231577dfcf6ed8959bea2f",
"revisionTime": "2018-11-10T02:19:45Z"
},
{
"checksumSHA1": "W+fA3n3Dj+UNznwcRVmVR5fV/2E=",
"path": "github.com/gobuffalo/packd",
"revision": "b2e760a5f0ff7ebdac92dc43c2af310351dbc03f",
"revisionTime": "2018-11-11T19:53:23Z"
},
{
"checksumSHA1": "3VDhVe92VF1ZEx1W4RDOZNaEub8=",
"path": "github.com/gobuffalo/packr", "path": "github.com/gobuffalo/packr",
"revision": "147bee9cde84aeca6693d1b1aedc595298f24d5b", "revision": "04ea9bbf60d4f0e275bcd529eac2d1055db7396b",
"revisionTime": "2018-07-22T18:22:49Z" "revisionTime": "2018-11-12T17:37:45Z"
}, },
{ {
"checksumSHA1": "g/V4qrXjUGG9B+e3hB+4NAYJ5Gs=", "checksumSHA1": "g/V4qrXjUGG9B+e3hB+4NAYJ5Gs=",
@ -173,10 +185,10 @@
"revisionTime": "2018-06-06T15:52:11Z" "revisionTime": "2018-06-06T15:52:11Z"
}, },
{ {
"checksumSHA1": "F45m6rdNc9pgzu5vWN1n1Rwbk4A=", "checksumSHA1": "SVzanxaLYK43jeNWQqNs5P+uhow=",
"path": "github.com/jmoiron/sqlx", "path": "github.com/jmoiron/sqlx",
"revision": "0dae4fefe7c0e190f7b5a78dac28a1c82cc8d849", "revision": "82935fac6c1a317907c8f43ed3f7f85ea844a78b",
"revisionTime": "2018-06-14T18:06:43Z" "revisionTime": "2018-10-24T16:34:19Z"
}, },
{ {
"checksumSHA1": "bXFrevmVL5Q2EwYlRHlPihxvAJA=", "checksumSHA1": "bXFrevmVL5Q2EwYlRHlPihxvAJA=",
@ -197,10 +209,10 @@
"revisionTime": "2018-05-17T19:45:57Z" "revisionTime": "2018-05-17T19:45:57Z"
}, },
{ {
"checksumSHA1": "s6eyIXpJ7VAU06FM27GcEBM/5lo=", "checksumSHA1": "xxLSo5tKtXc7jGrR70yoEfza8Cw=",
"path": "github.com/lib/pq", "path": "github.com/lib/pq",
"revision": "90697d60dd844d5ef6ff15135d0203f65d2f53b8", "revision": "9eb73efc1fcc404148b56765b0d3f61d9a5ef8ee",
"revisionTime": "2018-05-23T17:54:26Z" "revisionTime": "2018-10-16T16:26:27Z"
}, },
{ {
"checksumSHA1": "ATnwV0POluBNQEMjPdylodz0oK0=", "checksumSHA1": "ATnwV0POluBNQEMjPdylodz0oK0=",
@ -209,10 +221,16 @@
"revisionTime": "2018-05-23T17:54:26Z" "revisionTime": "2018-05-23T17:54:26Z"
}, },
{ {
"checksumSHA1": "tTx90HHLD8waQ+mlpd7kRzp9kyY=", "checksumSHA1": "jCc5Bvs2cAR/ODaRM5aMffO6qEA=",
"path": "github.com/markbates/oncer",
"revision": "05fccaae8fc423476d98fd4c3e4699ba0fbbde48",
"revisionTime": "2018-10-14T19:46:34Z"
},
{
"checksumSHA1": "/a5Jwxjp4yLm6YcsEnxNHch6O70=",
"path": "github.com/mattn/go-sqlite3", "path": "github.com/mattn/go-sqlite3",
"revision": "b3511bfdd742af558b54eb6160aca9446d762a19", "revision": "eed79b86572f203cc92e92048ba06b56a8ca06e8",
"revisionTime": "2018-07-18T00:29:43Z" "revisionTime": "2018-11-09T09:53:04Z"
}, },
{ {
"checksumSHA1": "Byaa390AhImHSUlSXz14tOCRBHo=", "checksumSHA1": "Byaa390AhImHSUlSXz14tOCRBHo=",
@ -227,10 +245,10 @@
"revisionTime": "2018-03-11T21:45:15Z" "revisionTime": "2018-03-11T21:45:15Z"
}, },
{ {
"checksumSHA1": "N11d95+f/kzeTCb408INaeNcPCE=", "checksumSHA1": "0MwBZflejTXJSgMEGbFGpQPhcl4=",
"path": "github.com/rubenv/sql-migrate", "path": "github.com/rubenv/sql-migrate",
"revision": "3f452fc0ebebbb784fdab91f7bc79a31dcacab5c", "revision": "ba2c6a7295c59448dbc195cef2f41df5163b3892",
"revisionTime": "2018-07-04T11:13:56Z" "revisionTime": "2018-11-06T12:12:04Z"
}, },
{ {
"checksumSHA1": "rkHieYOGBsxJaDbC7vl/e4KSatw=", "checksumSHA1": "rkHieYOGBsxJaDbC7vl/e4KSatw=",