use packr to embed migration files & web assets into binary. closes #8

This commit is contained in:
Danny 2018-05-08 14:57:08 +02:00
parent 61753a713c
commit 0683cac2bf
25 changed files with 1407 additions and 68 deletions

View File

@ -56,7 +56,3 @@ $(EXECUTABLE): $(SOURCES)
docker:
docker build -t metalmatze/ana:latest .
.PHONY: assets
assets:
if [ ! -d "node_modules" ]; then npm install; fi
gulp

View File

@ -17,7 +17,7 @@ For getting a development version of Fathom up & running, please go through the
1. compile into binary & prepare assets: `make all`
1. Rename `.env.example` to `.env` and set your database credentials.
1. Create your user account: `fathom register <email> <password>`
1. Start the webserver: `fathom server --webroot=./build --port=8080` & visit **localhost:8080** to access your analytics dashboard.
1. Start the webserver: `fathom server --port=8080` & visit **localhost:8080** to access your analytics dashboard.
To start tracking, include the following JavaScript on your site and replace `yourfathom.com` with the URL to your Fathom instance.

View File

@ -6,7 +6,6 @@ import (
"github.com/joho/godotenv"
"github.com/usefathom/fathom/pkg/commands"
"github.com/usefathom/fathom/pkg/counter"
"github.com/usefathom/fathom/pkg/datastore"
"github.com/kelseyhightower/envconfig"
@ -27,8 +26,6 @@ var (
registerPassword = register.Arg("password", "Password for user.").Required().String()
server = app.Command("server", "Start webserver.").Default()
serverPort = server.Flag("port", "Port to listen on.").Default("8080").Int()
serverWebRoot = server.Flag("webroot", "Root directory of static assets").Default("./").String()
archive = app.Command("archive", "Process unarchived data.")
)
func main() {
@ -46,15 +43,8 @@ func main() {
commands.Register(*registerEmail, *registerPassword)
case "server":
commands.Server(*serverPort, *serverWebRoot)
case "archive":
err := counter.Aggregate()
if err != nil {
log.Warn(err)
}
commands.Server(*serverPort)
}
}
func parseConfig() *Config {
@ -65,6 +55,11 @@ func parseConfig() *Config {
log.Fatalf("Error parsing Fathom config from environment: %s", err)
}
// alias sqlite to sqlite3
if cfg.Database.Driver == "sqlite" {
cfg.Database.Driver = "sqlite3"
}
// if secret key is empty, use a randomly generated one to ease first-time installation
if cfg.Secret == "" {
cfg.Secret = randomString(40)
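The `randomString` helper referenced above is defined elsewhere in the repository and isn't part of this diff. Purely as an illustration, a self-contained sketch of such a helper might look like the following (the use of `crypto/rand` with hex encoding is an assumption, not the project's actual implementation):

```go
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"log"
)

// randomString returns a random hex string of length n.
// Sketch only; the real helper in this repository may differ.
func randomString(n int) string {
	b := make([]byte, (n+1)/2)
	if _, err := rand.Read(b); err != nil {
		log.Fatalf("could not generate random secret: %s", err)
	}
	return hex.EncodeToString(b)[:n]
}

func main() {
	fmt.Println(randomString(40))
}
```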

View File

@ -1,11 +1,12 @@
package api
import (
"github.com/gobuffalo/packr"
"github.com/gorilla/mux"
"net/http"
)
func Routes(webroot string) *mux.Router {
func Routes() *mux.Router {
// register routes
r := mux.NewRouter()
r.Handle("/collect", NewCollectHandler()).Methods(http.MethodGet)
@ -21,7 +22,7 @@ func Routes(webroot string) *mux.Router {
r.Handle("/api/stats/pages", Authorize(GetPageStatsHandler)).Methods(http.MethodGet)
r.Handle("/api/stats/referrers", Authorize(GetReferrerStatsHandler)).Methods(http.MethodGet)
r.Path("/tracker.js").Handler(http.FileServer(http.Dir(webroot + "/js/")))
r.PathPrefix("/").Handler(http.FileServer(http.Dir(webroot)))
r.Path("/tracker.js").Handler(http.FileServer(packr.NewBox("./../../build/js")))
r.PathPrefix("/").Handler(http.FileServer(packr.NewBox("./../../build")))
return r
}

View File

@ -11,9 +11,9 @@ import (
)
// Server starts the HTTP server, listening on the given port
func Server(port int, webroot string) {
r := api.Routes(webroot)
log.Printf("Now serving %s on port %d/\n", webroot, port)
func Server(port int) {
r := api.Routes()
log.Printf("Now serving on port %d/\n", port)
err := http.ListenAndServe(fmt.Sprintf(":%d", port), handlers.LoggingHandler(os.Stdout, r))
if err != nil {
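The hunk above is truncated at the error check. Pieced together, the updated `Server` command reads roughly as sketched below (the exact import paths, the logging package, and the body of the truncated `if` block are assumptions):

```go
package commands

import (
	"fmt"
	"net/http"
	"os"

	"github.com/gorilla/handlers"
	log "github.com/sirupsen/logrus"
	"github.com/usefathom/fathom/pkg/api"
)

// Server starts the HTTP server, listening on the given port.
// Static assets are embedded via packr, so no webroot flag is needed anymore.
func Server(port int) {
	r := api.Routes()
	log.Printf("Now serving on port %d/\n", port)

	err := http.ListenAndServe(fmt.Sprintf(":%d", port), handlers.LoggingHandler(os.Stdout, r))
	if err != nil {
		log.Fatalf("could not start server: %s", err)
	}
}
```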

View File

@ -6,6 +6,7 @@ import (
_ "github.com/go-sql-driver/mysql" // mysql driver
"github.com/jmoiron/sqlx"
//_ "github.com/lib/pq" // postgresql driver
"github.com/gobuffalo/packr"
_ "github.com/mattn/go-sqlite3" //sqlite3 driver
migrate "github.com/rubenv/sql-migrate"
log "github.com/sirupsen/logrus"
@ -34,10 +35,9 @@ func New(c *Config) *sqlx.DB {
// TODO: Move to command (but still auto-run on boot).
func runMigrations(driver string) {
migrations := migrate.FileMigrationSource{
Dir: "pkg/datastore/migrations/" + driver, // TODO: Move to bindata
migrations := &migrate.PackrMigrationSource{
Box: packr.NewBox("./migrations/" + driver),
}
migrate.SetTable("migrations")
n, err := migrate.Exec(dbx.DB, driver, migrations, migrate.Up)
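For context, the resulting `runMigrations` reads roughly as sketched below (the hunk above is truncated, so the logging after `migrate.Exec` and the exact declaration of `dbx` are assumptions):

```go
package datastore

import (
	"github.com/gobuffalo/packr"
	"github.com/jmoiron/sqlx"
	migrate "github.com/rubenv/sql-migrate"
	log "github.com/sirupsen/logrus"
)

var dbx *sqlx.DB // set when the datastore is initialized

// runMigrations applies all pending migrations for the given driver.
// The SQL files are embedded via packr at build time; during development
// they are read straight from ./migrations/<driver> on disk.
func runMigrations(driver string) {
	migrations := &migrate.PackrMigrationSource{
		Box: packr.NewBox("./migrations/" + driver),
	}

	migrate.SetTable("migrations")
	n, err := migrate.Exec(dbx.DB, driver, migrations, migrate.Up)
	if err != nil {
		log.Fatalf("error running database migrations: %s", err)
	}
	log.Printf("applied %d database migrations", n)
}
```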

8
vendor/github.com/gobuffalo/packr/LICENSE.txt generated vendored Normal file
View File

@ -0,0 +1,8 @@
The MIT License (MIT)
Copyright (c) 2016 Mark Bates
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

145
vendor/github.com/gobuffalo/packr/README.md generated vendored Normal file
View File

@ -0,0 +1,145 @@
# packr
[![GoDoc](https://godoc.org/github.com/gobuffalo/packr?status.svg)](https://godoc.org/github.com/gobuffalo/packr)
Packr is a simple solution for bundling static assets inside of Go binaries. Most importantly it does it in a way that is friendly to developers while they are developing.
## Intro Video
To get an idea of the what and why of packr, please enjoy this short video: [https://vimeo.com/219863271](https://vimeo.com/219863271).
## Installation
```text
$ go get -u github.com/gobuffalo/packr/...
```
## Usage
### In Code
The first step in using Packr is to create a new box. A box represents a folder on disk. Once you have a box you can get `string` or `[]byte` representations of the file.
```go
// set up a new box by giving it a (relative) path to a folder on disk:
box := packr.NewBox("./templates")
// Get the string representation of a file:
html := box.String("index.html")
// Get the string representation of a file, or an error if it doesn't exist:
html, err := box.MustString("index.html")
// Get the []byte representation of a file:
html := box.Bytes("index.html")
// Get the []byte representation of a file, or an error if it doesn't exist:
html, err := box.MustBytes("index.html")
```
### What is a Box?
A box represents a folder, and any sub-folders, on disk that you want to have access to in your binary. When compiling a binary using the `packr` CLI the contents of the folder will be converted into Go files that can be compiled inside of a "standard" go binary. Inside of the compiled binary the files will be read from memory. When working locally the files will be read directly off of disk. This is a seamless switch that doesn't require any special attention on your part.
#### Example
Assume the following directory structure:
```
├── main.go
└── templates
├── admin
│   └── index.html
└── index.html
```
The following program will read the `./templates/admin/index.html` file and print it out.
```go
package main
import (
"fmt"
"github.com/gobuffalo/packr"
)
func main() {
box := packr.NewBox("./templates")
s := box.String("admin/index.html")
fmt.Println(s)
}
```
### Development Made Easy
In order to get static files into a Go binary, those files must first be converted to Go code. To do that, Packr ships with a few tools to help build binaries. See below.
During development, however, it is painful to have to keep running a tool to compile those files.
Packr uses the following resolution rules when looking for a file:
1. Look for the file in-memory (inside a Go binary)
1. Look for the file on disk (during development)
Because Packr knows how to fall through to the file system, developers don't need to worry about constantly compiling their static files into a binary. They can work unimpeded.
Packr takes file resolution a step further. When declaring a new box you use a relative path, `./templates`. When Packr receives this call it calculates the absolute path to that directory. This means Packr is guaranteed to find your files correctly, even if you're not running in the directory that the box was created in. This helps with the problem of testing, where Go changes the `pwd` for each package, making relative paths difficult to work with. This is not a problem when using Packr.
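As a small illustration of that guarantee, the sketch below (which assumes a hypothetical package with a `./templates` folder next to its source file) keeps resolving its files even when tests run from a different working directory:

```go
package web

import "github.com/gobuffalo/packr"

// The path is relative to this source file, not to the process's
// working directory, so the box resolves correctly from tests too.
var templates = packr.NewBox("./templates")

// IndexHTML returns the raw index template, or an error if it is
// missing from both the embedded data and the on-disk fallback.
func IndexHTML() (string, error) {
	return templates.MustString("index.html")
}
```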
---
## Usage with HTTP
A box implements the [`http.FileSystem`](https://golang.org/pkg/net/http/#FileSystem) interface, meaning it can be used to serve static files.
```go
package main
import (
"net/http"
"github.com/gobuffalo/packr"
)
func main() {
box := packr.NewBox("./templates")
http.Handle("/", http.FileServer(box))
http.ListenAndServe(":3000", nil)
}
```
---
## Building a Binary (the easy way)
When it comes time to build, or install, your Go binary, simply use `packr build` or `packr install` just as you would `go build` or `go install`. All flags for the `go` tool are supported and everything works the way you expect; the only difference is that your static assets are now bundled in the generated binary. If you want more control over how this happens, look at the following section on building binaries (the hard way).
## Building a Binary (the hard way)
Before you build your Go binary, run the `packr` command first. It will look for all the boxes in your code and then generate `.go` files that pack the static files into bytes that can be bundled into the Go binary.
```
$ packr
```
Then run your `go build` command like normal.
*NOTE*: It is not recommended to check in these generated `-packr.go` files. They can be large, and can easily become out of date if you're not careful. It is recommended that you always run `packr clean` after running the `packr` tool.
#### Cleaning Up
When you're done it is recommended that you run the `packr clean` command. This will remove all of the generated files that Packr created for you.
```
$ packr clean
```
Why do you want to do this? Packr first looks to the information stored in these generated files; if the information isn't there, it looks to disk. This makes it easy to work with in development.
---
## Debugging
The `packr` command passes all arguments down to the underlying `go` command; this includes the `-v` flag to print out `go build` information. Packr looks for the `-v` flag, and will turn on its own verbose logging. This is very useful for trying to understand what the `packr` command is doing when it is run.

241
vendor/github.com/gobuffalo/packr/box.go generated vendored Normal file
View File

@ -0,0 +1,241 @@
package packr
import (
"bytes"
"compress/gzip"
"io/ioutil"
"net/http"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"github.com/pkg/errors"
)
var (
ErrResOutsideBox = errors.New("Can't find a resource outside the box")
)
// NewBox returns a Box that can be used to
// retrieve files from either disk or the embedded
// binary.
func NewBox(path string) Box {
var cd string
if !filepath.IsAbs(path) {
_, filename, _, _ := runtime.Caller(1)
cd = filepath.Dir(filename)
}
// this little hack courtesy of the `-cover` flag!!
cov := filepath.Join("_test", "_obj_test")
cd = strings.Replace(cd, string(filepath.Separator)+cov, "", 1)
if !filepath.IsAbs(cd) && cd != "" {
cd = filepath.Join(GoPath(), "src", cd)
}
return Box{
Path: path,
callingDir: cd,
data: map[string][]byte{},
}
}
// Box represents a folder on disk that you want to
// have access to in the built Go binary.
type Box struct {
Path string
callingDir string
data map[string][]byte
directories map[string]bool
}
func (b Box) AddString(path string, t string) {
b.AddBytes(path, []byte(t))
}
func (b Box) AddBytes(path string, t []byte) {
b.data[path] = t
}
// String of the file asked for or an empty string.
func (b Box) String(name string) string {
return string(b.Bytes(name))
}
// MustString returns either the string of the requested
// file or an error if it can not be found.
func (b Box) MustString(name string) (string, error) {
bb, err := b.MustBytes(name)
return string(bb), err
}
// Bytes of the file asked for or an empty byte slice.
func (b Box) Bytes(name string) []byte {
bb, _ := b.MustBytes(name)
return bb
}
// MustBytes returns either the byte slice of the requested
// file or an error if it can not be found.
func (b Box) MustBytes(name string) ([]byte, error) {
f, err := b.find(name)
if err == nil {
bb := &bytes.Buffer{}
bb.ReadFrom(f)
return bb.Bytes(), err
}
return nil, err
}
// Has returns true if the resource exists in the box
func (b Box) Has(name string) bool {
_, err := b.find(name)
if err != nil {
return false
}
return true
}
func (b Box) decompress(bb []byte) []byte {
reader, err := gzip.NewReader(bytes.NewReader(bb))
if err != nil {
return bb
}
data, err := ioutil.ReadAll(reader)
if err != nil {
return bb
}
return data
}
func (b Box) find(name string) (File, error) {
if bb, ok := b.data[name]; ok {
return newVirtualFile(name, bb), nil
}
if b.directories == nil {
b.indexDirectories()
}
cleanName := filepath.ToSlash(filepath.Clean(name))
// Ensure name is not outside the box
if strings.HasPrefix(cleanName, "../") {
return nil, ErrResOutsideBox
}
// Absolute name is considered as relative to the box root
cleanName = strings.TrimPrefix(cleanName, "/")
// Try to get the resource from the box
if _, ok := data[b.Path]; ok {
if bb, ok := data[b.Path][cleanName]; ok {
bb = b.decompress(bb)
return newVirtualFile(cleanName, bb), nil
}
if filepath.Ext(cleanName) != "" {
// The Handler created by http.FileSystem checks for those errors and
// returns http.StatusNotFound instead of http.StatusInternalServerError.
return nil, os.ErrNotExist
}
if _, ok := b.directories[cleanName]; ok {
return newVirtualDir(cleanName), nil
}
return nil, os.ErrNotExist
}
// Not found in the box virtual fs, try to get it from the file system
cleanName = filepath.FromSlash(cleanName)
p := filepath.Join(b.callingDir, b.Path, cleanName)
return fileFor(p, cleanName)
}
type WalkFunc func(string, File) error
func (b Box) Walk(wf WalkFunc) error {
if data[b.Path] == nil {
base, err := filepath.EvalSymlinks(filepath.Join(b.callingDir, b.Path))
if err != nil {
return errors.WithStack(err)
}
return filepath.Walk(base, func(path string, info os.FileInfo, err error) error {
cleanName, err := filepath.Rel(base, path)
if err != nil {
cleanName = strings.TrimPrefix(path, base)
}
cleanName = filepath.ToSlash(filepath.Clean(cleanName))
cleanName = strings.TrimPrefix(cleanName, "/")
cleanName = filepath.FromSlash(cleanName)
if info == nil || info.IsDir() {
return nil
}
file, err := fileFor(path, cleanName)
if err != nil {
return err
}
return wf(cleanName, file)
})
}
for n := range data[b.Path] {
f, err := b.find(n)
if err != nil {
return err
}
err = wf(n, f)
if err != nil {
return err
}
}
return nil
}
// Open returns a File using the http.File interface
func (b Box) Open(name string) (http.File, error) {
return b.find(name)
}
// List shows "What's in the box?"
func (b Box) List() []string {
var keys []string
if b.data == nil || len(b.data) == 0 {
b.Walk(func(path string, info File) error {
finfo, _ := info.FileInfo()
if !finfo.IsDir() {
keys = append(keys, finfo.Name())
}
return nil
})
} else {
for k := range b.data {
keys = append(keys, k)
}
}
return keys
}
func (b *Box) indexDirectories() {
b.directories = map[string]bool{}
if _, ok := data[b.Path]; ok {
for name := range data[b.Path] {
prefix, _ := path.Split(name)
// Even on Windows the suffix appears to be a /
prefix = strings.TrimSuffix(prefix, "/")
b.directories[prefix] = true
}
}
}
func fileFor(p string, name string) (File, error) {
fi, err := os.Stat(p)
if err != nil {
return nil, err
}
if fi.IsDir() {
return newVirtualDir(p), nil
}
if bb, err := ioutil.ReadFile(p); err == nil {
return newVirtualFile(name, bb), nil
}
return nil, os.ErrNotExist
}

27
vendor/github.com/gobuffalo/packr/env.go generated vendored Normal file
View File

@ -0,0 +1,27 @@
package packr
import (
"go/build"
"os"
"strings"
)
// GoPath returns the current GOPATH env var
// or if it's missing, the default.
func GoPath() string {
go_path := strings.Split(os.Getenv("GOPATH"), string(os.PathListSeparator))
if len(go_path) == 0 || go_path[0] == "" {
return build.Default.GOPATH
}
return go_path[0]
}
// GoBin returns the current GO_BIN env var
// or if it's missing, a default of "go"
func GoBin() string {
go_bin := os.Getenv("GO_BIN")
if go_bin == "" {
return "go"
}
return go_bin
}

15
vendor/github.com/gobuffalo/packr/file.go generated vendored Normal file
View File

@ -0,0 +1,15 @@
package packr
import (
"io"
"os"
)
type File interface {
io.ReadCloser
io.Writer
FileInfo() (os.FileInfo, error)
Readdir(count int) ([]os.FileInfo, error)
Seek(offset int64, whence int) (int64, error)
Stat() (os.FileInfo, error)
}

38
vendor/github.com/gobuffalo/packr/file_info.go generated vendored Normal file
View File

@ -0,0 +1,38 @@
package packr
import (
"os"
"time"
)
type fileInfo struct {
Path string
Contents []byte
size int64
modTime time.Time
isDir bool
}
func (f fileInfo) Name() string {
return f.Path
}
func (f fileInfo) Size() int64 {
return f.size
}
func (f fileInfo) Mode() os.FileMode {
return 0444
}
func (f fileInfo) ModTime() time.Time {
return f.modTime
}
func (f fileInfo) IsDir() bool {
return f.isDir
}
func (f fileInfo) Sys() interface{} {
return nil
}

48
vendor/github.com/gobuffalo/packr/packr.go generated vendored Normal file
View File

@ -0,0 +1,48 @@
package packr
import (
"bytes"
"compress/gzip"
"encoding/json"
"sync"
)
var gil = &sync.Mutex{}
var data = map[string]map[string][]byte{}
// PackBytes packs bytes for a file into a box.
func PackBytes(box string, name string, bb []byte) {
gil.Lock()
defer gil.Unlock()
if _, ok := data[box]; !ok {
data[box] = map[string][]byte{}
}
data[box][name] = bb
}
// PackBytesGzip gzip-compresses the bytes for a file and packs them into a box.
func PackBytesGzip(box string, name string, bb []byte) error {
var buf bytes.Buffer
w := gzip.NewWriter(&buf)
_, err := w.Write(bb)
if err != nil {
return err
}
err = w.Close()
if err != nil {
return err
}
PackBytes(box, name, buf.Bytes())
return nil
}
// PackJSONBytes packs JSON encoded bytes for a file into a box.
func PackJSONBytes(box string, name string, jbb string) error {
var bb []byte
err := json.Unmarshal([]byte(jbb), &bb)
if err != nil {
return err
}
PackBytes(box, name, bb)
return nil
}

13
vendor/github.com/gobuffalo/packr/physical_file.go generated vendored Normal file
View File

@ -0,0 +1,13 @@
package packr
import "os"
var _ File = physicalFile{}
type physicalFile struct {
*os.File
}
func (p physicalFile) FileInfo() (os.FileInfo, error) {
return os.Stat(p.Name())
}

57
vendor/github.com/gobuffalo/packr/virtual_file.go generated vendored Normal file
View File

@ -0,0 +1,57 @@
package packr
import (
"bytes"
"fmt"
"os"
"time"
)
var virtualFileModTime = time.Now()
var _ File = virtualFile{}
type virtualFile struct {
*bytes.Reader
Name string
info fileInfo
}
func (f virtualFile) FileInfo() (os.FileInfo, error) {
return f.info, nil
}
func (f virtualFile) Close() error {
return nil
}
func (f virtualFile) Write(p []byte) (n int, err error) {
return 0, fmt.Errorf("not implemented")
}
func (f virtualFile) Readdir(count int) ([]os.FileInfo, error) {
return []os.FileInfo{f.info}, nil
}
func (f virtualFile) Stat() (os.FileInfo, error) {
return f.info, nil
}
func newVirtualFile(name string, b []byte) File {
return virtualFile{
Reader: bytes.NewReader(b),
Name: name,
info: fileInfo{
Path: name,
Contents: b,
size: int64(len(b)),
modTime: virtualFileModTime,
},
}
}
func newVirtualDir(name string) File {
var b []byte
v := newVirtualFile(name, b).(virtualFile)
v.info.isDir = true
return v
}

23
vendor/github.com/pkg/errors/LICENSE generated vendored Normal file
View File

@ -0,0 +1,23 @@
Copyright (c) 2015, Dave Cheney <dave@cheney.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

52
vendor/github.com/pkg/errors/README.md generated vendored Normal file
View File

@ -0,0 +1,52 @@
# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge)
Package errors provides simple error handling primitives.
`go get github.com/pkg/errors`
The traditional error handling idiom in Go is roughly akin to
```go
if err != nil {
return err
}
```
which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
## Adding context to an error
The errors.Wrap function returns a new error that adds context to the original error. For example
```go
_, err := ioutil.ReadAll(r)
if err != nil {
return errors.Wrap(err, "read failed")
}
```
## Retrieving the cause of an error
Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
```go
type causer interface {
Cause() error
}
```
`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
```go
switch err := errors.Cause(err).(type) {
case *MyError:
// handle specifically
default:
// unknown error
}
```
[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
## Contributing
We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high.
Before proposing a change, please discuss your change by raising an issue.
## License
BSD-2-Clause

32
vendor/github.com/pkg/errors/appveyor.yml generated vendored Normal file
View File

@ -0,0 +1,32 @@
version: build-{build}.{branch}
clone_folder: C:\gopath\src\github.com\pkg\errors
shallow_clone: true # for startup speed
environment:
GOPATH: C:\gopath
platform:
- x64
# http://www.appveyor.com/docs/installed-software
install:
# some helpful output for debugging builds
- go version
- go env
# pre-installed MinGW at C:\MinGW is 32bit only
# but MSYS2 at C:\msys64 has mingw64
- set PATH=C:\msys64\mingw64\bin;%PATH%
- gcc --version
- g++ --version
build_script:
- go install -v ./...
test_script:
- set PATH=C:\gopath\bin;%PATH%
- go test -v ./...
#artifacts:
# - path: '%GOPATH%\bin\*.exe'
deploy: off

269
vendor/github.com/pkg/errors/errors.go generated vendored Normal file
View File

@ -0,0 +1,269 @@
// Package errors provides simple error handling primitives.
//
// The traditional error handling idiom in Go is roughly akin to
//
// if err != nil {
// return err
// }
//
// which applied recursively up the call stack results in error reports
// without context or debugging information. The errors package allows
// programmers to add context to the failure path in their code in a way
// that does not destroy the original value of the error.
//
// Adding context to an error
//
// The errors.Wrap function returns a new error that adds context to the
// original error by recording a stack trace at the point Wrap is called,
// and the supplied message. For example
//
// _, err := ioutil.ReadAll(r)
// if err != nil {
// return errors.Wrap(err, "read failed")
// }
//
// If additional control is required the errors.WithStack and errors.WithMessage
// functions destructure errors.Wrap into its component operations of annotating
// an error with a stack trace and a message, respectively.
//
// Retrieving the cause of an error
//
// Using errors.Wrap constructs a stack of errors, adding context to the
// preceding error. Depending on the nature of the error it may be necessary
// to reverse the operation of errors.Wrap to retrieve the original error
// for inspection. Any error value which implements this interface
//
// type causer interface {
// Cause() error
// }
//
// can be inspected by errors.Cause. errors.Cause will recursively retrieve
// the topmost error which does not implement causer, which is assumed to be
// the original cause. For example:
//
// switch err := errors.Cause(err).(type) {
// case *MyError:
// // handle specifically
// default:
// // unknown error
// }
//
// causer interface is not exported by this package, but is considered a part
// of stable public API.
//
// Formatted printing of errors
//
// All error values returned from this package implement fmt.Formatter and can
// be formatted by the fmt package. The following verbs are supported
//
// %s print the error. If the error has a Cause it will be
// printed recursively
// %v see %s
// %+v extended format. Each Frame of the error's StackTrace will
// be printed in detail.
//
// Retrieving the stack trace of an error or wrapper
//
// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
// invoked. This information can be retrieved with the following interface.
//
// type stackTracer interface {
// StackTrace() errors.StackTrace
// }
//
// Where errors.StackTrace is defined as
//
// type StackTrace []Frame
//
// The Frame type represents a call site in the stack trace. Frame supports
// the fmt.Formatter interface that can be used for printing information about
// the stack trace of this error. For example:
//
// if err, ok := err.(stackTracer); ok {
// for _, f := range err.StackTrace() {
// fmt.Printf("%+s:%d", f)
// }
// }
//
// stackTracer interface is not exported by this package, but is considered a part
// of stable public API.
//
// See the documentation for Frame.Format for more details.
package errors
import (
"fmt"
"io"
)
// New returns an error with the supplied message.
// New also records the stack trace at the point it was called.
func New(message string) error {
return &fundamental{
msg: message,
stack: callers(),
}
}
// Errorf formats according to a format specifier and returns the string
// as a value that satisfies error.
// Errorf also records the stack trace at the point it was called.
func Errorf(format string, args ...interface{}) error {
return &fundamental{
msg: fmt.Sprintf(format, args...),
stack: callers(),
}
}
// fundamental is an error that has a message and a stack, but no caller.
type fundamental struct {
msg string
*stack
}
func (f *fundamental) Error() string { return f.msg }
func (f *fundamental) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
io.WriteString(s, f.msg)
f.stack.Format(s, verb)
return
}
fallthrough
case 's':
io.WriteString(s, f.msg)
case 'q':
fmt.Fprintf(s, "%q", f.msg)
}
}
// WithStack annotates err with a stack trace at the point WithStack was called.
// If err is nil, WithStack returns nil.
func WithStack(err error) error {
if err == nil {
return nil
}
return &withStack{
err,
callers(),
}
}
type withStack struct {
error
*stack
}
func (w *withStack) Cause() error { return w.error }
func (w *withStack) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
fmt.Fprintf(s, "%+v", w.Cause())
w.stack.Format(s, verb)
return
}
fallthrough
case 's':
io.WriteString(s, w.Error())
case 'q':
fmt.Fprintf(s, "%q", w.Error())
}
}
// Wrap returns an error annotating err with a stack trace
// at the point Wrap is called, and the supplied message.
// If err is nil, Wrap returns nil.
func Wrap(err error, message string) error {
if err == nil {
return nil
}
err = &withMessage{
cause: err,
msg: message,
}
return &withStack{
err,
callers(),
}
}
// Wrapf returns an error annotating err with a stack trace
// at the point Wrapf is called, and the format specifier.
// If err is nil, Wrapf returns nil.
func Wrapf(err error, format string, args ...interface{}) error {
if err == nil {
return nil
}
err = &withMessage{
cause: err,
msg: fmt.Sprintf(format, args...),
}
return &withStack{
err,
callers(),
}
}
// WithMessage annotates err with a new message.
// If err is nil, WithMessage returns nil.
func WithMessage(err error, message string) error {
if err == nil {
return nil
}
return &withMessage{
cause: err,
msg: message,
}
}
type withMessage struct {
cause error
msg string
}
func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
func (w *withMessage) Cause() error { return w.cause }
func (w *withMessage) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
fmt.Fprintf(s, "%+v\n", w.Cause())
io.WriteString(s, w.msg)
return
}
fallthrough
case 's', 'q':
io.WriteString(s, w.Error())
}
}
// Cause returns the underlying cause of the error, if possible.
// An error value has a cause if it implements the following
// interface:
//
// type causer interface {
// Cause() error
// }
//
// If the error does not implement Cause, the original error will
// be returned. If the error is nil, nil will be returned without further
// investigation.
func Cause(err error) error {
type causer interface {
Cause() error
}
for err != nil {
cause, ok := err.(causer)
if !ok {
break
}
err = cause.Cause()
}
return err
}

147
vendor/github.com/pkg/errors/stack.go generated vendored Normal file
View File

@ -0,0 +1,147 @@
package errors
import (
"fmt"
"io"
"path"
"runtime"
"strings"
)
// Frame represents a program counter inside a stack frame.
type Frame uintptr
// pc returns the program counter for this frame;
// multiple frames may have the same PC value.
func (f Frame) pc() uintptr { return uintptr(f) - 1 }
// file returns the full path to the file that contains the
// function for this Frame's pc.
func (f Frame) file() string {
fn := runtime.FuncForPC(f.pc())
if fn == nil {
return "unknown"
}
file, _ := fn.FileLine(f.pc())
return file
}
// line returns the line number of source code of the
// function for this Frame's pc.
func (f Frame) line() int {
fn := runtime.FuncForPC(f.pc())
if fn == nil {
return 0
}
_, line := fn.FileLine(f.pc())
return line
}
// Format formats the frame according to the fmt.Formatter interface.
//
// %s source file
// %d source line
// %n function name
// %v equivalent to %s:%d
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
// %+s function name and path of source file relative to the compile time
// GOPATH separated by \n\t (<funcname>\n\t<path>)
// %+v equivalent to %+s:%d
func (f Frame) Format(s fmt.State, verb rune) {
switch verb {
case 's':
switch {
case s.Flag('+'):
pc := f.pc()
fn := runtime.FuncForPC(pc)
if fn == nil {
io.WriteString(s, "unknown")
} else {
file, _ := fn.FileLine(pc)
fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
}
default:
io.WriteString(s, path.Base(f.file()))
}
case 'd':
fmt.Fprintf(s, "%d", f.line())
case 'n':
name := runtime.FuncForPC(f.pc()).Name()
io.WriteString(s, funcname(name))
case 'v':
f.Format(s, 's')
io.WriteString(s, ":")
f.Format(s, 'd')
}
}
// StackTrace is a stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame
// Format formats the stack of Frames according to the fmt.Formatter interface.
//
// %s lists source files for each Frame in the stack
// %v lists the source file and line number for each Frame in the stack
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
// %+v Prints filename, function, and line number for each Frame in the stack.
func (st StackTrace) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
switch {
case s.Flag('+'):
for _, f := range st {
fmt.Fprintf(s, "\n%+v", f)
}
case s.Flag('#'):
fmt.Fprintf(s, "%#v", []Frame(st))
default:
fmt.Fprintf(s, "%v", []Frame(st))
}
case 's':
fmt.Fprintf(s, "%s", []Frame(st))
}
}
// stack represents a stack of program counters.
type stack []uintptr
func (s *stack) Format(st fmt.State, verb rune) {
switch verb {
case 'v':
switch {
case st.Flag('+'):
for _, pc := range *s {
f := Frame(pc)
fmt.Fprintf(st, "\n%+v", f)
}
}
}
}
func (s *stack) StackTrace() StackTrace {
f := make([]Frame, len(*s))
for i := 0; i < len(f); i++ {
f[i] = Frame((*s)[i])
}
return f
}
func callers() *stack {
const depth = 32
var pcs [depth]uintptr
n := runtime.Callers(3, pcs[:])
var st stack = pcs[0:n]
return &st
}
// funcname removes the path prefix component of a function's name reported by func.Name().
func funcname(name string) string {
i := strings.LastIndex(name, "/")
name = name[i+1:]
i = strings.Index(name, ".")
return name[i+1:]
}

21
vendor/github.com/rubenv/sql-migrate/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
MIT License
Copyright (C) 2014-2017 by Ruben Vermeersch <ruben@rocketeer.be>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -21,17 +21,20 @@ Using [modl](https://github.com/jmoiron/modl)? Check out [modl-migrate](https://
To install the library and command line program, use the following:
```bash
go get github.com/rubenv/sql-migrate/...
go get -v github.com/rubenv/sql-migrate/...
```
## Usage
### As a standalone tool
```
$ sql-migrate --help
usage: sql-migrate [--version] [--help] <command> [<args>]
Available commands are:
down Undo a database migration
new Create a new migration
redo Reapply the last migration
status Show migration status
up Migrates the database to the most recent version available
@ -72,6 +75,8 @@ Options:
-dryrun Don't apply migrations, just print them.
```
The `new` command creates a new empty migration template using the following pattern `<current time>-<name>.sql`.
The `up` command applies all available migrations. By contrast, `down` will only apply one migration by default. This behavior can be changed for both by using the `-limit` parameter.
The `redo` command will unapply the last migration and reapply it. This is useful during development, when you're writing migrations.
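For illustration, a freshly generated migration might be named something like `20180508120000-create_users.sql` (the timestamp and name here are made up) and then filled in with the usual `-- +migrate Up` / `-- +migrate Down` sections:

```sql
-- +migrate Up
CREATE TABLE users (
    id INTEGER PRIMARY KEY,
    email TEXT NOT NULL
);

-- +migrate Down
DROP TABLE users;
```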
@ -88,7 +93,22 @@ $ sql-migrate status
+---------------+-----------------------------------------+
```
### MySQL Caveat
If you are using MySQL, you must append `?parseTime=true` to the `datasource` configuration. For example:
```yml
production:
dialect: mysql
datasource: root@/dbname?parseTime=true
dir: migrations/mysql
table: migrations
```
See [here](https://github.com/go-sql-driver/mysql#parsetime) for more information.
### As a library
Import sql-migrate into your application:
```go
@ -114,6 +134,11 @@ migrations := &migrate.FileMigrationSource{
Dir: "db/migrations",
}
// OR: Use migrations from a packr box
migrations := &migrate.PackrMigrationSource{
Box: packr.NewBox("./migrations"),
}
// OR: Use migrations from bindata:
migrations := &migrate.AssetMigrationSource{
Asset: Asset,
@ -157,6 +182,10 @@ DROP TABLE people;
You can put multiple statements in each block, as long as you end them with a semicolon (`;`).
You can alternatively set up a separator string that matches an entire line by setting `sqlparse.LineSeparator`. This
can be used to imitate, for example, MS SQL Query Analyzer functionality where commands can be separated by a line with
contents of `GO`. If `sqlparse.LineSeparator` is matched, it will not be included in the resulting migration scripts.
If you have complex statements which contain semicolons, use `StatementBegin` and `StatementEnd` to indicate boundaries:
```sql
@ -192,8 +221,32 @@ CREATE UNIQUE INDEX people_unique_id_idx CONCURRENTLY ON people (id);
DROP INDEX people_unique_id_idx;
```
## Embedding migrations with [bindata](https://github.com/jteeuwen/go-bindata)
If you like your Go applications self-contained (that is: a single binary): use [bindata](https://github.com/jteeuwen/go-bindata) to embed the migration files.
## Embedding migrations with [packr](https://github.com/gobuffalo/packr)
If you like your Go applications self-contained (that is: a single binary): use [packr](https://github.com/gobuffalo/packr) to embed the migration files.
Just write your migration files as usual, as a set of SQL files in a folder.
Use the `PackrMigrationSource` in your application to find the migrations:
```go
migrations := &migrate.PackrMigrationSource{
Box: packr.NewBox("./migrations"),
}
```
If you already have a box and would like to use a subdirectory:
```go
migrations := &migrate.PackrMigrationSource{
Box: myBox,
Dir: "./migrations",
}
```
## Embedding migrations with [bindata](https://github.com/shuLhan/go-bindata)
As an alternative, but slightly less maintained, you can use [bindata](https://github.com/shuLhan/go-bindata) to embed the migration files.
Just write your migration files as usual, as a set of SQL files in a folder.
@ -220,6 +273,7 @@ Both `Asset` and `AssetDir` are functions provided by bindata.
Then proceed as usual.
## Extending
Adding a new migration source means implementing `MigrationSource`.
```go
@ -230,26 +284,6 @@ type MigrationSource interface {
The resulting slice of migrations will be executed in the given order, so it should usually be sorted by the `Id` field.
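As a rough sketch of such an extension, the hypothetical source below serves a fixed set of migrations defined directly in code (it assumes only the single `FindMigrations() ([]*migrate.Migration, error)` method used throughout this file; the name and contents are made up):

```go
package migrations

import (
	migrate "github.com/rubenv/sql-migrate"
)

// HardcodedMigrationSource is a hypothetical source that serves a fixed
// set of migrations defined directly in code.
type HardcodedMigrationSource struct{}

// FindMigrations returns the migrations in the order they should be applied.
func (HardcodedMigrationSource) FindMigrations() ([]*migrate.Migration, error) {
	return []*migrate.Migration{
		{
			Id:   "1_create_users.sql",
			Up:   []string{"CREATE TABLE users (id INTEGER PRIMARY KEY);"},
			Down: []string{"DROP TABLE users;"},
		},
	}, nil
}
```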
## License
(The MIT License)
Copyright (C) 2014-2016 by Ruben Vermeersch <ruben@rocketeer.be>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
This library is distributed under the [MIT](LICENSE) license.

View File

@ -16,7 +16,7 @@ Installation
To install the library and command line program, use the following:
go get github.com/rubenv/sql-migrate/...
go get -v github.com/rubenv/sql-migrate/...
Command-line tool
@ -27,6 +27,7 @@ The main command is called sql-migrate.
Available commands are:
down Undo a database migration
new Create a new migration
redo Reapply the last migration
status Show migration status
up Migrates the database to the most recent version available
@ -76,6 +77,18 @@ Use the status command to see the state of the applied migrations:
| 2_record.sql | no |
+---------------+-----------------------------------------+
MySQL Caveat
If you are using MySQL, you must append ?parseTime=true to the datasource configuration. For example:
production:
dialect: mysql
datasource: root@/dbname?parseTime=true
dir: migrations/mysql
table: migrations
See https://github.com/go-sql-driver/mysql#parsetime for more information.
Library
Import sql-migrate into your application:
@ -170,9 +183,28 @@ Normally each migration is run within a transaction in order to guarantee that i
-- +migrate Down
DROP INDEX people_unique_id_idx;
Embedding migrations with packr
If you like your Go applications self-contained (that is: a single binary): use packr (https://github.com/gobuffalo/packr) to embed the migration files.
Just write your migration files as usual, as a set of SQL files in a folder.
Use the PackrMigrationSource in your application to find the migrations:
migrations := &migrate.PackrMigrationSource{
Box: packr.NewBox("./migrations"),
}
If you already have a box and would like to use a subdirectory:
migrations := &migrate.PackrMigrationSource{
Box: myBox,
Dir: "./migrations",
}
Embedding migrations with bindata
If you like your Go applications self-contained (that is: a single binary): use bindata (https://github.com/jteeuwen/go-bindata) to embed the migration files.
As an alternative, but slightly less maintained, you can use bindata (https://github.com/shuLhan/go-bindata) to embed the migration files.
Just write your migration files as usual, as a set of SQL files in a folder.

View File

@ -6,7 +6,7 @@ import (
"errors"
"fmt"
"io"
"os"
"net/http"
"path"
"regexp"
"sort"
@ -144,10 +144,25 @@ type MemoryMigrationSource struct {
var _ MigrationSource = (*MemoryMigrationSource)(nil)
func (m MemoryMigrationSource) FindMigrations() ([]*Migration, error) {
// Make sure migrations are sorted
sort.Sort(byId(m.Migrations))
// Make sure migrations are sorted. In order to make the MemoryMigrationSource safe for
// concurrent use we should not mutate it in place. So `FindMigrations` would sort a copy
// of the m.Migrations.
migrations := make([]*Migration, len(m.Migrations))
copy(migrations, m.Migrations)
sort.Sort(byId(migrations))
return migrations, nil
}
return m.Migrations, nil
// A set of migrations loaded from an http.FileServer
type HttpFileSystemMigrationSource struct {
FileSystem http.FileSystem
}
var _ MigrationSource = (*HttpFileSystemMigrationSource)(nil)
func (f HttpFileSystemMigrationSource) FindMigrations() ([]*Migration, error) {
return findMigrations(f.FileSystem)
}
// A set of migrations loaded from a directory.
@ -158,9 +173,14 @@ type FileMigrationSource struct {
var _ MigrationSource = (*FileMigrationSource)(nil)
func (f FileMigrationSource) FindMigrations() ([]*Migration, error) {
filesystem := http.Dir(f.Dir)
return findMigrations(filesystem)
}
func findMigrations(dir http.FileSystem) ([]*Migration, error) {
migrations := make([]*Migration, 0)
file, err := os.Open(f.Dir)
file, err := dir.Open("/")
if err != nil {
return nil, err
}
@ -172,14 +192,14 @@ func (f FileMigrationSource) FindMigrations() ([]*Migration, error) {
for _, info := range files {
if strings.HasSuffix(info.Name(), ".sql") {
file, err := os.Open(path.Join(f.Dir, info.Name()))
file, err := dir.Open(info.Name())
if err != nil {
return nil, err
return nil, fmt.Errorf("Error while opening %s: %s", info.Name(), err)
}
migration, err := ParseMigration(info.Name(), file)
if err != nil {
return nil, err
return nil, fmt.Errorf("Error while parsing %s: %s", info.Name(), err)
}
migrations = append(migrations, migration)
@ -236,6 +256,60 @@ func (a AssetMigrationSource) FindMigrations() ([]*Migration, error) {
return migrations, nil
}
// Avoids pulling in the packr library for everyone, mimics the bits of
// packr.Box that we need.
type PackrBox interface {
List() []string
Bytes(name string) []byte
}
// Migrations from a packr box.
type PackrMigrationSource struct {
Box PackrBox
// Path in the box to use.
Dir string
}
var _ MigrationSource = (*PackrMigrationSource)(nil)
func (p PackrMigrationSource) FindMigrations() ([]*Migration, error) {
migrations := make([]*Migration, 0)
items := p.Box.List()
prefix := ""
dir := path.Clean(p.Dir)
if dir != "." {
prefix = fmt.Sprintf("%s/", dir)
}
for _, item := range items {
if !strings.HasPrefix(item, prefix) {
continue
}
name := strings.TrimPrefix(item, prefix)
if strings.Contains(name, "/") {
continue
}
if strings.HasSuffix(name, ".sql") {
file := p.Box.Bytes(item)
migration, err := ParseMigration(name, bytes.NewReader(file))
if err != nil {
return nil, err
}
migrations = append(migrations, migration)
}
}
// Make sure migrations are sorted
sort.Sort(byId(migrations))
return migrations, nil
}
// Migration parsing
func ParseMigration(id string, r io.ReadSeeker) (*Migration, error) {
m := &Migration{
@ -244,7 +318,7 @@ func ParseMigration(id string, r io.ReadSeeker) (*Migration, error) {
parsed, err := sqlparse.ParseMigration(r)
if err != nil {
return nil, err
return nil, fmt.Errorf("Error parsing migration (%s): %s", id, err)
}
m.Up = parsed.UpStatements
@ -304,22 +378,31 @@ func ExecMax(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirecti
}
}
if dir == Up {
switch dir {
case Up:
err = executor.Insert(&MigrationRecord{
Id: migration.Id,
AppliedAt: time.Now(),
})
if err != nil {
if trans, ok := executor.(*gorp.Transaction); ok {
trans.Rollback()
}
return applied, newTxError(migration, err)
}
} else if dir == Down {
case Down:
_, err := executor.Delete(&MigrationRecord{
Id: migration.Id,
})
if err != nil {
if trans, ok := executor.(*gorp.Transaction); ok {
trans.Rollback()
}
return applied, newTxError(migration, err)
}
} else {
default:
panic("Not possible")
}
@ -402,6 +485,55 @@ func PlanMigration(db *sql.DB, dialect string, m MigrationSource, dir MigrationD
return result, dbMap, nil
}
// Skip a set of migrations
//
// Will skip at most `max` migrations. Pass 0 for no limit.
//
// Returns the number of skipped migrations.
func SkipMax(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) (int, error) {
migrations, dbMap, err := PlanMigration(db, dialect, m, dir, max)
if err != nil {
return 0, err
}
// Skip migrations
applied := 0
for _, migration := range migrations {
var executor SqlExecutor
if migration.DisableTransaction {
executor = dbMap
} else {
executor, err = dbMap.Begin()
if err != nil {
return applied, newTxError(migration, err)
}
}
err = executor.Insert(&MigrationRecord{
Id: migration.Id,
AppliedAt: time.Now(),
})
if err != nil {
if trans, ok := executor.(*gorp.Transaction); ok {
trans.Rollback()
}
return applied, newTxError(migration, err)
}
if trans, ok := executor.(*gorp.Transaction); ok {
if err := trans.Commit(); err != nil {
return applied, newTxError(migration, err)
}
}
applied++
}
return applied, nil
}
// Filter a slice of migrations into ones that should be applied.
func ToApply(migrations []*Migration, current string, direction MigrationDirection) []*Migration {
var index = -1
@ -482,7 +614,8 @@ func getMigrationDbMap(db *sql.DB, dialect string) (*gorp.DbMap, error) {
var out *time.Time
err := db.QueryRow("SELECT NOW()").Scan(&out)
if err != nil {
if err.Error() == "sql: Scan error on column index 0: unsupported driver -> Scan pair: []uint8 -> *time.Time" {
if err.Error() == "sql: Scan error on column index 0: unsupported driver -> Scan pair: []uint8 -> *time.Time" ||
err.Error() == "sql: Scan error on column index 0: unsupported Scan, storing driver.Value type []uint8 into type *time.Time" {
return nil, errors.New(`Cannot parse dates.
Make sure that the parseTime option is supplied to your database connection.

18
vendor/vendor.json vendored
View File

@ -32,6 +32,12 @@
"revision": "2e00b5cd70399450106cec6431c2e2ce3cae5034",
"revisionTime": "2016-12-24T12:10:19Z"
},
{
"checksumSHA1": "8h+jn5DM8qpamdOYYWCCInJZzyM=",
"path": "github.com/gobuffalo/packr",
"revision": "7f4074995d431987caaa35088199f13c44b24440",
"revisionTime": "2018-04-13T17:35:21Z"
},
{
"checksumSHA1": "g/V4qrXjUGG9B+e3hB+4NAYJ5Gs=",
"path": "github.com/gorilla/context",
@ -111,10 +117,16 @@
"revisionTime": "2016-12-08T17:59:04Z"
},
{
"checksumSHA1": "Qgwcmaxcms96cseiKeTu73RiKJw=",
"checksumSHA1": "ljd3FhYRJ91cLZz3wsH9BQQ2JbA=",
"path": "github.com/pkg/errors",
"revision": "816c9085562cd7ee03e7f8188a1cfd942858cded",
"revisionTime": "2018-03-11T21:45:15Z"
},
{
"checksumSHA1": "6nYX6dKRoU5uicZ5wa9nD7dq22Q=",
"path": "github.com/rubenv/sql-migrate",
"revision": "1ed79968dfca5de79adb13c84523caaa4fc865a9",
"revisionTime": "2017-01-04T09:54:22Z"
"revision": "081fe17d19ff4e2dd9f5a0c1158e6bcf74da6906",
"revisionTime": "2018-02-17T20:35:53Z"
},
{
"checksumSHA1": "IQ6NtaJ+F1rtAMIaCetzKb5nrxQ=",