mirror of https://github.com/status-im/fathom.git
use sql migrate for handling database migrations & run on boot
This commit is contained in:
parent
46f954cc6e
commit
e47183230d
|
@ -5,6 +5,7 @@ import (
|
|||
"github.com/dannyvankooten/ana/api"
|
||||
"github.com/gorilla/handlers"
|
||||
"github.com/gorilla/mux"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
)
|
||||
|
@ -33,7 +34,7 @@ func Server(port int) {
|
|||
r.Path("/tracker.js").Handler(http.FileServer(http.Dir("./static/js/")))
|
||||
r.Handle("/", http.FileServer(http.Dir("./views/")))
|
||||
|
||||
fmt.Printf("HTTP server will now start listening on :%d\n", port)
|
||||
log.Printf("HTTP server will now start listening on :%d\n", port)
|
||||
err := http.ListenAndServe(fmt.Sprintf(":%d", port), handlers.LoggingHandler(os.Stdout, r))
|
||||
fmt.Println(err)
|
||||
log.Println(err)
|
||||
}
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
package commands
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/dannyvankooten/ana/datastore"
|
||||
"github.com/dannyvankooten/ana/models"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
"log"
|
||||
)
|
||||
|
||||
// Register creates a new user with the given email & password
|
||||
|
@ -16,5 +16,5 @@ func Register(email string, password string) {
|
|||
}
|
||||
user.Save(datastore.DB)
|
||||
|
||||
fmt.Printf("User %s #%d created.\n", email, user.ID)
|
||||
log.Printf("User %s #%d created.\n", email, user.ID)
|
||||
}
|
||||
|
|
|
@ -1,12 +0,0 @@
|
|||
DROP TABLE IF EXISTS pageviews;
|
||||
DROP TABLE if exists visitors;
|
||||
DROP TABLE IF EXISTS pages;
|
||||
DROP TABLE IF EXISTS sites;
|
||||
DROP TABLE IF EXISTS users;
|
||||
DROP TABLE IF EXISTS total_pageviews;
|
||||
DROP TABLE IF EXISTS total_visitors;
|
||||
DROP TABLE IF EXISTS total_browser_languages;
|
||||
DROP TABLE IF EXISTS total_screens;
|
||||
DROP TABLE IF EXISTS total_browser_names;
|
||||
DROP TABLE IF EXISTS total_referrers;
|
||||
DROP TABLE IF EXISTS options;
|
|
@ -1,3 +1,4 @@
|
|||
-- +migrate Up
|
||||
|
||||
CREATE TABLE visitors(
|
||||
`id` INTEGER UNSIGNED AUTO_INCREMENT PRIMARY KEY NOT NULL,
|
||||
|
@ -100,3 +101,17 @@ CREATE TABLE `total_referrers` (
|
|||
);
|
||||
CREATE INDEX total_referrers_date ON total_referrers(`date`);
|
||||
ALTER TABLE total_referrers ADD UNIQUE(`value`, `date`);
|
||||
|
||||
-- +migrate Down
|
||||
DROP TABLE IF EXISTS pageviews;
|
||||
DROP TABLE if exists visitors;
|
||||
DROP TABLE IF EXISTS pages;
|
||||
DROP TABLE IF EXISTS sites;
|
||||
DROP TABLE IF EXISTS users;
|
||||
DROP TABLE IF EXISTS total_pageviews;
|
||||
DROP TABLE IF EXISTS total_visitors;
|
||||
DROP TABLE IF EXISTS total_browser_languages;
|
||||
DROP TABLE IF EXISTS total_screens;
|
||||
DROP TABLE IF EXISTS total_browser_names;
|
||||
DROP TABLE IF EXISTS total_referrers;
|
||||
DROP TABLE IF EXISTS options;
|
|
@ -3,11 +3,11 @@ package datastore
|
|||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
_ "github.com/go-sql-driver/mysql" // mysql driver
|
||||
_ "github.com/lib/pq" // postgresql driver
|
||||
"github.com/rubenv/sql-migrate"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
// DB ...
|
||||
|
@ -21,6 +21,10 @@ func Init() *sql.DB {
|
|||
}
|
||||
|
||||
DB = New(driver, getDSN(driver))
|
||||
|
||||
// run migrations
|
||||
runMigrations(driver)
|
||||
|
||||
return DB
|
||||
}
|
||||
|
||||
|
@ -47,9 +51,26 @@ func getDSN(driver string) string {
|
|||
os.Getenv("ANA_DATABASE_NAME"),
|
||||
)
|
||||
|
||||
if driver == "postgres" {
|
||||
switch driver {
|
||||
case "postgres":
|
||||
dsn = "postgres://" + dsn
|
||||
case "mysql":
|
||||
dsn = dsn + "?parseTime=true"
|
||||
}
|
||||
|
||||
return dsn
|
||||
}
|
||||
|
||||
func runMigrations(driver string) {
|
||||
migrations := migrate.FileMigrationSource{
|
||||
Dir: "datastore/migrations",
|
||||
}
|
||||
|
||||
migrate.SetTable("migrations")
|
||||
n, err := migrate.Exec(DB, driver, migrations, migrate.Up)
|
||||
if err != nil {
|
||||
log.Fatal("Database migrations failed: ", err)
|
||||
}
|
||||
|
||||
log.Printf("Applied %d database migrations!\n", n)
|
||||
}
|
||||
|
|
|
@ -0,0 +1,4 @@
|
|||
development:
|
||||
dialect: mysql
|
||||
datasource: root:root@/ana
|
||||
dir: datastore/migrations
|
|
@ -0,0 +1,255 @@
|
|||
# sql-migrate
|
||||
|
||||
> SQL Schema migration tool for [Go](http://golang.org/). Based on [gorp](https://github.com/go-gorp/gorp) and [goose](https://bitbucket.org/liamstask/goose).
|
||||
|
||||
[![Build Status](https://travis-ci.org/rubenv/sql-migrate.svg?branch=master)](https://travis-ci.org/rubenv/sql-migrate) [![GoDoc](https://godoc.org/github.com/rubenv/sql-migrate?status.png)](https://godoc.org/github.com/rubenv/sql-migrate)
|
||||
|
||||
Using [modl](https://github.com/jmoiron/modl)? Check out [modl-migrate](https://github.com/rubenv/modl-migrate).
|
||||
|
||||
## Features
|
||||
|
||||
* Usable as a CLI tool or as a library
|
||||
* Supports SQLite, PostgreSQL, MySQL, MSSQL and Oracle databases (through [gorp](https://github.com/go-gorp/gorp))
|
||||
* Can embed migrations into your application
|
||||
* Migrations are defined with SQL for full flexibility
|
||||
* Atomic migrations
|
||||
* Up/down migrations to allow rollback
|
||||
* Supports multiple database types in one project
|
||||
|
||||
## Installation
|
||||
|
||||
To install the library and command line program, use the following:
|
||||
|
||||
```bash
|
||||
go get github.com/rubenv/sql-migrate/...
|
||||
```
|
||||
|
||||
## Usage
|
||||
### As a standalone tool
|
||||
```
|
||||
$ sql-migrate --help
|
||||
usage: sql-migrate [--version] [--help] <command> [<args>]
|
||||
|
||||
Available commands are:
|
||||
down Undo a database migration
|
||||
redo Reapply the last migration
|
||||
status Show migration status
|
||||
up Migrates the database to the most recent version available
|
||||
```
|
||||
|
||||
Each command requires a configuration file (which defaults to `dbconfig.yml`, but can be specified with the `-config` flag). This config file should specify one or more environments:
|
||||
|
||||
```yml
|
||||
development:
|
||||
dialect: sqlite3
|
||||
datasource: test.db
|
||||
dir: migrations/sqlite3
|
||||
|
||||
production:
|
||||
dialect: postgres
|
||||
datasource: dbname=myapp sslmode=disable
|
||||
dir: migrations/postgres
|
||||
table: migrations
|
||||
```
|
||||
|
||||
The `table` setting is optional and will default to `gorp_migrations`.
|
||||
|
||||
The environment that will be used can be specified with the `-env` flag (defaults to `development`).
|
||||
|
||||
Use the `--help` flag in combination with any of the commands to get an overview of its usage:
|
||||
|
||||
```
|
||||
$ sql-migrate up --help
|
||||
Usage: sql-migrate up [options] ...
|
||||
|
||||
Migrates the database to the most recent version available.
|
||||
|
||||
Options:
|
||||
|
||||
-config=config.yml Configuration file to use.
|
||||
-env="development" Environment.
|
||||
-limit=0 Limit the number of migrations (0 = unlimited).
|
||||
-dryrun Don't apply migrations, just print them.
|
||||
```
|
||||
|
||||
The `up` command applies all available migrations. By contrast, `down` will only apply one migration by default. This behavior can be changed for both by using the `-limit` parameter.
|
||||
|
||||
The `redo` command will unapply the last migration and reapply it. This is useful during development, when you're writing migrations.
|
||||
|
||||
Use the `status` command to see the state of the applied migrations:
|
||||
|
||||
```bash
|
||||
$ sql-migrate status
|
||||
+---------------+-----------------------------------------+
|
||||
| MIGRATION | APPLIED |
|
||||
+---------------+-----------------------------------------+
|
||||
| 1_initial.sql | 2014-09-13 08:19:06.788354925 +0000 UTC |
|
||||
| 2_record.sql | no |
|
||||
+---------------+-----------------------------------------+
|
||||
```
|
||||
|
||||
### As a library
|
||||
Import sql-migrate into your application:
|
||||
|
||||
```go
|
||||
import "github.com/rubenv/sql-migrate"
|
||||
```
|
||||
|
||||
Set up a source of migrations, this can be from memory, from a set of files or from bindata (more on that later):
|
||||
|
||||
```go
|
||||
// Hardcoded strings in memory:
|
||||
migrations := &migrate.MemoryMigrationSource{
|
||||
Migrations: []*migrate.Migration{
|
||||
&migrate.Migration{
|
||||
Id: "123",
|
||||
Up: []string{"CREATE TABLE people (id int)"},
|
||||
Down: []string{"DROP TABLE people"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// OR: Read migrations from a folder:
|
||||
migrations := &migrate.FileMigrationSource{
|
||||
Dir: "db/migrations",
|
||||
}
|
||||
|
||||
// OR: Use migrations from bindata:
|
||||
migrations := &migrate.AssetMigrationSource{
|
||||
Asset: Asset,
|
||||
AssetDir: AssetDir,
|
||||
Dir: "migrations",
|
||||
}
|
||||
```
|
||||
|
||||
Then use the `Exec` function to upgrade your database:
|
||||
|
||||
```go
|
||||
db, err := sql.Open("sqlite3", filename)
|
||||
if err != nil {
|
||||
// Handle errors!
|
||||
}
|
||||
|
||||
n, err := migrate.Exec(db, "sqlite3", migrations, migrate.Up)
|
||||
if err != nil {
|
||||
// Handle errors!
|
||||
}
|
||||
fmt.Printf("Applied %d migrations!\n", n)
|
||||
```
|
||||
|
||||
Note that `n` can be greater than `0` even if there is an error: any migration that succeeded will remain applied even if a later one fails.
|
||||
|
||||
Check [the GoDoc reference](https://godoc.org/github.com/rubenv/sql-migrate) for the full documentation.
|
||||
|
||||
## Writing migrations
|
||||
Migrations are defined in SQL files, which contain a set of SQL statements. Special comments are used to distinguish up and down migrations.
|
||||
|
||||
```sql
|
||||
-- +migrate Up
|
||||
-- SQL in section 'Up' is executed when this migration is applied
|
||||
CREATE TABLE people (id int);
|
||||
|
||||
|
||||
-- +migrate Down
|
||||
-- SQL section 'Down' is executed when this migration is rolled back
|
||||
DROP TABLE people;
|
||||
```
|
||||
|
||||
You can put multiple statements in each block, as long as you end them with a semicolon (`;`).
|
||||
|
||||
If you have complex statements which contain semicolons, use `StatementBegin` and `StatementEnd` to indicate boundaries:
|
||||
|
||||
```sql
|
||||
-- +migrate Up
|
||||
CREATE TABLE people (id int);
|
||||
|
||||
-- +migrate StatementBegin
|
||||
CREATE OR REPLACE FUNCTION do_something()
|
||||
returns void AS $$
|
||||
DECLARE
|
||||
create_query text;
|
||||
BEGIN
|
||||
-- Do something here
|
||||
END;
|
||||
$$
|
||||
language plpgsql;
|
||||
-- +migrate StatementEnd
|
||||
|
||||
-- +migrate Down
|
||||
DROP FUNCTION do_something();
|
||||
DROP TABLE people;
|
||||
```
|
||||
|
||||
The order in which migrations are applied is defined through the filename: sql-migrate will sort migrations based on their name. It's recommended to use an increasing version number or a timestamp as the first part of the filename.
|
||||
|
||||
Normally each migration is run within a transaction in order to guarantee that it is fully atomic. However some SQL commands (for example creating an index concurrently in PostgreSQL) cannot be executed inside a transaction. In order to execute such a command in a migration, the migration can be run using the `notransaction` option:
|
||||
|
||||
```sql
|
||||
-- +migrate Up notransaction
|
||||
CREATE UNIQUE INDEX people_unique_id_idx CONCURRENTLY ON people (id);
|
||||
|
||||
-- +migrate Down
|
||||
DROP INDEX people_unique_id_idx;
|
||||
```
|
||||
|
||||
## Embedding migrations with [bindata](https://github.com/jteeuwen/go-bindata)
|
||||
If you like your Go applications self-contained (that is: a single binary): use [bindata](https://github.com/jteeuwen/go-bindata) to embed the migration files.
|
||||
|
||||
Just write your migration files as usual, as a set of SQL files in a folder.
|
||||
|
||||
Then use bindata to generate a `.go` file with the migrations embedded:
|
||||
|
||||
```bash
|
||||
go-bindata -pkg myapp -o bindata.go db/migrations/
|
||||
```
|
||||
|
||||
The resulting `bindata.go` file will contain your migrations. Remember to regenerate your `bindata.go` file whenever you add/modify a migration (`go generate` will help here, once it arrives).
|
||||
|
||||
Use the `AssetMigrationSource` in your application to find the migrations:
|
||||
|
||||
```go
|
||||
migrations := &migrate.AssetMigrationSource{
|
||||
Asset: Asset,
|
||||
AssetDir: AssetDir,
|
||||
Dir: "db/migrations",
|
||||
}
|
||||
```
|
||||
|
||||
Both `Asset` and `AssetDir` are functions provided by bindata.
|
||||
|
||||
Then proceed as usual.
|
||||
|
||||
## Extending
|
||||
Adding a new migration source means implementing `MigrationSource`.
|
||||
|
||||
```go
|
||||
type MigrationSource interface {
|
||||
FindMigrations() ([]*Migration, error)
|
||||
}
|
||||
```
|
||||
|
||||
The resulting slice of migrations will be executed in the given order, so it should usually be sorted by the `Id` field.
|
||||
|
||||
## License
|
||||
|
||||
(The MIT License)
|
||||
|
||||
Copyright (C) 2014-2016 by Ruben Vermeersch <ruben@rocketeer.be>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
|
@ -0,0 +1,207 @@
|
|||
/*
|
||||
|
||||
SQL Schema migration tool for Go.
|
||||
|
||||
Key features:
|
||||
|
||||
* Usable as a CLI tool or as a library
|
||||
* Supports SQLite, PostgreSQL, MySQL, MSSQL and Oracle databases (through gorp)
|
||||
* Can embed migrations into your application
|
||||
* Migrations are defined with SQL for full flexibility
|
||||
* Atomic migrations
|
||||
* Up/down migrations to allow rollback
|
||||
* Supports multiple database types in one project
|
||||
|
||||
Installation
|
||||
|
||||
To install the library and command line program, use the following:
|
||||
|
||||
go get github.com/rubenv/sql-migrate/...
|
||||
|
||||
Command-line tool
|
||||
|
||||
The main command is called sql-migrate.
|
||||
|
||||
$ sql-migrate --help
|
||||
usage: sql-migrate [--version] [--help] <command> [<args>]
|
||||
|
||||
Available commands are:
|
||||
down Undo a database migration
|
||||
redo Reapply the last migration
|
||||
status Show migration status
|
||||
up Migrates the database to the most recent version available
|
||||
|
||||
Each command requires a configuration file (which defaults to dbconfig.yml, but can be specified with the -config flag). This config file should specify one or more environments:
|
||||
|
||||
development:
|
||||
dialect: sqlite3
|
||||
datasource: test.db
|
||||
dir: migrations/sqlite3
|
||||
|
||||
production:
|
||||
dialect: postgres
|
||||
datasource: dbname=myapp sslmode=disable
|
||||
dir: migrations/postgres
|
||||
table: migrations
|
||||
|
||||
The `table` setting is optional and will default to `gorp_migrations`.
|
||||
|
||||
The environment that will be used can be specified with the -env flag (defaults to development).
|
||||
|
||||
Use the --help flag in combination with any of the commands to get an overview of its usage:
|
||||
|
||||
$ sql-migrate up --help
|
||||
Usage: sql-migrate up [options] ...
|
||||
|
||||
Migrates the database to the most recent version available.
|
||||
|
||||
Options:
|
||||
|
||||
-config=config.yml Configuration file to use.
|
||||
-env="development" Environment.
|
||||
-limit=0 Limit the number of migrations (0 = unlimited).
|
||||
-dryrun Don't apply migrations, just print them.
|
||||
|
||||
The up command applies all available migrations. By contrast, down will only apply one migration by default. This behavior can be changed for both by using the -limit parameter.
|
||||
|
||||
The redo command will unapply the last migration and reapply it. This is useful during development, when you're writing migrations.
|
||||
|
||||
Use the status command to see the state of the applied migrations:
|
||||
|
||||
$ sql-migrate status
|
||||
+---------------+-----------------------------------------+
|
||||
| MIGRATION | APPLIED |
|
||||
+---------------+-----------------------------------------+
|
||||
| 1_initial.sql | 2014-09-13 08:19:06.788354925 +0000 UTC |
|
||||
| 2_record.sql | no |
|
||||
+---------------+-----------------------------------------+
|
||||
|
||||
Library
|
||||
|
||||
Import sql-migrate into your application:
|
||||
|
||||
import "github.com/rubenv/sql-migrate"
|
||||
|
||||
Set up a source of migrations, this can be from memory, from a set of files or from bindata (more on that later):
|
||||
|
||||
// Hardcoded strings in memory:
|
||||
migrations := &migrate.MemoryMigrationSource{
|
||||
Migrations: []*migrate.Migration{
|
||||
&migrate.Migration{
|
||||
Id: "123",
|
||||
Up: []string{"CREATE TABLE people (id int)"},
|
||||
Down: []string{"DROP TABLE people"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// OR: Read migrations from a folder:
|
||||
migrations := &migrate.FileMigrationSource{
|
||||
Dir: "db/migrations",
|
||||
}
|
||||
|
||||
// OR: Use migrations from bindata:
|
||||
migrations := &migrate.AssetMigrationSource{
|
||||
Asset: Asset,
|
||||
AssetDir: AssetDir,
|
||||
Dir: "migrations",
|
||||
}
|
||||
|
||||
Then use the Exec function to upgrade your database:
|
||||
|
||||
db, err := sql.Open("sqlite3", filename)
|
||||
if err != nil {
|
||||
// Handle errors!
|
||||
}
|
||||
|
||||
n, err := migrate.Exec(db, "sqlite3", migrations, migrate.Up)
|
||||
if err != nil {
|
||||
// Handle errors!
|
||||
}
|
||||
fmt.Printf("Applied %d migrations!\n", n)
|
||||
|
||||
Note that n can be greater than 0 even if there is an error: any migration that succeeded will remain applied even if a later one fails.
|
||||
|
||||
The full set of capabilities can be found in the API docs below.
|
||||
|
||||
Writing migrations
|
||||
|
||||
Migrations are defined in SQL files, which contain a set of SQL statements. Special comments are used to distinguish up and down migrations.
|
||||
|
||||
-- +migrate Up
|
||||
-- SQL in section 'Up' is executed when this migration is applied
|
||||
CREATE TABLE people (id int);
|
||||
|
||||
|
||||
-- +migrate Down
|
||||
-- SQL section 'Down' is executed when this migration is rolled back
|
||||
DROP TABLE people;
|
||||
|
||||
You can put multiple statements in each block, as long as you end them with a semicolon (;).
|
||||
|
||||
If you have complex statements which contain semicolons, use StatementBegin and StatementEnd to indicate boundaries:
|
||||
|
||||
-- +migrate Up
|
||||
CREATE TABLE people (id int);
|
||||
|
||||
-- +migrate StatementBegin
|
||||
CREATE OR REPLACE FUNCTION do_something()
|
||||
returns void AS $$
|
||||
DECLARE
|
||||
create_query text;
|
||||
BEGIN
|
||||
-- Do something here
|
||||
END;
|
||||
$$
|
||||
language plpgsql;
|
||||
-- +migrate StatementEnd
|
||||
|
||||
-- +migrate Down
|
||||
DROP FUNCTION do_something();
|
||||
DROP TABLE people;
|
||||
|
||||
The order in which migrations are applied is defined through the filename: sql-migrate will sort migrations based on their name. It's recommended to use an increasing version number or a timestamp as the first part of the filename.
|
||||
|
||||
Normally each migration is run within a transaction in order to guarantee that it is fully atomic. However some SQL commands (for example creating an index concurrently in PostgreSQL) cannot be executed inside a transaction. In order to execute such a command in a migration, the migration can be run using the notransaction option:
|
||||
|
||||
-- +migrate Up notransaction
|
||||
CREATE UNIQUE INDEX people_unique_id_idx CONCURRENTLY ON people (id);
|
||||
|
||||
-- +migrate Down
|
||||
DROP INDEX people_unique_id_idx;
|
||||
|
||||
Embedding migrations with bindata
|
||||
|
||||
If you like your Go applications self-contained (that is: a single binary): use bindata (https://github.com/jteeuwen/go-bindata) to embed the migration files.
|
||||
|
||||
Just write your migration files as usual, as a set of SQL files in a folder.
|
||||
|
||||
Then use bindata to generate a .go file with the migrations embedded:
|
||||
|
||||
go-bindata -pkg myapp -o bindata.go db/migrations/
|
||||
|
||||
The resulting bindata.go file will contain your migrations. Remember to regenerate your bindata.go file whenever you add/modify a migration (go generate will help here, once it arrives).
|
||||
|
||||
Use the AssetMigrationSource in your application to find the migrations:
|
||||
|
||||
migrations := &migrate.AssetMigrationSource{
|
||||
Asset: Asset,
|
||||
AssetDir: AssetDir,
|
||||
Dir: "db/migrations",
|
||||
}
|
||||
|
||||
Both Asset and AssetDir are functions provided by bindata.
|
||||
|
||||
Then proceed as usual.
|
||||
|
||||
Extending
|
||||
|
||||
Adding a new migration source means implementing MigrationSource.
|
||||
|
||||
type MigrationSource interface {
|
||||
FindMigrations() ([]*Migration, error)
|
||||
}
|
||||
|
||||
The resulting slice of migrations will be executed in the given order, so it should usually be sorted by the Id field.
|
||||
*/
|
||||
package migrate
|
|
@ -0,0 +1,509 @@
|
|||
package migrate
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rubenv/sql-migrate/sqlparse"
|
||||
"gopkg.in/gorp.v1"
|
||||
)
|
||||
|
||||
type MigrationDirection int
|
||||
|
||||
const (
|
||||
Up MigrationDirection = iota
|
||||
Down
|
||||
)
|
||||
|
||||
var tableName = "gorp_migrations"
|
||||
var schemaName = ""
|
||||
var numberPrefixRegex = regexp.MustCompile(`^(\d+).*$`)
|
||||
|
||||
// TxError is returned when any error is encountered during a database
|
||||
// transaction. It contains the relevant *Migration and notes it's Id in the
|
||||
// Error function output.
|
||||
type TxError struct {
|
||||
Migration *Migration
|
||||
Err error
|
||||
}
|
||||
|
||||
func newTxError(migration *PlannedMigration, err error) error {
|
||||
return &TxError{
|
||||
Migration: migration.Migration,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
func (e *TxError) Error() string {
|
||||
return e.Err.Error() + " handling " + e.Migration.Id
|
||||
}
|
||||
|
||||
// Set the name of the table used to store migration info.
|
||||
//
|
||||
// Should be called before any other call such as (Exec, ExecMax, ...).
|
||||
func SetTable(name string) {
|
||||
if name != "" {
|
||||
tableName = name
|
||||
}
|
||||
}
|
||||
|
||||
// SetSchema sets the name of a schema that the migration table be referenced.
|
||||
func SetSchema(name string) {
|
||||
if name != "" {
|
||||
schemaName = name
|
||||
}
|
||||
}
|
||||
|
||||
type Migration struct {
|
||||
Id string
|
||||
Up []string
|
||||
Down []string
|
||||
|
||||
DisableTransactionUp bool
|
||||
DisableTransactionDown bool
|
||||
}
|
||||
|
||||
func (m Migration) Less(other *Migration) bool {
|
||||
switch {
|
||||
case m.isNumeric() && other.isNumeric() && m.VersionInt() != other.VersionInt():
|
||||
return m.VersionInt() < other.VersionInt()
|
||||
case m.isNumeric() && !other.isNumeric():
|
||||
return true
|
||||
case !m.isNumeric() && other.isNumeric():
|
||||
return false
|
||||
default:
|
||||
return m.Id < other.Id
|
||||
}
|
||||
}
|
||||
|
||||
func (m Migration) isNumeric() bool {
|
||||
return len(m.NumberPrefixMatches()) > 0
|
||||
}
|
||||
|
||||
func (m Migration) NumberPrefixMatches() []string {
|
||||
return numberPrefixRegex.FindStringSubmatch(m.Id)
|
||||
}
|
||||
|
||||
func (m Migration) VersionInt() int64 {
|
||||
v := m.NumberPrefixMatches()[1]
|
||||
value, err := strconv.ParseInt(v, 10, 64)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Could not parse %q into int64: %s", v, err))
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
type PlannedMigration struct {
|
||||
*Migration
|
||||
|
||||
DisableTransaction bool
|
||||
Queries []string
|
||||
}
|
||||
|
||||
type byId []*Migration
|
||||
|
||||
func (b byId) Len() int { return len(b) }
|
||||
func (b byId) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||
func (b byId) Less(i, j int) bool { return b[i].Less(b[j]) }
|
||||
|
||||
type MigrationRecord struct {
|
||||
Id string `db:"id"`
|
||||
AppliedAt time.Time `db:"applied_at"`
|
||||
}
|
||||
|
||||
var MigrationDialects = map[string]gorp.Dialect{
|
||||
"sqlite3": gorp.SqliteDialect{},
|
||||
"postgres": gorp.PostgresDialect{},
|
||||
"mysql": gorp.MySQLDialect{Engine: "InnoDB", Encoding: "UTF8"},
|
||||
"mssql": gorp.SqlServerDialect{},
|
||||
"oci8": gorp.OracleDialect{},
|
||||
}
|
||||
|
||||
type MigrationSource interface {
|
||||
// Finds the migrations.
|
||||
//
|
||||
// The resulting slice of migrations should be sorted by Id.
|
||||
FindMigrations() ([]*Migration, error)
|
||||
}
|
||||
|
||||
// A hardcoded set of migrations, in-memory.
|
||||
type MemoryMigrationSource struct {
|
||||
Migrations []*Migration
|
||||
}
|
||||
|
||||
var _ MigrationSource = (*MemoryMigrationSource)(nil)
|
||||
|
||||
func (m MemoryMigrationSource) FindMigrations() ([]*Migration, error) {
|
||||
// Make sure migrations are sorted
|
||||
sort.Sort(byId(m.Migrations))
|
||||
|
||||
return m.Migrations, nil
|
||||
}
|
||||
|
||||
// A set of migrations loaded from a directory.
|
||||
type FileMigrationSource struct {
|
||||
Dir string
|
||||
}
|
||||
|
||||
var _ MigrationSource = (*FileMigrationSource)(nil)
|
||||
|
||||
func (f FileMigrationSource) FindMigrations() ([]*Migration, error) {
|
||||
migrations := make([]*Migration, 0)
|
||||
|
||||
file, err := os.Open(f.Dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
files, err := file.Readdir(0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, info := range files {
|
||||
if strings.HasSuffix(info.Name(), ".sql") {
|
||||
file, err := os.Open(path.Join(f.Dir, info.Name()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
migration, err := ParseMigration(info.Name(), file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
migrations = append(migrations, migration)
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure migrations are sorted
|
||||
sort.Sort(byId(migrations))
|
||||
|
||||
return migrations, nil
|
||||
}
|
||||
|
||||
// Migrations from a bindata asset set.
|
||||
type AssetMigrationSource struct {
|
||||
// Asset should return content of file in path if exists
|
||||
Asset func(path string) ([]byte, error)
|
||||
|
||||
// AssetDir should return list of files in the path
|
||||
AssetDir func(path string) ([]string, error)
|
||||
|
||||
// Path in the bindata to use.
|
||||
Dir string
|
||||
}
|
||||
|
||||
var _ MigrationSource = (*AssetMigrationSource)(nil)
|
||||
|
||||
func (a AssetMigrationSource) FindMigrations() ([]*Migration, error) {
|
||||
migrations := make([]*Migration, 0)
|
||||
|
||||
files, err := a.AssetDir(a.Dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, name := range files {
|
||||
if strings.HasSuffix(name, ".sql") {
|
||||
file, err := a.Asset(path.Join(a.Dir, name))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
migration, err := ParseMigration(name, bytes.NewReader(file))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
migrations = append(migrations, migration)
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure migrations are sorted
|
||||
sort.Sort(byId(migrations))
|
||||
|
||||
return migrations, nil
|
||||
}
|
||||
|
||||
// Migration parsing
|
||||
func ParseMigration(id string, r io.ReadSeeker) (*Migration, error) {
|
||||
m := &Migration{
|
||||
Id: id,
|
||||
}
|
||||
|
||||
parsed, err := sqlparse.ParseMigration(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m.Up = parsed.UpStatements
|
||||
m.Down = parsed.DownStatements
|
||||
|
||||
m.DisableTransactionUp = parsed.DisableTransactionUp
|
||||
m.DisableTransactionDown = parsed.DisableTransactionDown
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
type SqlExecutor interface {
|
||||
Exec(query string, args ...interface{}) (sql.Result, error)
|
||||
Insert(list ...interface{}) error
|
||||
Delete(list ...interface{}) (int64, error)
|
||||
}
|
||||
|
||||
// Execute a set of migrations
|
||||
//
|
||||
// Returns the number of applied migrations.
|
||||
func Exec(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection) (int, error) {
|
||||
return ExecMax(db, dialect, m, dir, 0)
|
||||
}
|
||||
|
||||
// Execute a set of migrations
|
||||
//
|
||||
// Will apply at most `max` migrations. Pass 0 for no limit (or use Exec).
|
||||
//
|
||||
// Returns the number of applied migrations.
|
||||
func ExecMax(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) (int, error) {
|
||||
migrations, dbMap, err := PlanMigration(db, dialect, m, dir, max)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Apply migrations
|
||||
applied := 0
|
||||
for _, migration := range migrations {
|
||||
var executor SqlExecutor
|
||||
|
||||
if migration.DisableTransaction {
|
||||
executor = dbMap
|
||||
} else {
|
||||
executor, err = dbMap.Begin()
|
||||
if err != nil {
|
||||
return applied, newTxError(migration, err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, stmt := range migration.Queries {
|
||||
if _, err := executor.Exec(stmt); err != nil {
|
||||
if trans, ok := executor.(*gorp.Transaction); ok {
|
||||
trans.Rollback()
|
||||
}
|
||||
|
||||
return applied, newTxError(migration, err)
|
||||
}
|
||||
}
|
||||
|
||||
if dir == Up {
|
||||
err = executor.Insert(&MigrationRecord{
|
||||
Id: migration.Id,
|
||||
AppliedAt: time.Now(),
|
||||
})
|
||||
if err != nil {
|
||||
return applied, newTxError(migration, err)
|
||||
}
|
||||
} else if dir == Down {
|
||||
_, err := executor.Delete(&MigrationRecord{
|
||||
Id: migration.Id,
|
||||
})
|
||||
if err != nil {
|
||||
return applied, newTxError(migration, err)
|
||||
}
|
||||
} else {
|
||||
panic("Not possible")
|
||||
}
|
||||
|
||||
if trans, ok := executor.(*gorp.Transaction); ok {
|
||||
if err := trans.Commit(); err != nil {
|
||||
return applied, newTxError(migration, err)
|
||||
}
|
||||
}
|
||||
|
||||
applied++
|
||||
}
|
||||
|
||||
return applied, nil
|
||||
}
|
||||
|
||||
// Plan a migration.
//
// PlanMigration computes the ordered list of migrations that Exec would run
// for the given direction, without executing anything. It returns the plan
// together with the gorp.DbMap used to read the bookkeeping table, so the
// caller can reuse it to actually apply the plan.
//
// max limits the number of newly planned migrations (0 means no limit);
// catch-up migrations (see below) are always included in full.
func PlanMigration(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) ([]*PlannedMigration, *gorp.DbMap, error) {
	dbMap, err := getMigrationDbMap(db, dialect)
	if err != nil {
		return nil, nil, err
	}

	// All migrations known to the source, in order.
	migrations, err := m.FindMigrations()
	if err != nil {
		return nil, nil, err
	}

	// All migrations recorded as already applied in the bookkeeping table.
	var migrationRecords []MigrationRecord
	_, err = dbMap.Select(&migrationRecords, fmt.Sprintf("SELECT * FROM %s", dbMap.Dialect.QuotedTableForQuery(schemaName, tableName)))
	if err != nil {
		return nil, nil, err
	}

	// Sort migrations that have been run by Id.
	var existingMigrations []*Migration
	for _, migrationRecord := range migrationRecords {
		existingMigrations = append(existingMigrations, &Migration{
			Id: migrationRecord.Id,
		})
	}
	sort.Sort(byId(existingMigrations))

	// Get last migration that was run
	record := &Migration{}
	if len(existingMigrations) > 0 {
		record = existingMigrations[len(existingMigrations)-1]
	}

	result := make([]*PlannedMigration, 0)

	// Add missing migrations up to the last run migration.
	// This can happen for example when merges happened.
	if len(existingMigrations) > 0 {
		result = append(result, ToCatchup(migrations, existingMigrations, record)...)
	}

	// Figure out which migrations to apply
	toApply := ToApply(migrations, record.Id, dir)
	toApplyCount := len(toApply)
	if max > 0 && max < toApplyCount {
		toApplyCount = max
	}
	for _, v := range toApply[0:toApplyCount] {

		// The queries and the per-migration transaction opt-out depend on
		// the direction we are planning for.
		if dir == Up {
			result = append(result, &PlannedMigration{
				Migration:          v,
				Queries:            v.Up,
				DisableTransaction: v.DisableTransactionUp,
			})
		} else if dir == Down {
			result = append(result, &PlannedMigration{
				Migration:          v,
				Queries:            v.Down,
				DisableTransaction: v.DisableTransactionDown,
			})
		}
	}

	return result, dbMap, nil
}
|
||||
|
||||
// Filter a slice of migrations into ones that should be applied.
|
||||
func ToApply(migrations []*Migration, current string, direction MigrationDirection) []*Migration {
|
||||
var index = -1
|
||||
if current != "" {
|
||||
for index < len(migrations)-1 {
|
||||
index++
|
||||
if migrations[index].Id == current {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if direction == Up {
|
||||
return migrations[index+1:]
|
||||
} else if direction == Down {
|
||||
if index == -1 {
|
||||
return []*Migration{}
|
||||
}
|
||||
|
||||
// Add in reverse order
|
||||
toApply := make([]*Migration, index+1)
|
||||
for i := 0; i < index+1; i++ {
|
||||
toApply[index-i] = migrations[i]
|
||||
}
|
||||
return toApply
|
||||
}
|
||||
|
||||
panic("Not possible")
|
||||
}
|
||||
|
||||
func ToCatchup(migrations, existingMigrations []*Migration, lastRun *Migration) []*PlannedMigration {
|
||||
missing := make([]*PlannedMigration, 0)
|
||||
for _, migration := range migrations {
|
||||
found := false
|
||||
for _, existing := range existingMigrations {
|
||||
if existing.Id == migration.Id {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found && migration.Less(lastRun) {
|
||||
missing = append(missing, &PlannedMigration{
|
||||
Migration: migration,
|
||||
Queries: migration.Up,
|
||||
DisableTransaction: migration.DisableTransactionUp,
|
||||
})
|
||||
}
|
||||
}
|
||||
return missing
|
||||
}
|
||||
|
||||
func GetMigrationRecords(db *sql.DB, dialect string) ([]*MigrationRecord, error) {
|
||||
dbMap, err := getMigrationDbMap(db, dialect)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var records []*MigrationRecord
|
||||
query := fmt.Sprintf("SELECT * FROM %s ORDER BY id ASC", dbMap.Dialect.QuotedTableForQuery(schemaName, tableName))
|
||||
_, err = dbMap.Select(&records, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return records, nil
|
||||
}
|
||||
|
||||
// getMigrationDbMap builds the gorp.DbMap used for migration bookkeeping:
// it resolves the dialect, sanity-checks the mysql driver configuration,
// registers the MigrationRecord table and creates it if needed.
func getMigrationDbMap(db *sql.DB, dialect string) (*gorp.DbMap, error) {
	d, ok := MigrationDialects[dialect]
	if !ok {
		return nil, fmt.Errorf("Unknown dialect: %s", dialect)
	}

	// When using the mysql driver, make sure that the parseTime option is
	// configured, otherwise it won't map time columns to time.Time. See
	// https://github.com/rubenv/sql-migrate/issues/2
	if dialect == "mysql" {
		var out *time.Time
		err := db.QueryRow("SELECT NOW()").Scan(&out)
		if err != nil {
			// NOTE: this matches the driver's error text verbatim, so it is
			// brittle against go-sql-driver/mysql changing its message.
			if err.Error() == "sql: Scan error on column index 0: unsupported driver -> Scan pair: []uint8 -> *time.Time" {
				return nil, errors.New(`Cannot parse dates.

Make sure that the parseTime option is supplied to your database connection.
Check https://github.com/go-sql-driver/mysql#parsetime for more info.`)
			} else {
				return nil, err
			}
		}
	}

	// Create migration database map
	dbMap := &gorp.DbMap{Db: db, Dialect: d}
	// The bookkeeping table uses the migration Id as a non-autoincrement key.
	dbMap.AddTableWithNameAndSchema(MigrationRecord{}, schemaName, tableName).SetKeys(false, "Id")
	//dbMap.TraceOn("", log.New(os.Stdout, "migrate: ", log.Lmicroseconds))

	err := dbMap.CreateTablesIfNotExists()
	if err != nil {
		return nil, err
	}

	return dbMap, nil
}
|
||||
|
||||
// TODO: Run migration + record insert in transaction.
|
|
@ -0,0 +1,28 @@
|
|||
# SQL migration parser
|
||||
|
||||
Based on the [goose](https://bitbucket.org/liamstask/goose) migration parser.
|
||||
|
||||
## License
|
||||
|
||||
(The MIT License)
|
||||
|
||||
Copyright (C) 2014 by Ruben Vermeersch <ruben@rocketeer.be>
|
||||
Copyright (C) 2012-2014 by Liam Staskawicz
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
|
@ -0,0 +1,193 @@
|
|||
package sqlparse
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Annotation syntax recognized inside migration SQL files.
const (
	// sqlCmdPrefix marks a line as a sql-migrate command, e.g. "-- +migrate Up".
	sqlCmdPrefix = "-- +migrate "
	// optionNoTransaction requests that a direction's statements run outside
	// a transaction.
	optionNoTransaction = "notransaction"
)

// ParsedMigration is the result of parsing a single migration file: the SQL
// statements for each direction, plus per-direction transaction opt-outs
// requested via the "notransaction" option.
type ParsedMigration struct {
	// Statements to run when migrating up, in file order.
	UpStatements []string
	// Statements to run when migrating down, in file order.
	DownStatements []string

	// True when "-- +migrate Up notransaction" was seen.
	DisableTransactionUp bool
	// True when "-- +migrate Down notransaction" was seen.
	DisableTransactionDown bool
}
|
||||
|
||||
// endsWithSemicolon reports whether the line terminates a SQL statement:
// true when the last word preceding any "--" comment ends with a semicolon.
func endsWithSemicolon(line string) bool {
	words := bufio.NewScanner(strings.NewReader(line))
	words.Split(bufio.ScanWords)

	// Track the last word seen before a comment starts (if one does).
	last := ""
	for words.Scan() {
		w := words.Text()
		if strings.HasPrefix(w, "--") {
			break
		}
		last = w
	}

	return strings.HasSuffix(last, ";")
}
|
||||
|
||||
// migrationDirection tracks which section of a migration file a parsed line
// belongs to.
type migrationDirection int

const (
	directionNone migrationDirection = iota
	directionUp
	directionDown
)

// migrateCommand is one parsed "-- +migrate <Command> [options...]" line.
type migrateCommand struct {
	Command string
	Options []string
}

// HasOption reports whether opt appears among the options that followed the
// command word.
func (c *migrateCommand) HasOption(opt string) bool {
	for _, candidate := range c.Options {
		if candidate == opt {
			return true
		}
	}
	return false
}
|
||||
|
||||
func parseCommand(line string) (*migrateCommand, error) {
|
||||
cmd := &migrateCommand{}
|
||||
|
||||
if !strings.HasPrefix(line, sqlCmdPrefix) {
|
||||
return nil, errors.New("ERROR: not a sql-migrate command")
|
||||
}
|
||||
|
||||
fields := strings.Fields(line[len(sqlCmdPrefix):])
|
||||
if len(fields) == 0 {
|
||||
return nil, errors.New(`ERROR: incomplete migration command`)
|
||||
}
|
||||
|
||||
cmd.Command = fields[0]
|
||||
|
||||
cmd.Options = fields[1:]
|
||||
|
||||
return cmd, nil
|
||||
}
|
||||
|
||||
// Split the given sql script into individual statements.
|
||||
//
|
||||
// The base case is to simply split on semicolons, as these
|
||||
// naturally terminate a statement.
|
||||
//
|
||||
// However, more complex cases like pl/pgsql can have semicolons
|
||||
// within a statement. For these cases, we provide the explicit annotations
|
||||
// 'StatementBegin' and 'StatementEnd' to allow the script to
|
||||
// tell us to ignore semicolons.
|
||||
func ParseMigration(r io.ReadSeeker) (*ParsedMigration, error) {
|
||||
p := &ParsedMigration{}
|
||||
|
||||
_, err := r.Seek(0, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
scanner := bufio.NewScanner(r)
|
||||
|
||||
statementEnded := false
|
||||
ignoreSemicolons := false
|
||||
currentDirection := directionNone
|
||||
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
|
||||
// handle any migrate-specific commands
|
||||
if strings.HasPrefix(line, sqlCmdPrefix) {
|
||||
cmd, err := parseCommand(line)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch cmd.Command {
|
||||
case "Up":
|
||||
currentDirection = directionUp
|
||||
if cmd.HasOption(optionNoTransaction) {
|
||||
p.DisableTransactionUp = true
|
||||
}
|
||||
break
|
||||
|
||||
case "Down":
|
||||
currentDirection = directionDown
|
||||
if cmd.HasOption(optionNoTransaction) {
|
||||
p.DisableTransactionDown = true
|
||||
}
|
||||
break
|
||||
|
||||
case "StatementBegin":
|
||||
if currentDirection != directionNone {
|
||||
ignoreSemicolons = true
|
||||
}
|
||||
break
|
||||
|
||||
case "StatementEnd":
|
||||
if currentDirection != directionNone {
|
||||
statementEnded = (ignoreSemicolons == true)
|
||||
ignoreSemicolons = false
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if currentDirection == directionNone {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, err := buf.WriteString(line + "\n"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Wrap up the two supported cases: 1) basic with semicolon; 2) psql statement
|
||||
// Lines that end with semicolon that are in a statement block
|
||||
// do not conclude statement.
|
||||
if (!ignoreSemicolons && endsWithSemicolon(line)) || statementEnded {
|
||||
statementEnded = false
|
||||
switch currentDirection {
|
||||
case directionUp:
|
||||
p.UpStatements = append(p.UpStatements, buf.String())
|
||||
|
||||
case directionDown:
|
||||
p.DownStatements = append(p.DownStatements, buf.String())
|
||||
|
||||
default:
|
||||
panic("impossible state")
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// diagnose likely migration script errors
|
||||
if ignoreSemicolons {
|
||||
return nil, errors.New("ERROR: saw '-- +migrate StatementBegin' with no matching '-- +migrate StatementEnd'")
|
||||
}
|
||||
|
||||
if currentDirection == directionNone {
|
||||
return nil, errors.New(`ERROR: no Up/Down annotations found, so no statements were executed.
|
||||
See https://github.com/rubenv/sql-migrate for details.`)
|
||||
}
|
||||
|
||||
return p, nil
|
||||
}
|
|
@ -1,352 +0,0 @@
|
|||
Meddler [![Build Status](https://travis-ci.org/russross/meddler.svg?branch=master)](https://travis-ci.org/russross/meddler)
|
||||
=======
|
||||
|
||||
Meddler is a small toolkit to take some of the tedium out of moving data
|
||||
back and forth between sql queries and structs.
|
||||
|
||||
It is not a complete ORM. It is intended to be a lightweight way to add some
|
||||
of the convenience of an ORM while leaving more control in the hands of the
|
||||
programmer.
|
||||
|
||||
Package docs are available at:
|
||||
|
||||
* http://godoc.org/github.com/russross/meddler
|
||||
|
||||
The package is housed on github, and the README there has more info:
|
||||
|
||||
* http://github.com/russross/meddler
|
||||
|
||||
This is currently configured for SQLite, MySQL, and PostgreSQL, but it
|
||||
can be configured for use with other databases. If you use it
|
||||
successfully with a different database, please contact me and I will
|
||||
add it to the list of pre-configured databases.
|
||||
|
||||
### DANGER
|
||||
|
||||
Meddler is still a work in progress, and additional
|
||||
backward-incompatible changes to the API are likely. The most recent
|
||||
change added support for multiple database types and made it easier
|
||||
to switch between them. This is most likely to affect the way you
|
||||
initialize the library to work with your database (see the install
|
||||
section below).
|
||||
|
||||
Another recent update is the change to int64 for primary keys. This
|
||||
matches the convention used in database/sql, and is more portable,
|
||||
but it may require some minor changes to existing code.
|
||||
|
||||
|
||||
Install
|
||||
-------
|
||||
|
||||
The usual `go get` command will put it in your `$GOPATH`:
|
||||
|
||||
go get github.com/russross/meddler
|
||||
|
||||
If you are only using one type of database, you should set Default
|
||||
to match your database type, e.g.:
|
||||
|
||||
meddler.Default = meddler.PostgreSQL
|
||||
|
||||
The default database is MySQL, so you should change it for anything
|
||||
else. To use multiple databases within a single project, or to use a
|
||||
database other than MySQL, PostgreSQL, or SQLite, see below.
|
||||
|
||||
Note: If you are using MySQL with the `github.com/go-sql-driver/mysql`
|
||||
driver, you must set "parseTime=true" in the sql.Open call or the
|
||||
time conversion meddlers will not work.
|
||||
|
||||
|
||||
Why?
|
||||
----
|
||||
|
||||
These are the features that set meddler apart from similar
|
||||
libraries:
|
||||
|
||||
* It uses standard database/sql types, and does not require
|
||||
special fields in your structs. This lets you use meddler
|
||||
selectively, without having to alter other database code already
|
||||
in your project. After creating meddler, I incorporated it into
|
||||
an existing project, and I was able to convert the code one
|
||||
struct and one query at a time.
|
||||
* It leaves query writing to you. It has convenience functions for
|
||||
simple INSERT/UPDATE/SELECT queries by integer primary key, but
|
||||
beyond that it stays out of query writing.
|
||||
* It supports on-the-fly data transformations. If you have a map
|
||||
or a slice in your struct, you can instruct meddler to
|
||||
encode/decode using JSON or Gob automatically. If you have time
|
||||
fields, you can have meddler automatically write them into the
|
||||
database as UTC, and convert them to the local time zone on
|
||||
reads. These processors are called “meddlers”, because they
|
||||
meddle with the data instead of passing it through directly.
|
||||
* NULL fields in the database can be read as zero values in the
|
||||
struct, and zero values in the struct can be written as NULL
|
||||
values. This is not always the right thing to do, but it is
|
||||
often good enough and is much simpler than most alternatives.
|
||||
* It exposes low-level hooks for more complex situations. If you
|
||||
are writing a query that does not map well to the main helper
|
||||
functions, you can still get some help by using the lower-level
|
||||
functions to build your own helpers.
|
||||
|
||||
|
||||
High-level functions
|
||||
--------------------
|
||||
|
||||
Meddler does not create or alter tables. It just provides a little
|
||||
glue to make it easier to read and write structs as SQL rows. Start
|
||||
by annotating a struct:
|
||||
|
||||
``` go
|
||||
type Person struct {
|
||||
ID int `meddler:"id,pk"`
|
||||
Name string `meddler:"name"`
|
||||
Age int
|
||||
salary int
|
||||
Created time.Time `meddler:"created,localtime"`
|
||||
Closed time.Time `meddler:",localtimez"`
|
||||
}
|
||||
```
|
||||
|
||||
Notes about this example:
|
||||
|
||||
* If the optional tag is provided, the first field is the database
|
||||
column name. Note that "Closed" does not provide a column name,
|
||||
so it will default to "Closed". Likewise, if there is no tag,
|
||||
the field name will be used.
|
||||
* ID is marked as the primary key. Currently only integer primary
|
||||
keys are supported. This is only relevant to Load, Save, Insert,
|
||||
and Update, a few of the higher-level functions that need to
|
||||
understand primary keys. Meddler assumes that pk fields have an
|
||||
autoincrement mechanism set in the database.
|
||||
* Age has a column name of "Age". A tag is only necessary when the
|
||||
column name is not the same as the field name, or when you need
|
||||
to select other options.
|
||||
* salary is not an exported field, so meddler does not see it. It
|
||||
will be ignored.
|
||||
* Created is marked with "localtime". This means that it will be
|
||||
converted to UTC when being saved, and back to the local time
|
||||
zone when being loaded.
|
||||
* Closed has a column name of "Closed", since the tag did not
|
||||
specify anything different. Closed is marked as "localtimez".
|
||||
This has the same properties as "localtime", except that the
|
||||
zero time will be saved in the database as a null column (and
|
||||
null values will be loaded as the zero time value).
|
||||
|
||||
Meddler provides a few high-level functions (note: DB is an
|
||||
interface that works with a *sql.DB or a *sql.Tx):
|
||||
|
||||
* Load(db DB, table string, dst interface{}, pk int64) error
|
||||
|
||||
This loads a single record by its primary key. For example:
|
||||
|
||||
elt := new(Person)
|
||||
err := meddler.Load(db, "person", elt, 15)
|
||||
|
||||
db can be a *sql.DB or a *sql.Tx. The table is the name of the
|
||||
table, pk is the primary key value, and dst is a pointer to the
|
||||
struct where it should be stored.
|
||||
|
||||
Note: this call requires that the struct have an integer primary
|
||||
key field marked.
|
||||
|
||||
* Insert(db DB, table string, src interface{}) error
|
||||
|
||||
This inserts a new row into the database. If the struct value
|
||||
has a primary key field, it must be zero (and will be omitted
|
||||
from the insert statement, prompting a default autoincrement
|
||||
value).
|
||||
|
||||
elt := &Person{
|
||||
Name: "Alice",
|
||||
Age: 22,
|
||||
// ...
|
||||
}
|
||||
err := meddler.Insert(db, "person", elt)
|
||||
// elt.ID is updated to the value assigned by the database
|
||||
|
||||
* Update(db DB, table string, src interface{}) error
|
||||
|
||||
This updates an existing row. It must have a primary key, which
|
||||
must be non-zero.
|
||||
|
||||
Note: this call requires that the struct have an integer primary
|
||||
key field marked.
|
||||
|
||||
* Save(db DB, table string, src interface{}) error
|
||||
|
||||
Pick Insert or Update automatically. If there is a non-zero
|
||||
primary key present, it uses Update, otherwise it uses Insert.
|
||||
|
||||
Note: this call requires that the struct have an integer primary
|
||||
key field marked.
|
||||
|
||||
* QueryRow(db DB, dst interface{}, query string, args ...interface) error
|
||||
|
||||
Perform the given query, and scan the single-row result into
|
||||
dst, which must be a pointer to a struct.
|
||||
|
||||
For example:
|
||||
|
||||
elt := new(Person)
|
||||
err := meddler.QueryRow(db, elt, "select * from person where name = ?", "bob")
|
||||
|
||||
* QueryAll(db DB, dst interface{}, query string, args ...interface) error
|
||||
|
||||
Perform the given query, and scan the results into dst, which
|
||||
must be a pointer to a slice of struct pointers.
|
||||
|
||||
For example:
|
||||
|
||||
var people []*Person
|
||||
err := meddler.QueryAll(db, &people, "select * from person")
|
||||
|
||||
* Scan(rows *sql.Rows, dst interface{}) error
|
||||
|
||||
Scans a single row of data into a struct, complete with
|
||||
meddling. Can be called repeatedly to walk through all of the
|
||||
rows in a result set. Returns sql.ErrNoRows when there is no
|
||||
more data.
|
||||
|
||||
* ScanRow(rows *sql.Rows, dst interface{}) error
|
||||
|
||||
Similar to Scan, but guarantees that the rows object
|
||||
is closed when it returns. Also returns sql.ErrNoRows if there
|
||||
was no row.
|
||||
|
||||
* ScanAll(rows *sql.Rows, dst interface{}) error
|
||||
|
||||
Expects a pointer to a slice of structs/pointers to structs, and
|
||||
appends as many elements as it finds in the row set. Closes the
|
||||
row set when it is finished. Does not return sql.ErrNoRows on an
|
||||
empty set; instead it just does not add anything to the slice.
|
||||
|
||||
Note: all of these functions can also be used as methods on Database
|
||||
objects. When used as package functions, they use the Default
|
||||
Database object, which is MySQL unless you change it.
|
||||
|
||||
|
||||
Meddlers
|
||||
--------
|
||||
|
||||
A meddler is a handler that gets to meddle with a field before it is
|
||||
saved, or when it is loaded. "localtime" and "localtimez" are
|
||||
examples of built-in meddlers. The full list of built-in meddlers
|
||||
includes:
|
||||
|
||||
* identity: the default meddler, which does not do anything
|
||||
|
||||
* localtime: for time.Time and *time.Time fields. Converts the
|
||||
value to UTC on save, and back to the local time zone on loads.
|
||||
To set your local time zone, make sure the TZ environment
|
||||
variable is set when your program is launched, or use something
|
||||
like:
|
||||
|
||||
os.Setenv("TZ", "America/Denver")
|
||||
|
||||
in your initial setup, before you start using time functions.
|
||||
|
||||
* localtimez: same, but only for time.Time, and treats the zero
|
||||
time as a null field (converts both ways)
|
||||
|
||||
* utctime: similar to localtime, but keeps the value in UTC on
|
||||
loads. This ensures that the time is always converted to UTC on
|
||||
save, which is the sane way to save time values in a database.
|
||||
|
||||
* utctimez: same, but with zero time means null.
|
||||
|
||||
* zeroisnull: for other types where a zero value should be
|
||||
inserted as null, and null values should be read as zero values.
|
||||
Works for integer, unsigned integer, float, complex number, and
|
||||
string types. Note: not for pointer types.
|
||||
|
||||
* json: marshals the field value into JSON when saving, and
|
||||
unmarshals on load.
|
||||
|
||||
* jsongzip: same, but compresses using gzip on save, and
|
||||
uncompresses on load
|
||||
|
||||
* gob: encodes the field value using Gob when saving, and
|
||||
decodes on load.
|
||||
|
||||
* gobgzip: same, but compresses using gzip on save, and
|
||||
uncompresses on load
|
||||
|
||||
You can implement custom meddlers as well by implementing the
|
||||
Meddler interface. See the existing implementations in medder.go for
|
||||
examples.
|
||||
|
||||
|
||||
Working with different database types
|
||||
-------------------------------------
|
||||
|
||||
Meddler can work with multiple database types simultaneously.
|
||||
Database-specific parameters are stored in a Database struct, and
|
||||
structs are pre-defined for MySQL, PostgreSQL, and SQLite.
|
||||
|
||||
Instead of relying on the package-level functions, use the method
|
||||
form on the appropriate database type, e.g.:
|
||||
|
||||
err = meddler.PostgreSQL.Load(...)
|
||||
|
||||
instead of
|
||||
|
||||
err = meddler.Load(...)
|
||||
|
||||
Or to save typing, define your own abbreviated name for each
|
||||
database:
|
||||
|
||||
ms := meddler.MySQL
|
||||
pg := meddler.PostgreSQL
|
||||
err = ms.Load(...)
|
||||
err = pg.QueryAll(...)
|
||||
|
||||
If you need a different database, create your own Database instance
|
||||
with the appropriate parameters set. If everything works okay,
|
||||
please contact me with the parameters you used so I can add the new
|
||||
database to the pre-defined list.
|
||||
|
||||
|
||||
Lower-level functions
|
||||
---------------------
|
||||
|
||||
If you are using more complex queries and just want to reduce the
|
||||
tedium of reading and writing values, there are some lower-level
|
||||
helper functions as well. See the package docs for details, and
|
||||
see the implementations of the higher-level functions to see how
|
||||
they are used.
|
||||
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
Meddler is distributed under the BSD 2-Clause License. If this
|
||||
license prevents you from using Meddler in your project, please
|
||||
contact me and I will consider adding an additional license that is
|
||||
better suited to your needs.
|
||||
|
||||
> Copyright © 2013 Russ Ross.
|
||||
> All rights reserved.
|
||||
>
|
||||
> Redistribution and use in source and binary forms, with or without
|
||||
> modification, are permitted provided that the following conditions
|
||||
> are met:
|
||||
>
|
||||
> 1. Redistributions of source code must retain the above copyright
|
||||
> notice, this list of conditions and the following disclaimer.
|
||||
>
|
||||
> 2. Redistributions in binary form must reproduce the above
|
||||
> copyright notice, this list of conditions and the following
|
||||
> disclaimer in the documentation and/or other materials provided with
|
||||
> the distribution.
|
||||
>
|
||||
> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
|
||||
> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
||||
> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
|
||||
> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
|
||||
> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
> POSSIBILITY OF SUCH DAMAGE.
|
|
@ -1,18 +0,0 @@
|
|||
/*
|
||||
Meddler is a small toolkit to take some of the tedium out of moving data
|
||||
back and forth between sql queries and structs.
|
||||
|
||||
It is not a complete ORM. It is intended to be a lightweight way to add some
|
||||
of the convenience of an ORM while leaving more control in the hands of the
|
||||
programmer.
|
||||
|
||||
Package docs are available at:
|
||||
|
||||
http://godoc.org/github.com/russross/meddler
|
||||
|
||||
The package is housed on github, and the README there has more info:
|
||||
|
||||
http://github.com/russross/meddler
|
||||
|
||||
*/
|
||||
package meddler
|
|
@ -1,247 +0,0 @@
|
|||
package meddler
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// dbErr pairs an error from the database driver with a message describing
// the meddler operation that triggered it.
type dbErr struct {
	msg string
	err error
}

// Error renders the wrapped error as "<msg>: <driver error>".
func (e *dbErr) Error() string {
	return e.msg + ": " + fmt.Sprint(e.err)
}

// DriverErr returns the original error as returned by the database driver
// if the error comes from the driver, with the second value set to true.
// Otherwise, it returns err itself with false as second value.
func DriverErr(err error) (error, bool) {
	dbe, ok := err.(*dbErr)
	if !ok {
		return err, false
	}
	return dbe.err, true
}
|
||||
|
||||
// DB is a generic database interface, matching both *sql.Db and *sql.Tx
type DB interface {
	// Exec runs a statement that returns no rows (INSERT, UPDATE, ...).
	Exec(query string, args ...interface{}) (sql.Result, error)
	// Query runs a statement that may return any number of rows.
	Query(query string, args ...interface{}) (*sql.Rows, error)
	// QueryRow runs a statement expected to return at most one row.
	QueryRow(query string, args ...interface{}) *sql.Row
}
|
||||
|
||||
// Load loads a record using a query for the primary key field.
// Returns sql.ErrNoRows if not found.
//
// dst must be a pointer to a struct with a meddler-tagged integer primary
// key; table is the unquoted table name and pk the key value to look up.
func (d *Database) Load(db DB, table string, dst interface{}, pk int64) error {
	// Quoted column list for the SELECT, derived from dst's meddler tags.
	columns, err := d.ColumnsQuoted(dst, true)
	if err != nil {
		return err
	}

	// make sure we have a primary key field
	pkName, _, err := d.PrimaryKey(dst)
	if err != nil {
		return err
	}
	if pkName == "" {
		return fmt.Errorf("meddler.Load: no primary key field found")
	}

	// run the query
	q := fmt.Sprintf("SELECT %s FROM %s WHERE %s = %s", columns, d.quoted(table), d.quoted(pkName), d.Placeholder)

	rows, err := db.Query(q, pk)
	if err != nil {
		return &dbErr{msg: "meddler.Load: DB error in Query", err: err}
	}

	// scan the row
	// (ScanRow closes rows before it returns, so no explicit Close here.)
	return d.ScanRow(rows, dst)
}
|
||||
|
||||
// Load using the Default Database type
//
// Equivalent to Default.Load; see (*Database).Load for details.
func Load(db DB, table string, dst interface{}, pk int64) error {
	return Default.Load(db, table, dst, pk)
}
|
||||
|
||||
// Insert performs an INSERT query for the given record.
// If the record has a primary key flagged, it must be zero, and it
// will be set to the newly-allocated primary key value from the database
// as returned by LastInsertId.
func (d *Database) Insert(db DB, table string, src interface{}) error {
	pkName, pkValue, err := d.PrimaryKey(src)
	if err != nil {
		return err
	}
	// A flagged primary key must be unset: the database allocates it.
	if pkName != "" && pkValue != 0 {
		return fmt.Errorf("meddler.Insert: primary key must be zero")
	}

	// gather the query parts
	namesPart, err := d.ColumnsQuoted(src, false)
	if err != nil {
		return err
	}
	valuesPart, err := d.PlaceholdersString(src, false)
	if err != nil {
		return err
	}
	values, err := d.Values(src, false)
	if err != nil {
		return err
	}

	// run the query
	q := fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", d.quoted(table), namesPart, valuesPart)
	if d.UseReturningToGetID && pkName != "" {
		// Dialect supports RETURNING: read the generated key from the
		// INSERT itself.
		q += " RETURNING " + d.quoted(pkName)
		var newPk int64
		err := db.QueryRow(q, values...).Scan(&newPk)
		if err != nil {
			return &dbErr{msg: "meddler.Insert: DB error in QueryRow", err: err}
		}
		if err = d.SetPrimaryKey(src, newPk); err != nil {
			return fmt.Errorf("meddler.Insert: Error saving updated pk: %v", err)
		}
	} else if pkName != "" {
		// Otherwise fetch the generated key via LastInsertId.
		result, err := db.Exec(q, values...)
		if err != nil {
			return &dbErr{msg: "meddler.Insert: DB error in Exec", err: err}
		}

		// save the new primary key
		newPk, err := result.LastInsertId()
		if err != nil {
			return &dbErr{msg: "meddler.Insert: DB error getting new primary key value", err: err}
		}
		if err = d.SetPrimaryKey(src, newPk); err != nil {
			return fmt.Errorf("meddler.Insert: Error saving updated pk: %v", err)
		}
	} else {
		// no primary key, so no need to lookup new value
		_, err := db.Exec(q, values...)
		if err != nil {
			return &dbErr{msg: "meddler.Insert: DB error in Exec", err: err}
		}
	}

	return nil
}
|
||||
|
||||
// Insert using the Default Database type
//
// Equivalent to Default.Insert; see (*Database).Insert for details.
func Insert(db DB, table string, src interface{}) error {
	return Default.Insert(db, table, src)
}
|
||||
|
||||
// Update performs an UPDATE query for the given record.
// The record must have an integer primary key field that is non-zero,
// and it will be used to select the database row that gets updated.
func (d *Database) Update(db DB, table string, src interface{}) error {
	// gather the query parts
	names, err := d.Columns(src, false)
	if err != nil {
		return err
	}
	placeholders, err := d.Placeholders(src, false)
	if err != nil {
		return err
	}
	values, err := d.Values(src, false)
	if err != nil {
		return err
	}

	// form the column=placeholder pairs
	var pairs []string
	for i := 0; i < len(names) && i < len(placeholders); i++ {
		pair := fmt.Sprintf("%s=%s", d.quoted(names[i]), placeholders[i])
		pairs = append(pairs, pair)
	}

	pkName, pkValue, err := d.PrimaryKey(src)
	if err != nil {
		return err
	}
	if pkName == "" {
		return fmt.Errorf("meddler.Update: no primary key field")
	}
	if pkValue < 1 {
		return fmt.Errorf("meddler.Update: primary key must be an integer > 0")
	}
	// Placeholder for the WHERE clause follows the SET placeholders.
	ph := d.placeholder(len(placeholders) + 1)

	// run the query
	q := fmt.Sprintf("UPDATE %s SET %s WHERE %s=%s", d.quoted(table),
		strings.Join(pairs, ","),
		d.quoted(pkName), ph)
	// The primary key value is bound as the final query argument.
	values = append(values, pkValue)

	if _, err := db.Exec(q, values...); err != nil {
		return &dbErr{msg: "meddler.Update: DB error in Exec", err: err}
	}

	return nil
}
|
||||
|
||||
// Update updates the row for src in table using the package-level Default
// Database type.
func Update(db DB, table string, src interface{}) error {
	return Default.Update(db, table, src)
}
|
||||
|
||||
// Save performs an INSERT or an UPDATE, depending on whether or not
|
||||
// a primary keys exists and is non-zero.
|
||||
func (d *Database) Save(db DB, table string, src interface{}) error {
|
||||
pkName, pkValue, err := d.PrimaryKey(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pkName != "" && pkValue != 0 {
|
||||
return d.Update(db, table, src)
|
||||
} else {
|
||||
return d.Insert(db, table, src)
|
||||
}
|
||||
}
|
||||
|
||||
// Save inserts or updates src in table using the package-level Default
// Database type.
func Save(db DB, table string, src interface{}) error {
	return Default.Save(db, table, src)
}
|
||||
|
||||
// QueryRow performs the given query with the given arguments, scanning a
// single row of results into dst. Returns sql.ErrNoRows if there was no
// result row.
func (d *Database) QueryRow(db DB, dst interface{}, query string, args ...interface{}) error {
	// perform the query
	rows, err := db.Query(query, args...)
	if err != nil {
		return err
	}

	// gather the result; ScanRow closes rows when it is done
	return d.ScanRow(rows, dst)
}
|
||||
|
||||
// QueryRow queries a single row into dst using the package-level Default
// Database type.
func QueryRow(db DB, dst interface{}, query string, args ...interface{}) error {
	return Default.QueryRow(db, dst, query, args...)
}
|
||||
|
||||
// QueryAll performs the given query with the given arguments, scanning
// all results rows into dst, which must be a pointer to a slice of
// struct pointers.
func (d *Database) QueryAll(db DB, dst interface{}, query string, args ...interface{}) error {
	// perform the query
	rows, err := db.Query(query, args...)
	if err != nil {
		return err
	}

	// gather the results; ScanAll closes rows when it is done
	return d.ScanAll(rows, dst)
}
|
||||
|
||||
// QueryAll queries all result rows into dst using the package-level Default
// Database type.
func QueryAll(db DB, dst interface{}, query string, args ...interface{}) error {
	return Default.QueryAll(db, dst, query, args...)
}
|
|
@ -1,351 +0,0 @@
|
|||
package meddler
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"encoding/gob"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Meddler is the interface for a field meddler. Implementations can be
// registered to convert struct fields being loaded and saved in the database.
type Meddler interface {
	// PreRead is called before a Scan operation. It is given a pointer to
	// the raw struct field, and returns the value that will be given to
	// the database driver.
	PreRead(fieldAddr interface{}) (scanTarget interface{}, err error)

	// PostRead is called after a Scan operation. It is given the value returned
	// by PreRead and a pointer to the raw struct field. It is expected to fill
	// in the struct field if the two are different.
	PostRead(fieldAddr interface{}, scanTarget interface{}) error

	// PreWrite is called before an Insert or Update operation. It is given
	// the field value (not a pointer), and returns the value that will be
	// given to the database driver.
	PreWrite(field interface{}) (saveValue interface{}, err error)
}
|
||||
|
||||
// Register sets up a meddler type. Meddlers get a chance to meddle with the
// data being loaded or saved when a field is annotated with the name of the meddler.
// The registry is global.
func Register(name string, m Meddler) {
	// "pk" is reserved in struct tags for marking the primary key field
	if name == "pk" {
		panic("meddler.Register: pk cannot be used as a meddler name")
	}
	registry[name] = m
}
|
||||
|
||||
// registry maps meddler names (as used in struct tags) to implementations.
var registry = make(map[string]Meddler)

// init registers the built-in meddlers under their struct-tag names.
func init() {
	Register("identity", IdentityMeddler(false))
	Register("localtime", TimeMeddler{ZeroIsNull: false, Local: true})
	Register("localtimez", TimeMeddler{ZeroIsNull: true, Local: true})
	Register("utctime", TimeMeddler{ZeroIsNull: false, Local: false})
	Register("utctimez", TimeMeddler{ZeroIsNull: true, Local: false})
	Register("zeroisnull", ZeroIsNullMeddler(false))
	Register("json", JSONMeddler(false))
	Register("jsongzip", JSONMeddler(true))
	Register("gob", GobMeddler(false))
	Register("gobgzip", GobMeddler(true))
}
|
||||
|
||||
// IdentityMeddler is the default meddler; it hands every value to the
// database driver unchanged in both directions.
type IdentityMeddler bool

// PreRead passes the raw field address straight through as the scan target.
func (m IdentityMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) {
	return fieldAddr, nil
}

// PostRead is a no-op: Scan already wrote directly into the field.
func (m IdentityMeddler) PostRead(fieldAddr, scanTarget interface{}) error {
	return nil
}

// PreWrite passes the field value straight through to the driver.
func (m IdentityMeddler) PreWrite(field interface{}) (saveValue interface{}, err error) {
	return field, nil
}
|
||||
|
||||
// TimeMeddler provides useful operations on time.Time fields. It can convert the zero time
|
||||
// to and from a null column, and it can convert the time zone to UTC on save and to Local on load.
|
||||
type TimeMeddler struct {
|
||||
ZeroIsNull bool
|
||||
Local bool
|
||||
}
|
||||
|
||||
func (elt TimeMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) {
|
||||
switch tgt := fieldAddr.(type) {
|
||||
case *time.Time:
|
||||
if elt.ZeroIsNull {
|
||||
return &tgt, nil
|
||||
}
|
||||
return fieldAddr, nil
|
||||
case **time.Time:
|
||||
if elt.ZeroIsNull {
|
||||
return nil, fmt.Errorf("meddler.TimeMeddler cannot be used on a *time.Time field, only time.Time")
|
||||
}
|
||||
return fieldAddr, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("meddler.TimeMeddler.PreRead: unknown struct field type: %T", fieldAddr)
|
||||
}
|
||||
}
|
||||
|
||||
func (elt TimeMeddler) PostRead(fieldAddr, scanTarget interface{}) error {
|
||||
switch tgt := fieldAddr.(type) {
|
||||
case *time.Time:
|
||||
if elt.ZeroIsNull {
|
||||
src := scanTarget.(**time.Time)
|
||||
if *src == nil {
|
||||
*tgt = time.Time{}
|
||||
} else if elt.Local {
|
||||
*tgt = (*src).Local()
|
||||
} else {
|
||||
*tgt = (*src).UTC()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
src := scanTarget.(*time.Time)
|
||||
if elt.Local {
|
||||
*tgt = src.Local()
|
||||
} else {
|
||||
*tgt = src.UTC()
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
case **time.Time:
|
||||
if elt.ZeroIsNull {
|
||||
return fmt.Errorf("meddler TimeMeddler cannot be used on a *time.Time field, only time.Time")
|
||||
}
|
||||
src := scanTarget.(**time.Time)
|
||||
if *src == nil {
|
||||
*tgt = nil
|
||||
} else if elt.Local {
|
||||
**src = (*src).Local()
|
||||
*tgt = *src
|
||||
} else {
|
||||
**src = (*src).UTC()
|
||||
*tgt = *src
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
default:
|
||||
return fmt.Errorf("meddler.TimeMeddler.PostRead: unknown struct field type: %T", fieldAddr)
|
||||
}
|
||||
}
|
||||
|
||||
func (elt TimeMeddler) PreWrite(field interface{}) (saveValue interface{}, err error) {
|
||||
switch tgt := field.(type) {
|
||||
case time.Time:
|
||||
if elt.ZeroIsNull && tgt.IsZero() {
|
||||
return nil, nil
|
||||
}
|
||||
return tgt.UTC(), nil
|
||||
|
||||
case *time.Time:
|
||||
if tgt == nil || elt.ZeroIsNull && tgt.IsZero() {
|
||||
return nil, nil
|
||||
}
|
||||
return tgt.UTC(), nil
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("meddler.TimeMeddler.PreWrite: unknown struct field type: %T", field)
|
||||
}
|
||||
}
|
||||
|
||||
// ZeroIsNullMeddler converts zero value fields (integers both signed and
// unsigned, floats, complex numbers, strings, and bools) to and from null
// database columns.
type ZeroIsNullMeddler bool

// PreRead returns a pointer-to-pointer scan target; the database driver
// sets the inner pointer to nil when the column value is null.
func (m ZeroIsNullMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) {
	return reflect.New(reflect.TypeOf(fieldAddr)).Interface(), nil
}

// PostRead writes either the zero value (for a null column) or the scanned
// value into the struct field.
func (m ZeroIsNullMeddler) PostRead(fieldAddr, scanTarget interface{}) error {
	src := reflect.ValueOf(scanTarget).Elem()
	dst := reflect.ValueOf(fieldAddr).Elem()
	if src.IsNil() {
		// null column: reset the field to its zero value
		dst.Set(reflect.Zero(dst.Type()))
		return nil
	}
	// copy the value that Scan found
	dst.Set(src.Elem())
	return nil
}

// PreWrite maps a zero field value to nil (stored as NULL) and passes any
// other value through unchanged.
func (m ZeroIsNullMeddler) PreWrite(field interface{}) (saveValue interface{}, err error) {
	v := reflect.ValueOf(field)
	isZero := false
	switch v.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		isZero = v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		isZero = v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		isZero = v.Float() == 0
	case reflect.Complex64, reflect.Complex128:
		isZero = v.Complex() == 0
	case reflect.String:
		isZero = v.String() == ""
	case reflect.Bool:
		isZero = !v.Bool()
	default:
		return nil, fmt.Errorf("ZeroIsNullMeddler.PreWrite: unknown struct field type: %T", field)
	}

	if isZero {
		return nil, nil
	}
	return field, nil
}
|
||||
|
||||
// JSONMeddler stores the field as JSON text; when the meddler value is true
// the JSON is additionally gzip-compressed in the column.
type JSONMeddler bool

// PreRead returns a fresh byte-slice target to capture the raw column data.
func (z JSONMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) {
	return new([]byte), nil
}

// PostRead decodes the raw column bytes (optionally gunzipping first) into
// the struct field.
func (z JSONMeddler) PostRead(fieldAddr, scanTarget interface{}) error {
	ptr := scanTarget.(*[]byte)
	if ptr == nil {
		return fmt.Errorf("JSONMeddler.PostRead: nil pointer")
	}
	raw := *ptr

	if !z {
		// plain JSON: decode straight from the raw bytes
		if err := json.NewDecoder(bytes.NewReader(raw)).Decode(fieldAddr); err != nil {
			return fmt.Errorf("JSON decode error: %v", err)
		}
		return nil
	}

	// gzipped JSON: decompress, then decode
	gz, err := gzip.NewReader(bytes.NewReader(raw))
	if err != nil {
		return fmt.Errorf("Error creating gzip Reader: %v", err)
	}
	defer gz.Close()
	if err := json.NewDecoder(gz).Decode(fieldAddr); err != nil {
		return fmt.Errorf("JSON decoder/gzip error: %v", err)
	}
	if err := gz.Close(); err != nil {
		return fmt.Errorf("Closing gzip reader: %v", err)
	}
	return nil
}

// PreWrite encodes the field as JSON (optionally gzip-compressed) and
// returns the resulting bytes for storage.
func (z JSONMeddler) PreWrite(field interface{}) (saveValue interface{}, err error) {
	var buf bytes.Buffer

	if !z {
		// plain JSON encoding
		if err := json.NewEncoder(&buf).Encode(field); err != nil {
			return nil, fmt.Errorf("JSON encoding error: %v", err)
		}
		return buf.Bytes(), nil
	}

	// JSON encode through a gzip writer
	gz := gzip.NewWriter(&buf)
	defer gz.Close()
	if err := json.NewEncoder(gz).Encode(field); err != nil {
		return nil, fmt.Errorf("JSON encoding/gzip error: %v", err)
	}
	if err := gz.Close(); err != nil {
		return nil, fmt.Errorf("Closing gzip writer: %v", err)
	}
	return buf.Bytes(), nil
}
|
||||
|
||||
// GobMeddler stores the field gob-encoded; when the meddler value is true
// the gob stream is additionally gzip-compressed in the column.
type GobMeddler bool

// PreRead returns a fresh byte-slice target to capture the raw column data.
func (z GobMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) {
	return new([]byte), nil
}

// PostRead decodes the raw column bytes (optionally gunzipping first) into
// the struct field.
func (z GobMeddler) PostRead(fieldAddr, scanTarget interface{}) error {
	ptr := scanTarget.(*[]byte)
	if ptr == nil {
		return fmt.Errorf("GobMeddler.PostRead: nil pointer")
	}
	raw := *ptr

	if !z {
		// plain gob: decode straight from the raw bytes
		if err := gob.NewDecoder(bytes.NewReader(raw)).Decode(fieldAddr); err != nil {
			return fmt.Errorf("Gob decode error: %v", err)
		}
		return nil
	}

	// gzipped gob: decompress, then decode
	gz, err := gzip.NewReader(bytes.NewReader(raw))
	if err != nil {
		return fmt.Errorf("Error creating gzip Reader: %v", err)
	}
	defer gz.Close()
	if err := gob.NewDecoder(gz).Decode(fieldAddr); err != nil {
		return fmt.Errorf("Gob decoder/gzip error: %v", err)
	}
	if err := gz.Close(); err != nil {
		return fmt.Errorf("Closing gzip reader: %v", err)
	}
	return nil
}

// PreWrite gob-encodes the field (optionally gzip-compressed) and returns
// the resulting bytes for storage.
func (z GobMeddler) PreWrite(field interface{}) (saveValue interface{}, err error) {
	var buf bytes.Buffer

	if !z {
		// plain gob encoding
		if err := gob.NewEncoder(&buf).Encode(field); err != nil {
			return nil, fmt.Errorf("Gob encoding error: %v", err)
		}
		return buf.Bytes(), nil
	}

	// gob encode through a gzip writer
	gz := gzip.NewWriter(&buf)
	defer gz.Close()
	if err := gob.NewEncoder(gz).Encode(field); err != nil {
		return nil, fmt.Errorf("Gob encoding/gzip error: %v", err)
	}
	if err := gz.Close(); err != nil {
		return nil, fmt.Errorf("Closing gzip writer: %v", err)
	}
	return buf.Bytes(), nil
}
|
|
@ -1,576 +0,0 @@
|
|||
package meddler
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// tagName is the struct tag key this package reads, e.g. `meddler:"col,pk"`.
const tagName = "meddler"
|
||||
|
||||
// Database contains database-specific options.
// MySQL, PostgreSQL, and SQLite are provided for convenience.
// Setting Default to any of these lets you use the package-level convenience functions.
type Database struct {
	Quote               string // the quote character for table and column names
	Placeholder         string // the placeholder style to use in generated queries
	UseReturningToGetID bool   // use PostgreSQL-style RETURNING "ID" instead of calling sql.Result.LastInsertID
}
|
||||
|
||||
// MySQL quotes identifiers with backticks and uses ? placeholders.
var MySQL = &Database{
	Quote:               "`",
	Placeholder:         "?",
	UseReturningToGetID: false,
}

// PostgreSQL quotes identifiers with double quotes, numbers placeholders
// $1, $2, ..., and retrieves new primary keys via RETURNING.
var PostgreSQL = &Database{
	Quote:               `"`,
	Placeholder:         "$1",
	UseReturningToGetID: true,
}

// SQLite quotes identifiers with double quotes and uses ? placeholders.
var SQLite = &Database{
	Quote:               `"`,
	Placeholder:         "?",
	UseReturningToGetID: false,
}

// Default is the Database used by the package-level convenience functions.
var Default = MySQL
|
||||
|
||||
// quoted wraps an identifier in the database's quote character.
func (d *Database) quoted(s string) string {
	return d.Quote + s + d.Quote
}

// placeholder renders the n-th query placeholder: for a numbered style
// such as "$1" the "1" is replaced by n; for "?" the input has no "1"
// and is returned unchanged.
func (d *Database) placeholder(n int) string {
	return strings.Replace(d.Placeholder, "1", strconv.FormatInt(int64(n), 10), 1)
}
|
||||
|
||||
// Debug enables debug mode, where unused columns and struct fields will be logged
var Debug = true
|
||||
|
||||
// structField records how a single struct field maps to a database column.
type structField struct {
	column     string  // database column name
	index      int     // field index within the struct, for reflect.Value.Field
	primaryKey bool    // true when this column is the primary key
	meddler    Meddler // conversion applied when loading/saving the field
}

// structData is the cached reflection summary for one struct type.
type structData struct {
	columns []string                // column names in struct declaration order
	fields  map[string]*structField // column name -> field mapping
	pk      string                  // primary key column name, "" if none
}
|
||||
|
||||
// cache reflection data, keyed by struct pointer type; guarded by
// fieldsCacheMutex since maps are not safe for concurrent use.
var fieldsCache = make(map[reflect.Type]*structData)
var fieldsCacheMutex sync.Mutex
|
||||
|
||||
// getFields gathers the list of columns from a struct using reflection.
// dstType must be a pointer-to-struct type. Results are cached globally,
// so the reflection cost is paid once per type.
func getFields(dstType reflect.Type) (*structData, error) {
	// the mutex guards both the cache lookup and the insert at the end
	fieldsCacheMutex.Lock()
	defer fieldsCacheMutex.Unlock()

	if result, present := fieldsCache[dstType]; present {
		return result, nil
	}

	// make sure dst is a non-nil pointer to a struct
	if dstType.Kind() != reflect.Ptr {
		return nil, fmt.Errorf("meddler called with non-pointer destination %v", dstType)
	}
	structType := dstType.Elem()
	if structType.Kind() != reflect.Struct {
		return nil, fmt.Errorf("meddler called with pointer to non-struct %v", dstType)
	}

	// gather the list of fields in the struct
	data := new(structData)
	data.fields = make(map[string]*structField)

	for i := 0; i < structType.NumField(); i++ {
		f := structType.Field(i)

		// skip non-exported fields (PkgPath is non-empty for unexported)
		if f.PkgPath != "" {
			continue
		}

		// examine the tag for metadata: `meddler:"name,option,..."`
		tag := strings.Split(f.Tag.Get(tagName), ",")

		// was this field marked for skipping?
		if len(tag) > 0 && tag[0] == "-" {
			continue
		}

		// default to the field name
		name := f.Name

		// the tag can override the field name
		if len(tag) > 0 && tag[0] != "" {
			name = tag[0]
		}

		// check for a meddler; "identity" is the pass-through default
		var meddler Meddler = registry["identity"]
		for j := 1; j < len(tag); j++ {
			if tag[j] == "pk" {
				if f.Type.Kind() == reflect.Ptr {
					return nil, fmt.Errorf("meddler found field %s which is marked as the primary key but is a pointer", f.Name)
				}

				// make sure it is an int of some kind
				switch f.Type.Kind() {
				case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
				default:
					return nil, fmt.Errorf("meddler found field %s which is marked as the primary key, but is not an integer type", f.Name)
				}

				// only one primary key field is allowed per struct
				if data.pk != "" {
					return nil, fmt.Errorf("meddler found field %s which is marked as the primary key, but a primary key field was already found", f.Name)
				}
				data.pk = name
			} else if m, present := registry[tag[j]]; present {
				meddler = m
			} else {
				return nil, fmt.Errorf("meddler found field %s with meddler %s, but that meddler is not registered", f.Name, tag[j])
			}
		}

		// two fields mapping to the same column name is an error
		if _, present := data.fields[name]; present {
			return nil, fmt.Errorf("meddler found multiple fields for column %s", name)
		}
		data.fields[name] = &structField{
			column:     name,
			primaryKey: name == data.pk,
			index:      i,
			meddler:    meddler,
		}
		data.columns = append(data.columns, name)
	}

	fieldsCache[dstType] = data
	return data, nil
}
|
||||
|
||||
// Columns returns a list of column names for its input struct.
|
||||
func (d *Database) Columns(src interface{}, includePk bool) ([]string, error) {
|
||||
data, err := getFields(reflect.TypeOf(src))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var names []string
|
||||
for _, elt := range data.columns {
|
||||
if !includePk && elt == data.pk {
|
||||
continue
|
||||
}
|
||||
names = append(names, elt)
|
||||
}
|
||||
|
||||
return names, nil
|
||||
}
|
||||
|
||||
// Columns returns src's column names using the package-level Default
// Database type.
func Columns(src interface{}, includePk bool) ([]string, error) {
	return Default.Columns(src, includePk)
}
|
||||
|
||||
// ColumnsQuoted is similar to Columns, but it return the list of columns in the form:
|
||||
// `column1`,`column2`,...
|
||||
// using Quote as the quote character.
|
||||
func (d *Database) ColumnsQuoted(src interface{}, includePk bool) (string, error) {
|
||||
unquoted, err := Columns(src, includePk)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var parts []string
|
||||
for _, elt := range unquoted {
|
||||
parts = append(parts, d.quoted(elt))
|
||||
}
|
||||
|
||||
return strings.Join(parts, ","), nil
|
||||
}
|
||||
|
||||
// ColumnsQuoted returns src's quoted, comma-joined column names using the
// package-level Default Database type.
func ColumnsQuoted(src interface{}, includePk bool) (string, error) {
	return Default.ColumnsQuoted(src, includePk)
}
|
||||
|
||||
// PrimaryKey returns the name and value of the primary key field. The name
// is the empty string if there is no primary key field marked.
func (d *Database) PrimaryKey(src interface{}) (name string, pk int64, err error) {
	data, err := getFields(reflect.TypeOf(src))
	if err != nil {
		return "", 0, err
	}

	if data.pk == "" {
		return "", 0, nil
	}

	name = data.pk
	field := reflect.ValueOf(src).Elem().Field(data.fields[name].index)
	// read the pk as an int64 regardless of the concrete integer kind
	switch field.Type().Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		pk = field.Int()
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		pk = int64(field.Uint())
	default:
		return "", 0, fmt.Errorf("meddler found field %s which is marked as the primary key, but is not an integer type", name)
	}

	return name, pk, nil
}
|
||||
|
||||
// PrimaryKey returns src's primary key name and value using the
// package-level Default Database type.
func PrimaryKey(src interface{}) (name string, pk int64, err error) {
	return Default.PrimaryKey(src)
}
|
||||
|
||||
// SetPrimaryKey sets the primary key field to the given int value.
func (d *Database) SetPrimaryKey(src interface{}, pk int64) error {
	data, err := getFields(reflect.TypeOf(src))
	if err != nil {
		return err
	}

	if data.pk == "" {
		return fmt.Errorf("meddler.SetPrimaryKey: no primary key field found")
	}

	field := reflect.ValueOf(src).Elem().Field(data.fields[data.pk].index)
	// store the pk using whichever integer kind the field declares
	switch field.Type().Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		field.SetInt(pk)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		field.SetUint(uint64(pk))
	default:
		return fmt.Errorf("meddler found field %s which is marked as the primary key, but is not an integer type", data.pk)
	}

	return nil
}
|
||||
|
||||
// SetPrimaryKey sets src's primary key field using the package-level
// Default Database type.
func SetPrimaryKey(src interface{}, pk int64) error {
	return Default.SetPrimaryKey(src, pk)
}
|
||||
|
||||
// Values returns a list of PreWrite processed values suitable for
// use in an INSERT or UPDATE query. If includePk is false, the primary
// key field is omitted. The columns used are the same ones (in the same
// order) as returned by Columns.
func (d *Database) Values(src interface{}, includePk bool) ([]interface{}, error) {
	columns, err := d.Columns(src, includePk)
	if err != nil {
		return nil, err
	}
	return d.SomeValues(src, columns)
}
|
||||
|
||||
// Values returns src's PreWrite-processed values using the package-level
// Default Database type.
func Values(src interface{}, includePk bool) ([]interface{}, error) {
	return Default.Values(src, includePk)
}
|
||||
|
||||
// SomeValues returns a list of PreWrite processed values suitable for
// use in an INSERT or UPDATE query. The columns used are the same ones (in
// the same order) as specified in the columns argument.
func (d *Database) SomeValues(src interface{}, columns []string) ([]interface{}, error) {
	data, err := getFields(reflect.TypeOf(src))
	if err != nil {
		return nil, err
	}
	structVal := reflect.ValueOf(src).Elem()

	var values []interface{}
	for _, name := range columns {
		field, present := data.fields[name]
		if !present {
			// column has no matching struct field: write null to the database
			values = append(values, nil)

			if Debug {
				log.Printf("meddler.SomeValues: column [%s] not found in struct", name)
			}
			continue
		}

		// let the field's meddler transform the value before saving
		saveVal, err := field.meddler.PreWrite(structVal.Field(field.index).Interface())
		if err != nil {
			return nil, fmt.Errorf("meddler.SomeValues: PreWrite error on column [%s]: %v", name, err)
		}
		values = append(values, saveVal)
	}

	return values, nil
}
|
||||
|
||||
// SomeValues returns PreWrite-processed values for the named columns using
// the package-level Default Database type.
func SomeValues(src interface{}, columns []string) ([]interface{}, error) {
	return Default.SomeValues(src, columns)
}
|
||||
|
||||
// Placeholders returns a list of placeholders suitable for an INSERT or UPDATE query.
|
||||
// If includePk is false, the primary key field is omitted.
|
||||
func (d *Database) Placeholders(src interface{}, includePk bool) ([]string, error) {
|
||||
data, err := getFields(reflect.TypeOf(src))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var placeholders []string
|
||||
for _, name := range data.columns {
|
||||
if !includePk && name == data.pk {
|
||||
continue
|
||||
}
|
||||
ph := d.placeholder(len(placeholders) + 1)
|
||||
placeholders = append(placeholders, ph)
|
||||
}
|
||||
|
||||
return placeholders, nil
|
||||
}
|
||||
|
||||
// Placeholders returns src's query placeholders using the package-level
// Default Database type.
func Placeholders(src interface{}, includePk bool) ([]string, error) {
	return Default.Placeholders(src, includePk)
}
|
||||
|
||||
// PlaceholdersString returns a list of placeholders suitable for an INSERT
|
||||
// or UPDATE query in string form, e.g.:
|
||||
// ?,?,?,?
|
||||
// if includePk is false, the primary key field is omitted.
|
||||
func (d *Database) PlaceholdersString(src interface{}, includePk bool) (string, error) {
|
||||
lst, err := d.Placeholders(src, includePk)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.Join(lst, ","), nil
|
||||
}
|
||||
|
||||
// PlaceholdersString returns src's comma-joined placeholders using the
// package-level Default Database type.
func PlaceholdersString(src interface{}, includePk bool) (string, error) {
	return Default.PlaceholdersString(src, includePk)
}
|
||||
|
||||
// scanRow scans a single row of data into a struct.
// NOTE(review): the data parameter appears unused here — Targets and
// WriteTargets re-derive the field list via getFields; confirm before
// removing it from the signature.
func (d *Database) scanRow(data *structData, rows *sql.Rows, dst interface{}, columns []string) error {
	// check if there is data waiting
	if !rows.Next() {
		if err := rows.Err(); err != nil {
			return err
		}
		return sql.ErrNoRows
	}

	// get a list of scan targets, one per result column
	targets, err := d.Targets(dst, columns)
	if err != nil {
		return err
	}

	// perform the scan
	if err := rows.Scan(targets...); err != nil {
		return err
	}

	// post-process and copy the target values into the struct
	if err := d.WriteTargets(dst, columns, targets); err != nil {
		return err
	}

	return rows.Err()
}
|
||||
|
||||
// Targets returns a list of values suitable for handing to a
// Scan function in the sql package, complete with meddling. After
// the Scan is performed, the same values should be handed to
// WriteTargets to finalize the values and record them in the struct.
func (d *Database) Targets(dst interface{}, columns []string) ([]interface{}, error) {
	data, err := getFields(reflect.TypeOf(dst))
	if err != nil {
		return nil, err
	}

	structVal := reflect.ValueOf(dst).Elem()

	var targets []interface{}
	for _, name := range columns {
		if field, present := data.fields[name]; present {
			// let the field's meddler choose the scan target
			fieldAddr := structVal.Field(field.index).Addr().Interface()
			scanTarget, err := field.meddler.PreRead(fieldAddr)
			if err != nil {
				return nil, fmt.Errorf("meddler.Targets: PreRead error on column %s: %v", name, err)
			}
			targets = append(targets, scanTarget)
		} else {
			// no destination field, so scan into a throwaway value
			targets = append(targets, new(interface{}))

			if Debug {
				log.Printf("meddler.Targets: column [%s] not found in struct", name)
			}
		}
	}

	return targets, nil
}
|
||||
|
||||
// Targets returns scan targets for dst using the package-level Default
// Database type.
func Targets(dst interface{}, columns []string) ([]interface{}, error) {
	return Default.Targets(dst, columns)
}
|
||||
|
||||
// WriteTargets post-processes values with meddlers after a Scan from the
|
||||
// sql package has been performed. The list of targets is normally produced
|
||||
// by Targets.
|
||||
func (d *Database) WriteTargets(dst interface{}, columns []string, targets []interface{}) error {
|
||||
if len(columns) != len(targets) {
|
||||
return fmt.Errorf("meddler.WriteTargets: mismatch in number of columns (%d) and targets (%s)",
|
||||
len(columns), len(targets))
|
||||
}
|
||||
|
||||
data, err := getFields(reflect.TypeOf(dst))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
structVal := reflect.ValueOf(dst).Elem()
|
||||
|
||||
for i, name := range columns {
|
||||
if field, present := data.fields[name]; present {
|
||||
fieldAddr := structVal.Field(field.index).Addr().Interface()
|
||||
err := field.meddler.PostRead(fieldAddr, targets[i])
|
||||
if err != nil {
|
||||
return fmt.Errorf("meddler.WriteTargets: PostRead error on column [%s]: %v", name, err)
|
||||
}
|
||||
} else {
|
||||
// not destination, so throw this away
|
||||
if Debug {
|
||||
log.Printf("meddler.WriteTargets: column [%s] not found in struct", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteTargets finalizes scanned values into dst using the package-level
// Default Database type.
func WriteTargets(dst interface{}, columns []string, targets []interface{}) error {
	return Default.WriteTargets(dst, columns, targets)
}
|
||||
|
||||
// Scan scans a single sql result row into a struct.
// It leaves rows ready to be scanned again for the next row.
// Returns sql.ErrNoRows if there is no data to read.
func (d *Database) Scan(rows *sql.Rows, dst interface{}) error {
	// get the list of struct fields (also validates dst's type)
	data, err := getFields(reflect.TypeOf(dst))
	if err != nil {
		return err
	}

	// get the sql columns
	columns, err := rows.Columns()
	if err != nil {
		return err
	}

	return d.scanRow(data, rows, dst, columns)
}
|
||||
|
||||
// Scan scans a single sql result row into a struct using the Default
// Database type. See Database.Scan for details.
func Scan(rows *sql.Rows, dst interface{}) error {
	return Default.Scan(rows, dst)
}
|
||||
|
||||
// ScanRow scans a single sql result row into a struct.
|
||||
// It reads exactly one result row and closes rows when finished.
|
||||
// Returns sql.ErrNoRows if there is no result row.
|
||||
func (d *Database) ScanRow(rows *sql.Rows, dst interface{}) error {
|
||||
// make sure we always close rows
|
||||
defer rows.Close()
|
||||
|
||||
if err := d.Scan(rows, dst); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := rows.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ScanRow scans a single result row using the Default Database type,
// closing rows when finished. See Database.ScanRow for details.
func ScanRow(rows *sql.Rows, dst interface{}) error {
	return Default.ScanRow(rows, dst)
}
|
||||
|
||||
// ScanAll scans all sql result rows into a slice of structs.
|
||||
// It reads all rows and closes rows when finished.
|
||||
// dst should be a pointer to a slice of the appropriate type.
|
||||
// The new results will be appended to any existing data in dst.
|
||||
func (d *Database) ScanAll(rows *sql.Rows, dst interface{}) error {
|
||||
// make sure we always close rows
|
||||
defer rows.Close()
|
||||
|
||||
// make sure dst is an appropriate type
|
||||
dstVal := reflect.ValueOf(dst)
|
||||
if dstVal.Kind() != reflect.Ptr || dstVal.IsNil() {
|
||||
return fmt.Errorf("ScanAll called with non-pointer destination: %T", dst)
|
||||
}
|
||||
sliceVal := dstVal.Elem()
|
||||
if sliceVal.Kind() != reflect.Slice {
|
||||
return fmt.Errorf("ScanAll called with pointer to non-slice: %T", dst)
|
||||
}
|
||||
ptrType := sliceVal.Type().Elem()
|
||||
if ptrType.Kind() != reflect.Ptr {
|
||||
return fmt.Errorf("ScanAll expects element to be pointers, found %T", dst)
|
||||
}
|
||||
eltType := ptrType.Elem()
|
||||
if eltType.Kind() != reflect.Struct {
|
||||
return fmt.Errorf("ScanAll expects element to be pointers to structs, found %T", dst)
|
||||
}
|
||||
|
||||
// get the list of struct fields
|
||||
data, err := getFields(ptrType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// get the sql columns
|
||||
columns, err := rows.Columns()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// gather the results
|
||||
for {
|
||||
// create a new element
|
||||
eltVal := reflect.New(eltType)
|
||||
elt := eltVal.Interface()
|
||||
|
||||
// scan it
|
||||
if err := d.scanRow(data, rows, elt, columns); err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// add to the result slice
|
||||
sliceVal.Set(reflect.Append(sliceVal, eltVal))
|
||||
}
|
||||
}
|
||||
|
||||
// ScanAll scans all result rows using the Default Database type,
// closing rows when finished. See Database.ScanAll for details.
func ScanAll(rows *sql.Rows, dst interface{}) error {
	return Default.ScanAll(rows, dst)
}
|
|
@ -0,0 +1,22 @@
|
|||
(The MIT License)
|
||||
|
||||
Copyright (c) 2012 James Cooper <james@bitmechanic.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
'Software'), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
@ -0,0 +1,6 @@
|
|||
# Legacy Makefile for the pre-"go tool" build system (Go 1 era):
# pulls the standard package build rules in from $(GOROOT).
include $(GOROOT)/src/Make.inc

# Import path the package installs under, and the files that compose it.
TARG = github.com/coopernurse/gorp
GOFILES = gorp.go dialect.go

include $(GOROOT)/src/Make.pkg
|
|
@ -0,0 +1,672 @@
|
|||
# Go Relational Persistence
|
||||
|
||||
[![build status](https://secure.travis-ci.org/go-gorp/gorp.png)](http://travis-ci.org/go-gorp/gorp)
|
||||
|
||||
I hesitate to call gorp an ORM. Go doesn't really have objects, at least
|
||||
not in the classic Smalltalk/Java sense. There goes the "O". gorp doesn't
|
||||
know anything about the relationships between your structs (at least not
|
||||
yet). So the "R" is questionable too (but I use it in the name because,
|
||||
well, it seemed more clever).
|
||||
|
||||
The "M" is alive and well. Given some Go structs and a database, gorp
|
||||
should remove a fair amount of boilerplate busy-work from your code.
|
||||
|
||||
I hope that gorp saves you time, minimizes the drudgery of getting data
|
||||
in and out of your database, and helps your code focus on algorithms,
|
||||
not infrastructure.
|
||||
|
||||
* Bind struct fields to table columns via API or tag
|
||||
* Support for embedded structs
|
||||
* Support for transactions
|
||||
* Forward engineer db schema from structs (great for unit tests)
|
||||
* Pre/post insert/update/delete hooks
|
||||
* Automatically generate insert/update/delete statements for a struct
|
||||
* Automatic binding of auto increment PKs back to struct after insert
|
||||
* Delete by primary key(s)
|
||||
* Select by primary key(s)
|
||||
* Optional trace sql logging
|
||||
* Bind arbitrary SQL queries to a struct
|
||||
* Bind slice to SELECT query results without type assertions
|
||||
* Use positional or named bind parameters in custom SELECT queries
|
||||
* Optional optimistic locking using a version column (for update/deletes)
|
||||
|
||||
## Installation
|
||||
|
||||
# install the library:
|
||||
go get gopkg.in/gorp.v1
|
||||
|
||||
// use in your .go code:
|
||||
import (
|
||||
"gopkg.in/gorp.v1"
|
||||
)
|
||||
|
||||
## Versioning
|
||||
|
||||
This project provides a stable release (v1.x tags) and a bleeding edge codebase (master).
|
||||
|
||||
`gopkg.in/gorp.v1` points to the latest v1.x tag. The APIs for v1 are stable and shouldn't change. Development takes place at the master branch. Although the code in master should always compile and test successfully, it might break APIs. We aim to maintain backwards compatibility, but APIs and behaviour might be changed to fix a bug. Also note that APIs that are new in the master branch can change until released as v2.
|
||||
|
||||
If you want to use bleeding edge, use `github.com/go-gorp/gorp` as import path.
|
||||
|
||||
## API Documentation
|
||||
|
||||
Full godoc output from the latest v1 release is available here:
|
||||
|
||||
https://godoc.org/gopkg.in/gorp.v1
|
||||
|
||||
For the latest code in master:
|
||||
|
||||
https://godoc.org/github.com/go-gorp/gorp
|
||||
|
||||
## Quickstart
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"gopkg.in/gorp.v1"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
"log"
|
||||
"time"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// initialize the DbMap
|
||||
dbmap := initDb()
|
||||
defer dbmap.Db.Close()
|
||||
|
||||
// delete any existing rows
|
||||
err := dbmap.TruncateTables()
|
||||
checkErr(err, "TruncateTables failed")
|
||||
|
||||
// create two posts
|
||||
p1 := newPost("Go 1.1 released!", "Lorem ipsum lorem ipsum")
|
||||
p2 := newPost("Go 1.2 released!", "Lorem ipsum lorem ipsum")
|
||||
|
||||
// insert rows - auto increment PKs will be set properly after the insert
|
||||
err = dbmap.Insert(&p1, &p2)
|
||||
checkErr(err, "Insert failed")
|
||||
|
||||
// use convenience SelectInt
|
||||
count, err := dbmap.SelectInt("select count(*) from posts")
|
||||
checkErr(err, "select count(*) failed")
|
||||
log.Println("Rows after inserting:", count)
|
||||
|
||||
// update a row
|
||||
p2.Title = "Go 1.2 is better than ever"
|
||||
count, err = dbmap.Update(&p2)
|
||||
checkErr(err, "Update failed")
|
||||
log.Println("Rows updated:", count)
|
||||
|
||||
// fetch one row - note use of "post_id" instead of "Id" since column is aliased
|
||||
//
|
||||
// Postgres users should use $1 instead of ? placeholders
|
||||
// See 'Known Issues' below
|
||||
//
|
||||
err = dbmap.SelectOne(&p2, "select * from posts where post_id=?", p2.Id)
|
||||
checkErr(err, "SelectOne failed")
|
||||
log.Println("p2 row:", p2)
|
||||
|
||||
// fetch all rows
|
||||
var posts []Post
|
||||
_, err = dbmap.Select(&posts, "select * from posts order by post_id")
|
||||
checkErr(err, "Select failed")
|
||||
log.Println("All rows:")
|
||||
for x, p := range posts {
|
||||
log.Printf(" %d: %v\n", x, p)
|
||||
}
|
||||
|
||||
// delete row by PK
|
||||
count, err = dbmap.Delete(&p1)
|
||||
checkErr(err, "Delete failed")
|
||||
log.Println("Rows deleted:", count)
|
||||
|
||||
// delete row manually via Exec
|
||||
_, err = dbmap.Exec("delete from posts where post_id=?", p2.Id)
|
||||
checkErr(err, "Exec failed")
|
||||
|
||||
// confirm count is zero
|
||||
count, err = dbmap.SelectInt("select count(*) from posts")
|
||||
checkErr(err, "select count(*) failed")
|
||||
log.Println("Row count - should be zero:", count)
|
||||
|
||||
log.Println("Done!")
|
||||
}
|
||||
|
||||
type Post struct {
|
||||
// db tag lets you specify the column name if it differs from the struct field
|
||||
Id int64 `db:"post_id"`
|
||||
Created int64
|
||||
Title string
|
||||
Body string
|
||||
}
|
||||
|
||||
func newPost(title, body string) Post {
|
||||
return Post{
|
||||
Created: time.Now().UnixNano(),
|
||||
Title: title,
|
||||
Body: body,
|
||||
}
|
||||
}
|
||||
|
||||
func initDb() *gorp.DbMap {
|
||||
// connect to db using standard Go database/sql API
|
||||
// use whatever database/sql driver you wish
|
||||
db, err := sql.Open("sqlite3", "/tmp/post_db.bin")
|
||||
checkErr(err, "sql.Open failed")
|
||||
|
||||
// construct a gorp DbMap
|
||||
dbmap := &gorp.DbMap{Db: db, Dialect: gorp.SqliteDialect{}}
|
||||
|
||||
// add a table, setting the table name to 'posts' and
|
||||
// specifying that the Id property is an auto incrementing PK
|
||||
dbmap.AddTableWithName(Post{}, "posts").SetKeys(true, "Id")
|
||||
|
||||
// create the table. in a production system you'd generally
|
||||
// use a migration tool, or create the tables via scripts
|
||||
err = dbmap.CreateTablesIfNotExists()
|
||||
checkErr(err, "Create tables failed")
|
||||
|
||||
return dbmap
|
||||
}
|
||||
|
||||
func checkErr(err error, msg string) {
|
||||
if err != nil {
|
||||
log.Fatalln(msg, err)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
### Mapping structs to tables
|
||||
|
||||
First define some types:
|
||||
|
||||
```go
|
||||
type Invoice struct {
|
||||
Id int64
|
||||
Created int64
|
||||
Updated int64
|
||||
Memo string
|
||||
PersonId int64
|
||||
}
|
||||
|
||||
type Person struct {
|
||||
Id int64
|
||||
Created int64
|
||||
Updated int64
|
||||
FName string
|
||||
LName string
|
||||
}
|
||||
|
||||
// Example of using tags to alias fields to column names
|
||||
// The 'db' value is the column name
|
||||
//
|
||||
// A hyphen will cause gorp to skip this field, similar to the
|
||||
// Go json package.
|
||||
//
|
||||
// This is equivalent to using the ColMap methods:
|
||||
//
|
||||
// table := dbmap.AddTableWithName(Product{}, "product")
|
||||
// table.ColMap("Id").Rename("product_id")
|
||||
// table.ColMap("Price").Rename("unit_price")
|
||||
// table.ColMap("IgnoreMe").SetTransient(true)
|
||||
//
|
||||
type Product struct {
|
||||
Id int64 `db:"product_id"`
|
||||
Price int64 `db:"unit_price"`
|
||||
IgnoreMe string `db:"-"`
|
||||
}
|
||||
```
|
||||
|
||||
Then create a mapper, typically you'd do this one time at app startup:
|
||||
|
||||
```go
|
||||
// connect to db using standard Go database/sql API
|
||||
// use whatever database/sql driver you wish
|
||||
db, err := sql.Open("mymysql", "tcp:localhost:3306*mydb/myuser/mypassword")
|
||||
|
||||
// construct a gorp DbMap
|
||||
dbmap := &gorp.DbMap{Db: db, Dialect: gorp.MySQLDialect{"InnoDB", "UTF8"}}
|
||||
|
||||
// register the structs you wish to use with gorp
|
||||
// you can also use the shorter dbmap.AddTable() if you
|
||||
// don't want to override the table name
|
||||
//
|
||||
// SetKeys(true) means we have an auto increment primary key, which
|
||||
// will get automatically bound to your struct post-insert
|
||||
//
|
||||
t1 := dbmap.AddTableWithName(Invoice{}, "invoice_test").SetKeys(true, "Id")
|
||||
t2 := dbmap.AddTableWithName(Person{}, "person_test").SetKeys(true, "Id")
|
||||
t3 := dbmap.AddTableWithName(Product{}, "product_test").SetKeys(true, "Id")
|
||||
```
|
||||
|
||||
### Struct Embedding
|
||||
|
||||
gorp supports embedding structs. For example:
|
||||
|
||||
```go
|
||||
type Names struct {
|
||||
FirstName string
|
||||
LastName string
|
||||
}
|
||||
|
||||
type WithEmbeddedStruct struct {
|
||||
Id int64
|
||||
Names
|
||||
}
|
||||
|
||||
es := &WithEmbeddedStruct{-1, Names{FirstName: "Alice", LastName: "Smith"}}
|
||||
err := dbmap.Insert(es)
|
||||
```
|
||||
|
||||
See the `TestWithEmbeddedStruct` function in `gorp_test.go` for a full example.
|
||||
|
||||
### Create/Drop Tables ###
|
||||
|
||||
Automatically create / drop registered tables. This is useful for unit tests
|
||||
but is entirely optional. You can of course use gorp with tables created manually,
|
||||
or with a separate migration tool (like goose: https://bitbucket.org/liamstask/goose).
|
||||
|
||||
```go
|
||||
// create all registered tables
|
||||
dbmap.CreateTables()
|
||||
|
||||
// same as above, but uses "if not exists" clause to skip tables that are
|
||||
// already defined
|
||||
dbmap.CreateTablesIfNotExists()
|
||||
|
||||
// drop
|
||||
dbmap.DropTables()
|
||||
```
|
||||
|
||||
### SQL Logging
|
||||
|
||||
Optionally you can pass in a logger to trace all SQL statements.
|
||||
I recommend enabling this initially while you're getting the feel for what
|
||||
gorp is doing on your behalf.
|
||||
|
||||
Gorp defines a `GorpLogger` interface that Go's built in `log.Logger` satisfies.
|
||||
However, you can write your own `GorpLogger` implementation, or use a package such
|
||||
as `glog` if you want more control over how statements are logged.
|
||||
|
||||
```go
|
||||
// Will log all SQL statements + args as they are run
|
||||
// The first arg is a string prefix to prepend to all log messages
|
||||
dbmap.TraceOn("[gorp]", log.New(os.Stdout, "myapp:", log.Lmicroseconds))
|
||||
|
||||
// Turn off tracing
|
||||
dbmap.TraceOff()
|
||||
```
|
||||
|
||||
### Insert
|
||||
|
||||
```go
|
||||
// Must declare as pointers so optional callback hooks
|
||||
// can operate on your data, not copies
|
||||
inv1 := &Invoice{0, 100, 200, "first order", 0}
|
||||
inv2 := &Invoice{0, 100, 200, "second order", 0}
|
||||
|
||||
// Insert your rows
|
||||
err := dbmap.Insert(inv1, inv2)
|
||||
|
||||
// Because we called SetKeys(true) on Invoice, the Id field
|
||||
// will be populated after the Insert() automatically
|
||||
fmt.Printf("inv1.Id=%d inv2.Id=%d\n", inv1.Id, inv2.Id)
|
||||
```
|
||||
|
||||
### Update
|
||||
|
||||
Continuing the above example, use the `Update` method to modify an Invoice:
|
||||
|
||||
```go
|
||||
// count is the # of rows updated, which should be 1 in this example
|
||||
count, err := dbmap.Update(inv1)
|
||||
```
|
||||
|
||||
### Delete
|
||||
|
||||
If you have primary key(s) defined for a struct, you can use the `Delete`
|
||||
method to remove rows:
|
||||
|
||||
```go
|
||||
count, err := dbmap.Delete(inv1)
|
||||
```
|
||||
|
||||
### Select by Key
|
||||
|
||||
Use the `Get` method to fetch a single row by primary key. It returns
|
||||
nil if no row is found.
|
||||
|
||||
```go
|
||||
// fetch Invoice with Id=99
|
||||
obj, err := dbmap.Get(Invoice{}, 99)
|
||||
inv := obj.(*Invoice)
|
||||
```
|
||||
|
||||
### Ad Hoc SQL
|
||||
|
||||
#### SELECT
|
||||
|
||||
`Select()` and `SelectOne()` provide a simple way to bind arbitrary queries to a slice
|
||||
or a single struct.
|
||||
|
||||
```go
|
||||
// Select a slice - first return value is not needed when a slice pointer is passed to Select()
|
||||
var posts []Post
|
||||
_, err := dbmap.Select(&posts, "select * from post order by id")
|
||||
|
||||
// You can also use primitive types
|
||||
var ids []string
|
||||
_, err := dbmap.Select(&ids, "select id from post")
|
||||
|
||||
// Select a single row.
|
||||
// Returns an error if no row found, or if more than one row is found
|
||||
var post Post
|
||||
err := dbmap.SelectOne(&post, "select * from post where id=?", id)
|
||||
```
|
||||
|
||||
Want to do joins? Just write the SQL and the struct. gorp will bind them:
|
||||
|
||||
```go
|
||||
// Define a type for your join
|
||||
// It *must* contain all the columns in your SELECT statement
|
||||
//
|
||||
// The names here should match the aliased column names you specify
|
||||
// in your SQL - no additional binding work required. simple.
|
||||
//
|
||||
type InvoicePersonView struct {
|
||||
InvoiceId int64
|
||||
PersonId int64
|
||||
Memo string
|
||||
FName string
|
||||
}
|
||||
|
||||
// Create some rows
|
||||
p1 := &Person{0, 0, 0, "bob", "smith"}
|
||||
dbmap.Insert(p1)
|
||||
|
||||
// notice how we can wire up p1.Id to the invoice easily
|
||||
inv1 := &Invoice{0, 0, 0, "xmas order", p1.Id}
|
||||
dbmap.Insert(inv1)
|
||||
|
||||
// Run your query
|
||||
query := "select i.Id InvoiceId, p.Id PersonId, i.Memo, p.FName " +
|
||||
"from invoice_test i, person_test p " +
|
||||
"where i.PersonId = p.Id"
|
||||
|
||||
// pass a slice to Select()
|
||||
var list []InvoicePersonView
|
||||
_, err := dbmap.Select(&list, query)
|
||||
|
||||
// this should test true
|
||||
expected := InvoicePersonView{inv1.Id, p1.Id, inv1.Memo, p1.FName}
|
||||
if reflect.DeepEqual(list[0], expected) {
|
||||
fmt.Println("Woot! My join worked!")
|
||||
}
|
||||
```
|
||||
|
||||
#### SELECT string or int64
|
||||
|
||||
gorp provides a few convenience methods for selecting a single string or int64.
|
||||
|
||||
```go
|
||||
// select single int64 from db (use $1 instead of ? for postgresql)
|
||||
i64, err := dbmap.SelectInt("select count(*) from foo where blah=?", blahVal)
|
||||
|
||||
// select single string from db:
|
||||
s, err := dbmap.SelectStr("select name from foo where blah=?", blahVal)
|
||||
|
||||
```
|
||||
|
||||
#### Named bind parameters
|
||||
|
||||
You may use a map or struct to bind parameters by name. This is currently
|
||||
only supported in SELECT queries.
|
||||
|
||||
```go
|
||||
_, err := dbm.Select(&dest, "select * from Foo where name = :name and age = :age", map[string]interface{}{
|
||||
"name": "Rob",
|
||||
"age": 31,
|
||||
})
|
||||
```
|
||||
|
||||
#### UPDATE / DELETE
|
||||
|
||||
You can execute raw SQL if you wish. Particularly good for batch operations.
|
||||
|
||||
```go
|
||||
res, err := dbmap.Exec("delete from invoice_test where PersonId=?", 10)
|
||||
```
|
||||
|
||||
### Transactions
|
||||
|
||||
You can batch operations into a transaction:
|
||||
|
||||
```go
|
||||
func InsertInv(dbmap *DbMap, inv *Invoice, per *Person) error {
|
||||
// Start a new transaction
|
||||
trans, err := dbmap.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
trans.Insert(per)
|
||||
inv.PersonId = per.Id
|
||||
trans.Insert(inv)
|
||||
|
||||
// if the commit is successful, a nil error is returned
|
||||
return trans.Commit()
|
||||
}
|
||||
```
|
||||
|
||||
### Hooks
|
||||
|
||||
Use hooks to update data before/after saving to the db. Good for timestamps:
|
||||
|
||||
```go
|
||||
// implement the PreInsert and PreUpdate hooks
|
||||
func (i *Invoice) PreInsert(s gorp.SqlExecutor) error {
|
||||
i.Created = time.Now().UnixNano()
|
||||
i.Updated = i.Created
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *Invoice) PreUpdate(s gorp.SqlExecutor) error {
|
||||
i.Updated = time.Now().UnixNano()
|
||||
return nil
|
||||
}
|
||||
|
||||
// You can use the SqlExecutor to cascade additional SQL
|
||||
// Take care to avoid cycles. gorp won't prevent them.
|
||||
//
|
||||
// Here's an example of a cascading delete
|
||||
//
|
||||
func (p *Person) PreDelete(s gorp.SqlExecutor) error {
|
||||
query := "delete from invoice_test where PersonId=?"
|
||||
err := s.Exec(query, p.Id); if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
Full list of hooks that you can implement:
|
||||
|
||||
PostGet
|
||||
PreInsert
|
||||
PostInsert
|
||||
PreUpdate
|
||||
PostUpdate
|
||||
PreDelete
|
||||
PostDelete
|
||||
|
||||
All have the same signature. for example:
|
||||
|
||||
func (p *MyStruct) PostUpdate(s gorp.SqlExecutor) error
|
||||
|
||||
### Optimistic Locking
|
||||
|
||||
gorp provides a simple optimistic locking feature, similar to Java's JPA, that
|
||||
will raise an error if you try to update/delete a row whose `version` column
|
||||
has a value different than the one in memory. This provides a safe way to do
|
||||
"select then update" style operations without explicit read and write locks.
|
||||
|
||||
```go
|
||||
// Version is an auto-incremented number, managed by gorp
|
||||
// If this property is present on your struct, update
|
||||
// operations will be constrained
|
||||
//
|
||||
// For example, say we defined Person as:
|
||||
|
||||
type Person struct {
|
||||
Id int64
|
||||
Created int64
|
||||
Updated int64
|
||||
FName string
|
||||
LName string
|
||||
|
||||
// automatically used as the Version col
|
||||
// use table.SetVersionCol("columnName") to map a different
|
||||
// struct field as the version field
|
||||
Version int64
|
||||
}
|
||||
|
||||
p1 := &Person{0, 0, 0, "Bob", "Smith", 0}
|
||||
dbmap.Insert(p1) // Version is now 1
|
||||
|
||||
obj, err := dbmap.Get(Person{}, p1.Id)
|
||||
p2 := obj.(*Person)
|
||||
p2.LName = "Edwards"
|
||||
dbmap.Update(p2) // Version is now 2
|
||||
|
||||
p1.LName = "Howard"
|
||||
|
||||
// Raises error because p1.Version == 1, which is out of date
|
||||
count, err := dbmap.Update(p1)
|
||||
_, ok := err.(gorp.OptimisticLockError)
|
||||
if ok {
|
||||
// should reach this statement
|
||||
|
||||
// in a real app you might reload the row and retry, or
|
||||
// you might propagate this to the user, depending on the desired
|
||||
// semantics
|
||||
fmt.Printf("Tried to update row with stale data: %v\n", err)
|
||||
} else {
|
||||
// some other db error occurred - log or return up the stack
|
||||
fmt.Printf("Unknown db err: %v\n", err)
|
||||
}
|
||||
```
|
||||
|
||||
## Database Drivers
|
||||
|
||||
gorp uses the Go 1 `database/sql` package. A full list of compliant drivers is available here:
|
||||
|
||||
http://code.google.com/p/go-wiki/wiki/SQLDrivers
|
||||
|
||||
Sadly, SQL databases differ on various issues. gorp provides a Dialect interface that should be
|
||||
implemented per database vendor. Dialects are provided for:
|
||||
|
||||
* MySQL
|
||||
* PostgreSQL
|
||||
* sqlite3
|
||||
|
||||
Each of these three databases pass the test suite. See `gorp_test.go` for example
|
||||
DSNs for these three databases.
|
||||
|
||||
Support is also provided for:
|
||||
|
||||
* Oracle (contributed by @klaidliadon)
|
||||
* SQL Server (contributed by @qrawl) - use driver: github.com/denisenkom/go-mssqldb
|
||||
|
||||
Note that these databases are not covered by CI and I (@coopernurse) have no good way to
|
||||
test them locally. So please try them and send patches as needed, but expect a bit more
|
||||
unpredictability.
|
||||
|
||||
## Known Issues
|
||||
|
||||
### SQL placeholder portability
|
||||
|
||||
Different databases use different strings to indicate variable placeholders in
|
||||
prepared SQL statements. Unlike some database abstraction layers (such as JDBC),
|
||||
Go's `database/sql` does not standardize this.
|
||||
|
||||
SQL generated by gorp in the `Insert`, `Update`, `Delete`, and `Get` methods delegates
|
||||
to a Dialect implementation for each database, and will generate portable SQL.
|
||||
|
||||
Raw SQL strings passed to `Exec`, `Select`, `SelectOne`, `SelectInt`, etc will not be
|
||||
parsed. Consequently you may have portability issues if you write a query like this:
|
||||
|
||||
```go
|
||||
// works on MySQL and Sqlite3, but not with Postgresql
|
||||
err := dbmap.SelectOne(&val, "select * from foo where id = ?", 30)
|
||||
```
|
||||
|
||||
In `Select` and `SelectOne` you can use named parameters to work around this.
|
||||
The following is portable:
|
||||
|
||||
```go
|
||||
err := dbmap.SelectOne(&val, "select * from foo where id = :id",
|
||||
map[string]interface{} { "id": 30})
|
||||
```
|
||||
|
||||
### time.Time and time zones
|
||||
|
||||
gorp will pass `time.Time` fields through to the `database/sql` driver, but note that
|
||||
the behavior of this type varies across database drivers.
|
||||
|
||||
MySQL users should be especially cautious. See: https://github.com/ziutek/mymysql/pull/77
|
||||
|
||||
To avoid any potential issues with timezone/DST, consider using an integer field for time
|
||||
data and storing UNIX time.
|
||||
|
||||
## Running the tests
|
||||
|
||||
The included tests may be run against MySQL, Postgresql, or sqlite3.
|
||||
You must set two environment variables so the test code knows which driver to
|
||||
use, and how to connect to your database.
|
||||
|
||||
```sh
|
||||
# MySQL example:
|
||||
export GORP_TEST_DSN=gomysql_test/gomysql_test/abc123
|
||||
export GORP_TEST_DIALECT=mysql
|
||||
|
||||
# run the tests
|
||||
go test
|
||||
|
||||
# run the tests and benchmarks
|
||||
go test -bench="Bench" -benchtime 10
|
||||
```
|
||||
|
||||
Valid `GORP_TEST_DIALECT` values are: "mysql", "postgres", "sqlite3"
|
||||
See the `test_all.sh` script for examples of all 3 databases. This is the script I run
|
||||
locally to test the library.
|
||||
|
||||
## Performance
|
||||
|
||||
gorp uses reflection to construct SQL queries and bind parameters. See the BenchmarkNativeCrud vs BenchmarkGorpCrud in gorp_test.go for a simple perf test. On my MacBook Pro gorp is about 2-3% slower than hand written SQL.
|
||||
|
||||
## Help/Support
|
||||
|
||||
IRC: #gorp
|
||||
Mailing list: gorp-dev@googlegroups.com
|
||||
Bugs/Enhancements: Create a github issue
|
||||
|
||||
## Pull requests / Contributions
|
||||
|
||||
Contributions are very welcome. Please follow these guidelines:
|
||||
|
||||
* Fork the `master` branch and issue pull requests targeting the `master` branch
|
||||
* If you are adding an enhancement, please open an issue first with your proposed change.
|
||||
* Changes that break backwards compatibility in the public API are only accepted after we
|
||||
discuss on a GitHub issue for a while.
|
||||
|
||||
Thanks!
|
||||
|
||||
## Contributors
|
||||
|
||||
* matthias-margush - column aliasing via tags
|
||||
* Rob Figueiredo - @robfig
|
||||
* Quinn Slack - @sqs
|
|
@ -0,0 +1,692 @@
|
|||
package gorp
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// The Dialect interface encapsulates behaviors that differ across
// SQL databases. At present the Dialect is only used by CreateTables()
// but this could change in the future.
type Dialect interface {

	// QuerySuffix adds a suffix to any query, usually ";".
	QuerySuffix() string

	// ToSqlType returns the SQL column type to use when creating a
	// table of the given Go Type. maxsize can be used to switch based on
	// size. For example, in MySQL []byte could map to BLOB, MEDIUMBLOB,
	// or LONGBLOB depending on the maxsize.
	ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string

	// AutoIncrStr returns the string to append to primary key column
	// definitions.
	AutoIncrStr() string

	// AutoIncrBindValue returns the string to bind autoincrement columns
	// to. Empty string will remove reference to those columns in the
	// INSERT statement.
	AutoIncrBindValue() string

	// AutoIncrInsertSuffix returns the suffix appended to an INSERT
	// statement for the given auto-increment column.
	AutoIncrInsertSuffix(col *ColumnMap) string

	// CreateTableSuffix returns the string to append to a "create table"
	// statement for vendor specific table attributes.
	CreateTableSuffix() string

	// TruncateClause returns the string used to truncate tables.
	TruncateClause() string

	// BindVar returns the bind variable string to use when forming SQL
	// statements; in many dbs it is "?", but Postgres appears to use $1.
	//
	// i is a zero based index of the bind variable in this statement.
	//
	BindVar(i int) string

	// QuoteField handles quoting of a field name to ensure that it
	// doesn't raise any SQL parsing exceptions by using a reserved word
	// as a field name.
	QuoteField(field string) string

	// QuotedTableForQuery handles building up of a schema.database string
	// that is compatible with the given dialect.
	//
	// schema - The schema that <table> lives in
	// table - The table name
	QuotedTableForQuery(schema string, table string) string

	// Existence clause for table creation / deletion.
	IfSchemaNotExists(command, schema string) string
	IfTableExists(command, schema, table string) string
	IfTableNotExists(command, schema, table string) string
}
|
||||
|
||||
// IntegerAutoIncrInserter is implemented by dialects that can perform
// inserts with automatically incremented integer primary keys. If
// the dialect can handle automatic assignment of more than just
// integers, see TargetedAutoIncrInserter.
type IntegerAutoIncrInserter interface {
	// InsertAutoIncr executes insertSql with params and returns the
	// integer primary key generated for the inserted row.
	InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error)
}
|
||||
|
||||
// TargetedAutoIncrInserter is implemented by dialects that can
// perform automatic assignment of any primary key type (i.e. strings
// for uuids, integers for serials, etc).
type TargetedAutoIncrInserter interface {
	// InsertAutoIncrToTarget runs an insert operation and assigns the
	// automatically generated primary key directly to the passed in
	// target. The target should be a pointer to the primary key
	// field of the value being inserted.
	InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error
}
|
||||
|
||||
func standardInsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) {
|
||||
res, err := exec.Exec(insertSql, params...)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return res.LastInsertId()
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////
|
||||
// sqlite3 //
|
||||
/////////////
|
||||
|
||||
// SqliteDialect implements the Dialect interface for SQLite databases.
type SqliteDialect struct {
	suffix string // optional suffix appended to CREATE TABLE statements
}
|
||||
|
||||
// QuerySuffix returns ";", the terminator appended to every query.
func (d SqliteDialect) QuerySuffix() string { return ";" }
|
||||
|
||||
func (d SqliteDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
|
||||
switch val.Kind() {
|
||||
case reflect.Ptr:
|
||||
return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
|
||||
case reflect.Bool:
|
||||
return "integer"
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return "integer"
|
||||
case reflect.Float64, reflect.Float32:
|
||||
return "real"
|
||||
case reflect.Slice:
|
||||
if val.Elem().Kind() == reflect.Uint8 {
|
||||
return "blob"
|
||||
}
|
||||
}
|
||||
|
||||
switch val.Name() {
|
||||
case "NullInt64":
|
||||
return "integer"
|
||||
case "NullFloat64":
|
||||
return "real"
|
||||
case "NullBool":
|
||||
return "integer"
|
||||
case "Time":
|
||||
return "datetime"
|
||||
}
|
||||
|
||||
if maxsize < 1 {
|
||||
maxsize = 255
|
||||
}
|
||||
return fmt.Sprintf("varchar(%d)", maxsize)
|
||||
}
|
||||
|
||||
// Returns autoincrement
|
||||
func (d SqliteDialect) AutoIncrStr() string {
|
||||
return "autoincrement"
|
||||
}
|
||||
|
||||
func (d SqliteDialect) AutoIncrBindValue() string {
|
||||
return "null"
|
||||
}
|
||||
|
||||
func (d SqliteDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Returns suffix
|
||||
func (d SqliteDialect) CreateTableSuffix() string {
|
||||
return d.suffix
|
||||
}
|
||||
|
||||
// With sqlite, there technically isn't a TRUNCATE statement,
|
||||
// but a DELETE FROM uses a truncate optimization:
|
||||
// http://www.sqlite.org/lang_delete.html
|
||||
func (d SqliteDialect) TruncateClause() string {
|
||||
return "delete from"
|
||||
}
|
||||
|
||||
// Returns "?"
|
||||
func (d SqliteDialect) BindVar(i int) string {
|
||||
return "?"
|
||||
}
|
||||
|
||||
func (d SqliteDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) {
|
||||
return standardInsertAutoIncr(exec, insertSql, params...)
|
||||
}
|
||||
|
||||
func (d SqliteDialect) QuoteField(f string) string {
|
||||
return `"` + f + `"`
|
||||
}
|
||||
|
||||
// sqlite does not have schemas like PostgreSQL does, so just escape it like normal
|
||||
func (d SqliteDialect) QuotedTableForQuery(schema string, table string) string {
|
||||
return d.QuoteField(table)
|
||||
}
|
||||
|
||||
func (d SqliteDialect) IfSchemaNotExists(command, schema string) string {
|
||||
return fmt.Sprintf("%s if not exists", command)
|
||||
}
|
||||
|
||||
func (d SqliteDialect) IfTableExists(command, schema, table string) string {
|
||||
return fmt.Sprintf("%s if exists", command)
|
||||
}
|
||||
|
||||
func (d SqliteDialect) IfTableNotExists(command, schema, table string) string {
|
||||
return fmt.Sprintf("%s if not exists", command)
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////
|
||||
// PostgreSQL //
|
||||
////////////////
|
||||
|
||||
type PostgresDialect struct {
|
||||
suffix string
|
||||
}
|
||||
|
||||
func (d PostgresDialect) QuerySuffix() string { return ";" }
|
||||
|
||||
func (d PostgresDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
|
||||
switch val.Kind() {
|
||||
case reflect.Ptr:
|
||||
return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
|
||||
case reflect.Bool:
|
||||
return "boolean"
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32:
|
||||
if isAutoIncr {
|
||||
return "serial"
|
||||
}
|
||||
return "integer"
|
||||
case reflect.Int64, reflect.Uint64:
|
||||
if isAutoIncr {
|
||||
return "bigserial"
|
||||
}
|
||||
return "bigint"
|
||||
case reflect.Float64:
|
||||
return "double precision"
|
||||
case reflect.Float32:
|
||||
return "real"
|
||||
case reflect.Slice:
|
||||
if val.Elem().Kind() == reflect.Uint8 {
|
||||
return "bytea"
|
||||
}
|
||||
}
|
||||
|
||||
switch val.Name() {
|
||||
case "NullInt64":
|
||||
return "bigint"
|
||||
case "NullFloat64":
|
||||
return "double precision"
|
||||
case "NullBool":
|
||||
return "boolean"
|
||||
case "Time":
|
||||
return "timestamp with time zone"
|
||||
}
|
||||
|
||||
if maxsize > 0 {
|
||||
return fmt.Sprintf("varchar(%d)", maxsize)
|
||||
} else {
|
||||
return "text"
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Returns empty string
|
||||
func (d PostgresDialect) AutoIncrStr() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (d PostgresDialect) AutoIncrBindValue() string {
|
||||
return "default"
|
||||
}
|
||||
|
||||
func (d PostgresDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
|
||||
return " returning " + col.ColumnName
|
||||
}
|
||||
|
||||
// Returns suffix
|
||||
func (d PostgresDialect) CreateTableSuffix() string {
|
||||
return d.suffix
|
||||
}
|
||||
|
||||
func (d PostgresDialect) TruncateClause() string {
|
||||
return "truncate"
|
||||
}
|
||||
|
||||
// Returns "$(i+1)"
|
||||
func (d PostgresDialect) BindVar(i int) string {
|
||||
return fmt.Sprintf("$%d", i+1)
|
||||
}
|
||||
|
||||
func (d PostgresDialect) InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error {
|
||||
rows, err := exec.query(insertSql, params...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
if rows.Next() {
|
||||
err := rows.Scan(target)
|
||||
return err
|
||||
}
|
||||
|
||||
return errors.New("No serial value returned for insert: " + insertSql + " Encountered error: " + rows.Err().Error())
|
||||
}
|
||||
|
||||
func (d PostgresDialect) QuoteField(f string) string {
|
||||
return `"` + strings.ToLower(f) + `"`
|
||||
}
|
||||
|
||||
func (d PostgresDialect) QuotedTableForQuery(schema string, table string) string {
|
||||
if strings.TrimSpace(schema) == "" {
|
||||
return d.QuoteField(table)
|
||||
}
|
||||
|
||||
return schema + "." + d.QuoteField(table)
|
||||
}
|
||||
|
||||
func (d PostgresDialect) IfSchemaNotExists(command, schema string) string {
|
||||
return fmt.Sprintf("%s if not exists", command)
|
||||
}
|
||||
|
||||
func (d PostgresDialect) IfTableExists(command, schema, table string) string {
|
||||
return fmt.Sprintf("%s if exists", command)
|
||||
}
|
||||
|
||||
func (d PostgresDialect) IfTableNotExists(command, schema, table string) string {
|
||||
return fmt.Sprintf("%s if not exists", command)
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////
|
||||
// MySQL //
|
||||
///////////
|
||||
|
||||
// Implementation of Dialect for MySQL databases.
|
||||
type MySQLDialect struct {
|
||||
|
||||
// Engine is the storage engine to use "InnoDB" vs "MyISAM" for example
|
||||
Engine string
|
||||
|
||||
// Encoding is the character encoding to use for created tables
|
||||
Encoding string
|
||||
}
|
||||
|
||||
func (d MySQLDialect) QuerySuffix() string { return ";" }
|
||||
|
||||
func (d MySQLDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
|
||||
switch val.Kind() {
|
||||
case reflect.Ptr:
|
||||
return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
|
||||
case reflect.Bool:
|
||||
return "boolean"
|
||||
case reflect.Int8:
|
||||
return "tinyint"
|
||||
case reflect.Uint8:
|
||||
return "tinyint unsigned"
|
||||
case reflect.Int16:
|
||||
return "smallint"
|
||||
case reflect.Uint16:
|
||||
return "smallint unsigned"
|
||||
case reflect.Int, reflect.Int32:
|
||||
return "int"
|
||||
case reflect.Uint, reflect.Uint32:
|
||||
return "int unsigned"
|
||||
case reflect.Int64:
|
||||
return "bigint"
|
||||
case reflect.Uint64:
|
||||
return "bigint unsigned"
|
||||
case reflect.Float64, reflect.Float32:
|
||||
return "double"
|
||||
case reflect.Slice:
|
||||
if val.Elem().Kind() == reflect.Uint8 {
|
||||
return "mediumblob"
|
||||
}
|
||||
}
|
||||
|
||||
switch val.Name() {
|
||||
case "NullInt64":
|
||||
return "bigint"
|
||||
case "NullFloat64":
|
||||
return "double"
|
||||
case "NullBool":
|
||||
return "tinyint"
|
||||
case "Time":
|
||||
return "datetime"
|
||||
}
|
||||
|
||||
if maxsize < 1 {
|
||||
maxsize = 255
|
||||
}
|
||||
return fmt.Sprintf("varchar(%d)", maxsize)
|
||||
}
|
||||
|
||||
// Returns auto_increment
|
||||
func (d MySQLDialect) AutoIncrStr() string {
|
||||
return "auto_increment"
|
||||
}
|
||||
|
||||
func (d MySQLDialect) AutoIncrBindValue() string {
|
||||
return "null"
|
||||
}
|
||||
|
||||
func (d MySQLDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Returns engine=%s charset=%s based on values stored on struct
|
||||
func (d MySQLDialect) CreateTableSuffix() string {
|
||||
if d.Engine == "" || d.Encoding == "" {
|
||||
msg := "gorp - undefined"
|
||||
|
||||
if d.Engine == "" {
|
||||
msg += " MySQLDialect.Engine"
|
||||
}
|
||||
if d.Engine == "" && d.Encoding == "" {
|
||||
msg += ","
|
||||
}
|
||||
if d.Encoding == "" {
|
||||
msg += " MySQLDialect.Encoding"
|
||||
}
|
||||
msg += ". Check that your MySQLDialect was correctly initialized when declared."
|
||||
panic(msg)
|
||||
}
|
||||
|
||||
return fmt.Sprintf(" engine=%s charset=%s", d.Engine, d.Encoding)
|
||||
}
|
||||
|
||||
func (d MySQLDialect) TruncateClause() string {
|
||||
return "truncate"
|
||||
}
|
||||
|
||||
// Returns "?"
|
||||
func (d MySQLDialect) BindVar(i int) string {
|
||||
return "?"
|
||||
}
|
||||
|
||||
func (d MySQLDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) {
|
||||
return standardInsertAutoIncr(exec, insertSql, params...)
|
||||
}
|
||||
|
||||
func (d MySQLDialect) QuoteField(f string) string {
|
||||
return "`" + f + "`"
|
||||
}
|
||||
|
||||
func (d MySQLDialect) QuotedTableForQuery(schema string, table string) string {
|
||||
if strings.TrimSpace(schema) == "" {
|
||||
return d.QuoteField(table)
|
||||
}
|
||||
|
||||
return schema + "." + d.QuoteField(table)
|
||||
}
|
||||
|
||||
func (d MySQLDialect) IfSchemaNotExists(command, schema string) string {
|
||||
return fmt.Sprintf("%s if not exists", command)
|
||||
}
|
||||
|
||||
func (d MySQLDialect) IfTableExists(command, schema, table string) string {
|
||||
return fmt.Sprintf("%s if exists", command)
|
||||
}
|
||||
|
||||
func (d MySQLDialect) IfTableNotExists(command, schema, table string) string {
|
||||
return fmt.Sprintf("%s if not exists", command)
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////
|
||||
// Sql Server //
|
||||
////////////////
|
||||
|
||||
// Implementation of Dialect for Microsoft SQL Server databases.
|
||||
// Tested on SQL Server 2008 with driver: github.com/denisenkom/go-mssqldb
|
||||
|
||||
type SqlServerDialect struct {
|
||||
suffix string
|
||||
}
|
||||
|
||||
func (d SqlServerDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
|
||||
switch val.Kind() {
|
||||
case reflect.Ptr:
|
||||
return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
|
||||
case reflect.Bool:
|
||||
return "bit"
|
||||
case reflect.Int8:
|
||||
return "tinyint"
|
||||
case reflect.Uint8:
|
||||
return "smallint"
|
||||
case reflect.Int16:
|
||||
return "smallint"
|
||||
case reflect.Uint16:
|
||||
return "int"
|
||||
case reflect.Int, reflect.Int32:
|
||||
return "int"
|
||||
case reflect.Uint, reflect.Uint32:
|
||||
return "bigint"
|
||||
case reflect.Int64:
|
||||
return "bigint"
|
||||
case reflect.Uint64:
|
||||
return "bigint"
|
||||
case reflect.Float32:
|
||||
return "real"
|
||||
case reflect.Float64:
|
||||
return "float(53)"
|
||||
case reflect.Slice:
|
||||
if val.Elem().Kind() == reflect.Uint8 {
|
||||
return "varbinary"
|
||||
}
|
||||
}
|
||||
|
||||
switch val.Name() {
|
||||
case "NullInt64":
|
||||
return "bigint"
|
||||
case "NullFloat64":
|
||||
return "float(53)"
|
||||
case "NullBool":
|
||||
return "tinyint"
|
||||
case "Time":
|
||||
return "datetime"
|
||||
}
|
||||
|
||||
if maxsize < 1 {
|
||||
maxsize = 255
|
||||
}
|
||||
return fmt.Sprintf("varchar(%d)", maxsize)
|
||||
}
|
||||
|
||||
// Returns auto_increment
|
||||
func (d SqlServerDialect) AutoIncrStr() string {
|
||||
return "identity(0,1)"
|
||||
}
|
||||
|
||||
// Empty string removes autoincrement columns from the INSERT statements.
|
||||
func (d SqlServerDialect) AutoIncrBindValue() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (d SqlServerDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Returns suffix
|
||||
func (d SqlServerDialect) CreateTableSuffix() string {
|
||||
|
||||
return d.suffix
|
||||
}
|
||||
|
||||
func (d SqlServerDialect) TruncateClause() string {
|
||||
return "delete from"
|
||||
}
|
||||
|
||||
// Returns "?"
|
||||
func (d SqlServerDialect) BindVar(i int) string {
|
||||
return "?"
|
||||
}
|
||||
|
||||
func (d SqlServerDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) {
|
||||
return standardInsertAutoIncr(exec, insertSql, params...)
|
||||
}
|
||||
|
||||
func (d SqlServerDialect) QuoteField(f string) string {
|
||||
return `"` + f + `"`
|
||||
}
|
||||
|
||||
func (d SqlServerDialect) QuotedTableForQuery(schema string, table string) string {
|
||||
if strings.TrimSpace(schema) == "" {
|
||||
return table
|
||||
}
|
||||
return schema + "." + table
|
||||
}
|
||||
|
||||
func (d SqlServerDialect) QuerySuffix() string { return ";" }
|
||||
|
||||
func (d SqlServerDialect) IfSchemaNotExists(command, schema string) string {
|
||||
s := fmt.Sprintf("if not exists (select name from sys.schemas where name = '%s') %s", schema, command)
|
||||
return s
|
||||
}
|
||||
|
||||
func (d SqlServerDialect) IfTableExists(command, schema, table string) string {
|
||||
var schema_clause string
|
||||
if strings.TrimSpace(schema) != "" {
|
||||
schema_clause = fmt.Sprintf("table_schema = '%s' and ", schema)
|
||||
}
|
||||
s := fmt.Sprintf("if exists (select * from information_schema.tables where %stable_name = '%s') %s", schema_clause, table, command)
|
||||
return s
|
||||
}
|
||||
|
||||
func (d SqlServerDialect) IfTableNotExists(command, schema, table string) string {
|
||||
var schema_clause string
|
||||
if strings.TrimSpace(schema) != "" {
|
||||
schema_clause = fmt.Sprintf("table_schema = '%s' and ", schema)
|
||||
}
|
||||
s := fmt.Sprintf("if not exists (select * from information_schema.tables where %stable_name = '%s') %s", schema_clause, table, command)
|
||||
return s
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////
|
||||
// Oracle //
|
||||
///////////
|
||||
|
||||
// Implementation of Dialect for Oracle databases.
|
||||
type OracleDialect struct{}
|
||||
|
||||
func (d OracleDialect) QuerySuffix() string { return "" }
|
||||
|
||||
func (d OracleDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
|
||||
switch val.Kind() {
|
||||
case reflect.Ptr:
|
||||
return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
|
||||
case reflect.Bool:
|
||||
return "boolean"
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32:
|
||||
if isAutoIncr {
|
||||
return "serial"
|
||||
}
|
||||
return "integer"
|
||||
case reflect.Int64, reflect.Uint64:
|
||||
if isAutoIncr {
|
||||
return "bigserial"
|
||||
}
|
||||
return "bigint"
|
||||
case reflect.Float64:
|
||||
return "double precision"
|
||||
case reflect.Float32:
|
||||
return "real"
|
||||
case reflect.Slice:
|
||||
if val.Elem().Kind() == reflect.Uint8 {
|
||||
return "bytea"
|
||||
}
|
||||
}
|
||||
|
||||
switch val.Name() {
|
||||
case "NullInt64":
|
||||
return "bigint"
|
||||
case "NullFloat64":
|
||||
return "double precision"
|
||||
case "NullBool":
|
||||
return "boolean"
|
||||
case "NullTime", "Time":
|
||||
return "timestamp with time zone"
|
||||
}
|
||||
|
||||
if maxsize > 0 {
|
||||
return fmt.Sprintf("varchar(%d)", maxsize)
|
||||
} else {
|
||||
return "text"
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Returns empty string
|
||||
func (d OracleDialect) AutoIncrStr() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (d OracleDialect) AutoIncrBindValue() string {
|
||||
return "default"
|
||||
}
|
||||
|
||||
func (d OracleDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
|
||||
return " returning " + col.ColumnName
|
||||
}
|
||||
|
||||
// Returns suffix
|
||||
func (d OracleDialect) CreateTableSuffix() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (d OracleDialect) TruncateClause() string {
|
||||
return "truncate"
|
||||
}
|
||||
|
||||
// Returns "$(i+1)"
|
||||
func (d OracleDialect) BindVar(i int) string {
|
||||
return fmt.Sprintf(":%d", i+1)
|
||||
}
|
||||
|
||||
func (d OracleDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) {
|
||||
rows, err := exec.query(insertSql, params...)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
if rows.Next() {
|
||||
var id int64
|
||||
err := rows.Scan(&id)
|
||||
return id, err
|
||||
}
|
||||
|
||||
return 0, errors.New("No serial value returned for insert: " + insertSql + " Encountered error: " + rows.Err().Error())
|
||||
}
|
||||
|
||||
func (d OracleDialect) QuoteField(f string) string {
|
||||
return `"` + strings.ToUpper(f) + `"`
|
||||
}
|
||||
|
||||
func (d OracleDialect) QuotedTableForQuery(schema string, table string) string {
|
||||
if strings.TrimSpace(schema) == "" {
|
||||
return d.QuoteField(table)
|
||||
}
|
||||
|
||||
return schema + "." + d.QuoteField(table)
|
||||
}
|
||||
|
||||
func (d OracleDialect) IfSchemaNotExists(command, schema string) string {
|
||||
return fmt.Sprintf("%s if not exists", command)
|
||||
}
|
||||
|
||||
func (d OracleDialect) IfTableExists(command, schema, table string) string {
|
||||
return fmt.Sprintf("%s if exists", command)
|
||||
}
|
||||
|
||||
func (d OracleDialect) IfTableNotExists(command, schema, table string) string {
|
||||
return fmt.Sprintf("%s if not exists", command)
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
package gorp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// A non-fatal error, when a select query returns columns that do not exist
|
||||
// as fields in the struct it is being mapped to
|
||||
type NoFieldInTypeError struct {
|
||||
TypeName string
|
||||
MissingColNames []string
|
||||
}
|
||||
|
||||
func (err *NoFieldInTypeError) Error() string {
|
||||
return fmt.Sprintf("gorp: No fields %+v in type %s", err.MissingColNames, err.TypeName)
|
||||
}
|
||||
|
||||
// returns true if the error is non-fatal (ie, we shouldn't immediately return)
|
||||
func NonFatalError(err error) bool {
|
||||
switch err.(type) {
|
||||
case *NoFieldInTypeError:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,22 @@
|
|||
#!/bin/sh
# Run the gorp test suite once per supported database backend.

# on macs, you may need to:
# export GOBUILDFLAG=-ldflags -linkmode=external

set -e

# run_suite DSN DIALECT — point the tests at one backend and run them.
run_suite() {
    export GORP_TEST_DSN="$1"
    export GORP_TEST_DIALECT="$2"
    go test $GOBUILDFLAG .
}

run_suite 'gorptest/gorptest/gorptest' mysql
run_suite 'gorptest:gorptest@/gorptest' gomysql
run_suite 'user=gorptest password=gorptest dbname=gorptest sslmode=disable' postgres
run_suite '/tmp/gorptest.bin' sqlite
|
|
@ -69,10 +69,28 @@
|
|||
"revisionTime": "2016-12-08T17:59:04Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "8T6S2BULujaIanLkxD1iw5rklBY=",
|
||||
"path": "github.com/russross/meddler",
|
||||
"revision": "f742b0f9424f3bfaffe189e921d734608929e84f",
|
||||
"revisionTime": "2015-05-13T15:34:18Z"
|
||||
"checksumSHA1": "Qgwcmaxcms96cseiKeTu73RiKJw=",
|
||||
"path": "github.com/rubenv/sql-migrate",
|
||||
"revision": "1ed79968dfca5de79adb13c84523caaa4fc865a9",
|
||||
"revisionTime": "2017-01-04T09:54:22Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "IQ6NtaJ+F1rtAMIaCetzKb5nrxQ=",
|
||||
"path": "github.com/rubenv/sql-migrate/sqlparse",
|
||||
"revision": "1ed79968dfca5de79adb13c84523caaa4fc865a9",
|
||||
"revisionTime": "2017-01-04T09:54:22Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "Ak1mii27DHhEIjLnNqhK2HNibxg=",
|
||||
"path": "github.com/ziutek/mymysql/godrv",
|
||||
"revision": "501176fc99be4b4fb0fd01ae8d6af5d8bb408e44",
|
||||
"revisionTime": "2016-11-23T14:17:03Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "x5wFYF5U9/AMBxJ5LQZayskPW7o=",
|
||||
"path": "github.com/ziutek/mymysql/native",
|
||||
"revision": "501176fc99be4b4fb0fd01ae8d6af5d8bb408e44",
|
||||
"revisionTime": "2016-11-23T14:17:03Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "vE43s37+4CJ2CDU6TlOUOYE0K9c=",
|
||||
|
@ -91,6 +109,12 @@
|
|||
"path": "gopkg.in/alecthomas/kingpin.v2",
|
||||
"revision": "e9044be3ab2a8e11d4e1f418d12f0790d57e8d70",
|
||||
"revisionTime": "2016-08-29T10:30:05Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "xunNOgG8P6Xh4SM+LPQ1zMDXa8Q=",
|
||||
"path": "gopkg.in/gorp.v1",
|
||||
"revision": "c87af80f3cc5036b55b83d77171e156791085e2e",
|
||||
"revisionTime": "2015-02-04T08:55:30Z"
|
||||
}
|
||||
],
|
||||
"rootPath": "github.com/dannyvankooten/ana"
|
||||
|
|
Loading…
Reference in New Issue