Merge branch 'one-db' into 'master'

merge datastores into sqlx package

See merge request !101
Reed Allman
2017-07-11 11:30:41 -07:00
104 changed files with 229573 additions and 2732 deletions

View File

@@ -38,7 +38,7 @@ Then after every change, run
make run
```
to build and run the `functions` binary. It will start Functions using an embedded `Bolt` database running on port `8080`.
to build and run the `functions` binary. It will start Functions using an embedded `sqlite3` database running on port `8080`.
### Test

View File

@@ -35,10 +35,10 @@ docker-build:
docker build --build-arg HTTP_PROXY -t funcy/functions:latest .
docker-run: docker-build
docker run --rm --privileged -it -e NO_PROXY -e HTTP_PROXY -e LOG_LEVEL=debug -e "DB_URL=bolt:///app/data/bolt.db" -v ${CURDIR}/data:/app/data -p 8080:8080 funcy/functions
docker run --rm --privileged -it -e NO_PROXY -e HTTP_PROXY -e LOG_LEVEL=debug -e "DB_URL=sqlite3:///app/data/fn.db" -v ${CURDIR}/data:/app/data -p 8080:8080 funcy/functions
docker-test-run-with-bolt:
./api_test.sh bolt
docker-test-run-with-sqlite3:
./api_test.sh sqlite3
docker-test-run-with-mysql:
./api_test.sh mysql
@@ -46,9 +46,6 @@ docker-test-run-with-mysql:
docker-test-run-with-postgres:
./api_test.sh postgres
docker-test-run-with-redis:
./api_test.sh redis
docker-test:
docker run -ti --privileged --rm -e LOG_LEVEL=debug \
-v /var/run/docker.sock:/var/run/docker.sock \

View File

@@ -1,508 +0,0 @@
package bolt
import (
"encoding/json"
"net/url"
"os"
"path/filepath"
"time"
"context"
"regexp"
"strings"
"github.com/Sirupsen/logrus"
"github.com/boltdb/bolt"
"gitlab-odx.oracle.com/odx/functions/api/datastore/internal/datastoreutil"
"gitlab-odx.oracle.com/odx/functions/api/models"
)
type BoltDatastore struct {
routesBucket []byte
appsBucket []byte
logsBucket []byte
extrasBucket []byte
callsBucket []byte
db *bolt.DB
log logrus.FieldLogger
}
func New(url *url.URL) (models.Datastore, error) {
dir := filepath.Dir(url.Path)
log := logrus.WithFields(logrus.Fields{"db": url.Scheme, "dir": dir})
err := os.MkdirAll(dir, 0755)
if err != nil {
log.WithError(err).Errorln("Could not create data directory for db")
return nil, err
}
log.WithFields(logrus.Fields{"path": url.Path}).Debug("Creating bolt db")
db, err := bolt.Open(url.Path, 0655, &bolt.Options{Timeout: 1 * time.Second})
if err != nil {
log.WithError(err).Errorln("Error on bolt.Open")
return nil, err
}
// I don't think we need a prefix here do we? Made it blank. If we do, we should call the query param "prefix" instead of bucket.
bucketPrefix := ""
if url.Query()["bucket"] != nil {
bucketPrefix = url.Query()["bucket"][0]
}
routesBucketName := []byte(bucketPrefix + "routes")
appsBucketName := []byte(bucketPrefix + "apps")
logsBucketName := []byte(bucketPrefix + "logs")
extrasBucketName := []byte(bucketPrefix + "extras") // todo: think of a better name
callsBucketName := []byte(bucketPrefix + "calls")
err = db.Update(func(tx *bolt.Tx) error {
for _, name := range [][]byte{routesBucketName, appsBucketName, logsBucketName,
extrasBucketName, callsBucketName} {
_, err := tx.CreateBucketIfNotExists(name)
if err != nil {
log.WithError(err).WithFields(logrus.Fields{"name": name}).Error("create bucket")
return err
}
}
return nil
})
if err != nil {
log.WithError(err).Errorln("Error creating bolt buckets")
return nil, err
}
ds := &BoltDatastore{
routesBucket: routesBucketName,
appsBucket: appsBucketName,
logsBucket: logsBucketName,
extrasBucket: extrasBucketName,
callsBucket: callsBucketName,
db: db,
log: log,
}
log.WithFields(logrus.Fields{"prefix": bucketPrefix, "file": url.Path}).Debug("BoltDB initialized")
return datastoreutil.NewValidator(ds), nil
}
func (ds *BoltDatastore) InsertTask(ctx context.Context, task *models.Task) error {
var fnCall *models.FnCall
taskID := []byte(task.ID)
err := ds.db.Update(
func(tx *bolt.Tx) error {
bIm := tx.Bucket(ds.callsBucket)
buf, err := json.Marshal(fnCall.FromTask(task))
if err != nil {
return err
}
err = bIm.Put(taskID, buf)
if err != nil {
return err
}
return nil
})
return err
}
func (ds *BoltDatastore) GetTasks(ctx context.Context, filter *models.CallFilter) (models.FnCalls, error) {
res := models.FnCalls{}
err := ds.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(ds.callsBucket)
err2 := b.ForEach(func(key, v []byte) error {
call := &models.FnCall{}
err := json.Unmarshal(v, call)
if err != nil {
return err
}
if applyCallFilter(call, filter) {
res = append(res, call)
}
return nil
})
if err2 != nil {
logrus.WithError(err2).Errorln("Couldn't get calls!")
}
return err2
})
return res, err
}
func (ds *BoltDatastore) GetTask(ctx context.Context, callID string) (*models.FnCall, error) {
var res *models.FnCall
err := ds.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(ds.callsBucket)
v := b.Get([]byte(callID))
if v != nil {
fnCall := &models.FnCall{}
err := json.Unmarshal(v, fnCall)
if err != nil {
return err
}
res = fnCall
} else {
return models.ErrCallNotFound
}
return nil
})
return res, err
}
func (ds *BoltDatastore) InsertApp(ctx context.Context, app *models.App) (*models.App, error) {
appname := []byte(app.Name)
err := ds.db.Update(func(tx *bolt.Tx) error {
bIm := tx.Bucket(ds.appsBucket)
v := bIm.Get(appname)
if v != nil {
return models.ErrAppsAlreadyExists
}
buf, err := json.Marshal(app)
if err != nil {
return err
}
err = bIm.Put(appname, buf)
if err != nil {
return err
}
bjParent := tx.Bucket(ds.routesBucket)
_, err = bjParent.CreateBucketIfNotExists(appname)
if err != nil {
return err
}
return nil
})
return app, err
}
func (ds *BoltDatastore) UpdateApp(ctx context.Context, newapp *models.App) (*models.App, error) {
var app *models.App
appname := []byte(newapp.Name)
err := ds.db.Update(func(tx *bolt.Tx) error {
bIm := tx.Bucket(ds.appsBucket)
v := bIm.Get(appname)
if v == nil {
return models.ErrAppsNotFound
}
err := json.Unmarshal(v, &app)
if err != nil {
return err
}
app.UpdateConfig(newapp.Config)
buf, err := json.Marshal(app)
if err != nil {
return err
}
err = bIm.Put(appname, buf)
if err != nil {
return err
}
bjParent := tx.Bucket(ds.routesBucket)
_, err = bjParent.CreateBucketIfNotExists([]byte(app.Name))
if err != nil {
return err
}
return nil
})
return app, err
}
func (ds *BoltDatastore) RemoveApp(ctx context.Context, appName string) error {
err := ds.db.Update(func(tx *bolt.Tx) error {
bIm := tx.Bucket(ds.appsBucket)
err := bIm.Delete([]byte(appName))
if err != nil {
return err
}
bjParent := tx.Bucket(ds.routesBucket)
err = bjParent.DeleteBucket([]byte(appName))
if err != nil {
return err
}
return nil
})
return err
}
func (ds *BoltDatastore) GetApps(ctx context.Context, filter *models.AppFilter) ([]*models.App, error) {
res := []*models.App{}
err := ds.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(ds.appsBucket)
err2 := b.ForEach(func(key, v []byte) error {
app := &models.App{}
err := json.Unmarshal(v, app)
if err != nil {
return err
}
if applyAppFilter(app, filter) {
res = append(res, app)
}
return nil
})
if err2 != nil {
logrus.WithError(err2).Errorln("Couldn't get apps!")
}
return nil
})
if err != nil {
return nil, err
}
return res, nil
}
func (ds *BoltDatastore) GetApp(ctx context.Context, name string) (*models.App, error) {
var res *models.App
err := ds.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(ds.appsBucket)
v := b.Get([]byte(name))
if v != nil {
app := &models.App{}
err := json.Unmarshal(v, app)
if err != nil {
return err
}
res = app
} else {
return models.ErrAppsNotFound
}
return nil
})
if err != nil {
return nil, err
}
return res, nil
}
func (ds *BoltDatastore) getRouteBucketForApp(tx *bolt.Tx, appName string) (*bolt.Bucket, error) {
// todo: should this be reversed? Make a bucket for each app that contains sub buckets for routes, etc
bp := tx.Bucket(ds.routesBucket)
b := bp.Bucket([]byte(appName))
if b == nil {
return nil, models.ErrAppsNotFound
}
return b, nil
}
func (ds *BoltDatastore) InsertRoute(ctx context.Context, route *models.Route) (*models.Route, error) {
routePath := []byte(route.Path)
err := ds.db.Update(func(tx *bolt.Tx) error {
b, err := ds.getRouteBucketForApp(tx, route.AppName)
if err != nil {
return err
}
v := b.Get(routePath)
if v != nil {
return models.ErrRoutesAlreadyExists
}
buf, err := json.Marshal(route)
if err != nil {
return err
}
err = b.Put(routePath, buf)
if err != nil {
return err
}
return nil
})
if err != nil {
return nil, err
}
return route, nil
}
func (ds *BoltDatastore) UpdateRoute(ctx context.Context, newroute *models.Route) (*models.Route, error) {
routePath := []byte(newroute.Path)
var route *models.Route
err := ds.db.Update(func(tx *bolt.Tx) error {
b, err := ds.getRouteBucketForApp(tx, newroute.AppName)
if err != nil {
return err
}
v := b.Get(routePath)
if v == nil {
return models.ErrRoutesNotFound
}
err = json.Unmarshal(v, &route)
if err != nil {
return err
}
route.Update(newroute)
buf, err := json.Marshal(route)
if err != nil {
return err
}
return b.Put(routePath, buf)
})
if err != nil {
return nil, err
}
return route, nil
}
func (ds *BoltDatastore) RemoveRoute(ctx context.Context, appName, routePath string) error {
err := ds.db.Update(func(tx *bolt.Tx) error {
b, err := ds.getRouteBucketForApp(tx, appName)
if err != nil {
return err
}
err = b.Delete([]byte(routePath))
if err != nil {
return err
}
return nil
})
if err != nil {
return err
}
return nil
}
func (ds *BoltDatastore) GetRoute(ctx context.Context, appName, routePath string) (*models.Route, error) {
var route *models.Route
err := ds.db.View(func(tx *bolt.Tx) error {
b, err := ds.getRouteBucketForApp(tx, appName)
if err != nil {
return err
}
v := b.Get([]byte(routePath))
if v == nil {
return models.ErrRoutesNotFound
}
if v != nil {
err = json.Unmarshal(v, &route)
}
return err
})
return route, err
}
func (ds *BoltDatastore) GetRoutesByApp(ctx context.Context, appName string, filter *models.RouteFilter) ([]*models.Route, error) {
res := []*models.Route{}
err := ds.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(ds.routesBucket).Bucket([]byte(appName))
if b == nil {
return nil
}
i := 0
c := b.Cursor()
var k, v []byte
k, v = c.Last()
// Iterate backwards, newest first
for ; k != nil; k, v = c.Prev() {
var route models.Route
err := json.Unmarshal(v, &route)
if err != nil {
return err
}
if applyRouteFilter(&route, filter) {
i++
res = append(res, &route)
}
}
return nil
})
if err != nil {
return nil, err
}
return res, nil
}
func (ds *BoltDatastore) GetRoutes(ctx context.Context, filter *models.RouteFilter) ([]*models.Route, error) {
res := []*models.Route{}
err := ds.db.View(func(tx *bolt.Tx) error {
i := 0
rbucket := tx.Bucket(ds.routesBucket)
b := rbucket.Cursor()
var k, v []byte
k, v = b.First()
// Iterates all buckets
for ; k != nil && v == nil; k, v = b.Next() {
bucket := rbucket.Bucket(k)
r := bucket.Cursor()
var k2, v2 []byte
k2, v2 = r.Last()
// Iterate all routes
for ; k2 != nil; k2, v2 = r.Prev() {
var route models.Route
err := json.Unmarshal(v2, &route)
if err != nil {
return err
}
if applyRouteFilter(&route, filter) {
i++
res = append(res, &route)
}
}
}
return nil
})
if err != nil {
return nil, err
}
return res, nil
}
func (ds *BoltDatastore) Put(ctx context.Context, key, value []byte) error {
ds.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket(ds.extrasBucket) // todo: maybe namespace by app?
err := b.Put(key, value)
return err
})
return nil
}
func (ds *BoltDatastore) Get(ctx context.Context, key []byte) ([]byte, error) {
var ret []byte
ds.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(ds.extrasBucket)
ret = b.Get(key)
return nil
})
return ret, nil
}
func applyAppFilter(app *models.App, filter *models.AppFilter) bool {
if filter != nil && filter.Name != "" {
nameLike, err := regexp.MatchString(strings.Replace(filter.Name, "%", ".*", -1), app.Name)
return err == nil && nameLike
}
return true
}
func applyRouteFilter(route *models.Route, filter *models.RouteFilter) bool {
return filter == nil || (filter.Path == "" || route.Path == filter.Path) &&
(filter.AppName == "" || route.AppName == filter.AppName) &&
(filter.Image == "" || route.Image == filter.Image)
}
func applyCallFilter(call *models.FnCall, filter *models.CallFilter) bool {
return filter == nil || (filter.AppName == call.AppName) && (filter.Path == call.Path)
}

View File

@@ -1,24 +0,0 @@
package bolt
import (
"net/url"
"os"
"testing"
"gitlab-odx.oracle.com/odx/functions/api/datastore/internal/datastoretest"
)
const tmpBolt = "/tmp/func_test_bolt.db"
func TestDatastore(t *testing.T) {
os.Remove(tmpBolt)
u, err := url.Parse("bolt://" + tmpBolt)
if err != nil {
t.Fatalf("failed to parse url:", err)
}
ds, err := New(u)
if err != nil {
t.Fatalf("failed to create bolt datastore:", err)
}
datastoretest.Test(t, ds)
}

View File

@@ -5,10 +5,7 @@ import (
"net/url"
"github.com/Sirupsen/logrus"
"gitlab-odx.oracle.com/odx/functions/api/datastore/bolt"
"gitlab-odx.oracle.com/odx/functions/api/datastore/mysql"
"gitlab-odx.oracle.com/odx/functions/api/datastore/postgres"
"gitlab-odx.oracle.com/odx/functions/api/datastore/redis"
"gitlab-odx.oracle.com/odx/functions/api/datastore/sql"
"gitlab-odx.oracle.com/odx/functions/api/models"
)
@@ -19,14 +16,8 @@ func New(dbURL string) (models.Datastore, error) {
}
logrus.WithFields(logrus.Fields{"db": u.Scheme}).Debug("creating new datastore")
switch u.Scheme {
case "bolt":
return bolt.New(u)
case "postgres":
return postgres.New(u)
case "mysql":
return mysql.New(u)
case "redis":
return redis.New(u)
case "sqlite3", "postgres", "mysql":
return sql.New(u)
default:
return nil, fmt.Errorf("db type not supported %v", u.Scheme)
}
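
Not part of the diff: a minimal usage sketch of the consolidated constructor, assuming the package import path `gitlab-odx.oracle.com/odx/functions/api/datastore` and a local sqlite3 file. The URL scheme (`sqlite3`, `postgres`, or `mysql`) now selects the single sql-backed datastore; any other scheme returns an error.

```
package main

import (
	"log"

	// assumed import path for the package shown in the hunk above
	"gitlab-odx.oracle.com/odx/functions/api/datastore"
)

func main() {
	// The scheme of the DB URL picks the shared sql datastore.
	ds, err := datastore.New("sqlite3:///tmp/fn.db")
	if err != nil {
		log.Fatalf("unsupported or unreachable datastore: %v", err)
	}
	_ = ds // ds satisfies models.Datastore
}
```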

View File

@@ -468,47 +468,6 @@ func Test(t *testing.T, ds models.Datastore) {
t.Fatalf("Test UpdateRoute inexistent: expected error to be `%v`, but it was `%v`", models.ErrRoutesNotFound, err)
}
})
t.Run("put-get", func(t *testing.T) {
// Testing Put/Get
err := ds.Put(ctx, nil, nil)
if err != models.ErrDatastoreEmptyKey {
t.Log(buf.String())
t.Fatalf("Test Put(nil,nil): expected error `%v`, but it was `%v`", models.ErrDatastoreEmptyKey, err)
}
err = ds.Put(ctx, []byte("test"), []byte("success"))
if err != nil {
t.Log(buf.String())
t.Fatalf("Test Put: unexpected error: %v", err)
}
val, err := ds.Get(ctx, []byte("test"))
if err != nil {
t.Log(buf.String())
t.Fatalf("Test Put: unexpected error: %v", err)
}
if string(val) != "success" {
t.Log(buf.String())
t.Fatalf("Test Get: expected value to be `%v`, but it was `%v`", "success", string(val))
}
err = ds.Put(ctx, []byte("test"), nil)
if err != nil {
t.Log(buf.String())
t.Fatalf("Test Put: unexpected error: %v", err)
}
val, err = ds.Get(ctx, []byte("test"))
if err != nil {
t.Log(buf.String())
t.Fatalf("Test Put: unexpected error: %v", err)
}
if string(val) != "" {
t.Log(buf.String())
t.Fatalf("Test Get: expected value to be `%v`, but it was `%v`", "", string(val))
}
})
}
var testApp = &models.App{

View File

@@ -5,12 +5,13 @@ import (
"database/sql"
"encoding/json"
"fmt"
"github.com/Sirupsen/logrus"
"strings"
"gitlab-odx.oracle.com/odx/functions/api/models"
)
// TODO scrap for sqlx
type RowScanner interface {
Scan(dest ...interface{}) error
}
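
Not part of the diff: `RowScanner` matches the `Scan(dest ...interface{}) error` method shared by `*sql.Row` and `*sql.Rows`, so the same scan helpers can serve single-row and multi-row queries. A minimal sketch with a hypothetical `scanName` helper:

```
package datastoreutil

import "database/sql"

// scanName is a hypothetical helper: it accepts either a *sql.Row (from
// db.QueryRow) or a *sql.Rows (from db.Query), because both types provide
// Scan(dest ...interface{}) error and therefore satisfy RowScanner.
func scanName(s RowScanner) (string, error) {
	var name string
	err := s.Scan(&name)
	return name, err
}

// example single-row call site
func appName(db *sql.DB, name string) (string, error) {
	return scanName(db.QueryRow(`SELECT name FROM apps WHERE name=?`, name))
}
```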
@@ -165,213 +166,4 @@ func ScanCall(scanner RowScanner, call *models.FnCall) error {
return err
}
return nil
}
func SQLGetCall(db *sql.DB, callSelector, callID, whereStm string) (*models.FnCall, error) {
var call models.FnCall
row := db.QueryRow(fmt.Sprintf(whereStm, callSelector), callID)
err := ScanCall(row, &call)
if err != nil {
return nil, err
}
return &call, nil
}
func SQLGetCalls(db *sql.DB, cSelector string, filter *models.CallFilter, whereStm, andStm string) (models.FnCalls, error) {
res := models.FnCalls{}
filterQuery, args := BuildFilterCallQuery(filter, whereStm, andStm)
rows, err := db.Query(fmt.Sprintf("%s %s", cSelector, filterQuery), args...)
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var call models.FnCall
err := ScanCall(rows, &call)
if err != nil {
continue
}
res = append(res, &call)
}
if err := rows.Err(); err != nil {
return nil, err
}
return res, nil
}
func SQLGetApp(db *sql.DB, queryStr string, queryArgs ...interface{}) (*models.App, error) {
row := db.QueryRow(queryStr, queryArgs...)
var resName string
var config string
err := row.Scan(&resName, &config)
if err != nil {
if err == sql.ErrNoRows {
return nil, models.ErrAppsNotFound
}
return nil, err
}
res := &models.App{
Name: resName,
}
if len(config) > 0 {
err := json.Unmarshal([]byte(config), &res.Config)
if err != nil {
return nil, err
}
}
return res, nil
}
func SQLGetApps(db *sql.DB, filter *models.AppFilter, whereStm, selectStm string) ([]*models.App, error) {
res := []*models.App{}
filterQuery, args := BuildFilterAppQuery(filter, whereStm)
rows, err := db.Query(fmt.Sprintf(selectStm, filterQuery), args...)
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var app models.App
err := ScanApp(rows, &app)
if err != nil {
if err == sql.ErrNoRows {
return res, nil
}
return res, err
}
res = append(res, &app)
}
if err := rows.Err(); err != nil {
return res, err
}
return res, nil
}
func NewDatastore(dataSourceName, dialect string, tables []string) (*sql.DB, error) {
db, err := sql.Open(dialect, dataSourceName)
if err != nil {
return nil, err
}
err = db.Ping()
if err != nil {
return nil, err
}
maxIdleConns := 30 // c.MaxIdleConnections
db.SetMaxIdleConns(maxIdleConns)
logrus.WithFields(logrus.Fields{"max_idle_connections": maxIdleConns}).Info(
fmt.Sprintf("%v datastore dialed", dialect))
for _, v := range tables {
_, err = db.Exec(v)
if err != nil {
return nil, err
}
}
return db, nil
}
func SQLGetRoutes(db *sql.DB, filter *models.RouteFilter, rSelect string, whereStm, andStm string) ([]*models.Route, error) {
res := []*models.Route{}
filterQuery, args := BuildFilterRouteQuery(filter, whereStm, andStm)
rows, err := db.Query(fmt.Sprintf("%s %s", rSelect, filterQuery), args...)
// todo: check for no rows so we don't respond with a sql 500 err
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var route models.Route
err := ScanRoute(rows, &route)
if err != nil {
continue
}
res = append(res, &route)
}
if err := rows.Err(); err != nil {
return nil, err
}
return res, nil
}
func SQLGetRoutesByApp(db *sql.DB, appName string, filter *models.RouteFilter, rSelect, defaultFilterQuery, whereStm, andStm string) ([]*models.Route, error) {
res := []*models.Route{}
var filterQuery string
var args []interface{}
if filter == nil {
filterQuery = defaultFilterQuery
args = []interface{}{appName}
} else {
filter.AppName = appName
filterQuery, args = BuildFilterRouteQuery(filter, whereStm, andStm)
}
rows, err := db.Query(fmt.Sprintf("%s %s", rSelect, filterQuery), args...)
// todo: check for no rows so we don't respond with a sql 500 err
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var route models.Route
err := ScanRoute(rows, &route)
if err != nil {
continue
}
res = append(res, &route)
}
if err := rows.Err(); err != nil {
return nil, err
}
return res, nil
}
func SQLGetRoute(db *sql.DB, appName, routePath, rSelectCondition, routeSelector string) (*models.Route, error) {
var route models.Route
row := db.QueryRow(fmt.Sprintf(rSelectCondition, routeSelector), appName, routePath)
err := ScanRoute(row, &route)
if err == sql.ErrNoRows {
return nil, models.ErrRoutesNotFound
} else if err != nil {
return nil, err
}
return &route, nil
}
func SQLRemoveRoute(db *sql.DB, appName, routePath, deleteStm string) error {
res, err := db.Exec(deleteStm, routePath, appName)
if err != nil {
return err
}
n, err := res.RowsAffected()
if err != nil {
return err
}
if n == 0 {
return models.ErrRoutesRemoving
}
return nil
}

View File

@@ -6,62 +6,28 @@ import (
"gitlab-odx.oracle.com/odx/functions/api/models"
)
// Datastore is a copy of models.Datastore, with additional comments on parameter guarantees.
type Datastore interface {
// name will never be empty.
GetApp(ctx context.Context, name string) (*models.App, error)
GetApps(ctx context.Context, appFilter *models.AppFilter) ([]*models.App, error)
// app and app.Name will never be nil/empty.
InsertApp(ctx context.Context, app *models.App) (*models.App, error)
UpdateApp(ctx context.Context, app *models.App) (*models.App, error)
// name will never be empty.
RemoveApp(ctx context.Context, name string) error
// appName and routePath will never be empty.
GetRoute(ctx context.Context, appName, routePath string) (*models.Route, error)
RemoveRoute(ctx context.Context, appName, routePath string) error
GetRoutes(ctx context.Context, filter *models.RouteFilter) (routes []*models.Route, err error)
// appName will never be empty
GetRoutesByApp(ctx context.Context, appName string, filter *models.RouteFilter) (routes []*models.Route, err error)
// route will never be nil and route's AppName and Path will never be empty.
InsertRoute(ctx context.Context, route *models.Route) (*models.Route, error)
UpdateRoute(ctx context.Context, route *models.Route) (*models.Route, error)
InsertTask(ctx context.Context, task *models.Task) error
GetTask(ctx context.Context, callID string) (*models.FnCall, error)
GetTasks(ctx context.Context, filter *models.CallFilter) (models.FnCalls, error)
// key will never be nil/empty
Put(ctx context.Context, key, val []byte) error
Get(ctx context.Context, key []byte) ([]byte, error)
}
// NewValidator returns a models.Datastore which validates certain arguments before delegating to ds.
func NewValidator(ds Datastore) models.Datastore {
func NewValidator(ds models.Datastore) models.Datastore {
return &validator{ds}
}
type validator struct {
ds Datastore
models.Datastore
}
// name will never be empty.
func (v *validator) GetApp(ctx context.Context, name string) (app *models.App, err error) {
if name == "" {
return nil, models.ErrDatastoreEmptyAppName
}
return v.ds.GetApp(ctx, name)
return v.Datastore.GetApp(ctx, name)
}
func (v *validator) GetApps(ctx context.Context, appFilter *models.AppFilter) ([]*models.App, error) {
return v.ds.GetApps(ctx, appFilter)
return v.Datastore.GetApps(ctx, appFilter)
}
// app and app.Name will never be nil/empty.
func (v *validator) InsertApp(ctx context.Context, app *models.App) (*models.App, error) {
if app == nil {
return nil, models.ErrDatastoreEmptyApp
@@ -70,9 +36,10 @@ func (v *validator) InsertApp(ctx context.Context, app *models.App) (*models.App
return nil, models.ErrDatastoreEmptyAppName
}
return v.ds.InsertApp(ctx, app)
return v.Datastore.InsertApp(ctx, app)
}
// app and app.Name will never be nil/empty.
func (v *validator) UpdateApp(ctx context.Context, app *models.App) (*models.App, error) {
if app == nil {
return nil, models.ErrDatastoreEmptyApp
@@ -80,17 +47,19 @@ func (v *validator) UpdateApp(ctx context.Context, app *models.App) (*models.App
if app.Name == "" {
return nil, models.ErrDatastoreEmptyAppName
}
return v.ds.UpdateApp(ctx, app)
return v.Datastore.UpdateApp(ctx, app)
}
// name will never be empty.
func (v *validator) RemoveApp(ctx context.Context, name string) error {
if name == "" {
return models.ErrDatastoreEmptyAppName
}
return v.ds.RemoveApp(ctx, name)
return v.Datastore.RemoveApp(ctx, name)
}
// appName and routePath will never be empty.
func (v *validator) GetRoute(ctx context.Context, appName, routePath string) (*models.Route, error) {
if appName == "" {
return nil, models.ErrDatastoreEmptyAppName
@@ -99,24 +68,26 @@ func (v *validator) GetRoute(ctx context.Context, appName, routePath string) (*m
return nil, models.ErrDatastoreEmptyRoutePath
}
return v.ds.GetRoute(ctx, appName, routePath)
return v.Datastore.GetRoute(ctx, appName, routePath)
}
func (v *validator) GetRoutes(ctx context.Context, routeFilter *models.RouteFilter) (routes []*models.Route, err error) {
if routeFilter != nil && routeFilter.AppName != "" {
return v.ds.GetRoutesByApp(ctx, routeFilter.AppName, routeFilter)
return v.Datastore.GetRoutesByApp(ctx, routeFilter.AppName, routeFilter)
}
return v.ds.GetRoutes(ctx, routeFilter)
return v.Datastore.GetRoutes(ctx, routeFilter)
}
// appName will never be empty
func (v *validator) GetRoutesByApp(ctx context.Context, appName string, routeFilter *models.RouteFilter) (routes []*models.Route, err error) {
if appName == "" {
return nil, models.ErrDatastoreEmptyAppName
}
return v.ds.GetRoutesByApp(ctx, appName, routeFilter)
return v.Datastore.GetRoutesByApp(ctx, appName, routeFilter)
}
// route will never be nil and route's AppName and Path will never be empty.
func (v *validator) InsertRoute(ctx context.Context, route *models.Route) (*models.Route, error) {
if route == nil {
return nil, models.ErrDatastoreEmptyRoute
@@ -128,9 +99,10 @@ func (v *validator) InsertRoute(ctx context.Context, route *models.Route) (*mode
return nil, models.ErrDatastoreEmptyRoutePath
}
return v.ds.InsertRoute(ctx, route)
return v.Datastore.InsertRoute(ctx, route)
}
// route will never be nil and route's AppName and Path will never be empty.
func (v *validator) UpdateRoute(ctx context.Context, newroute *models.Route) (*models.Route, error) {
if newroute == nil {
return nil, models.ErrDatastoreEmptyRoute
@@ -141,9 +113,10 @@ func (v *validator) UpdateRoute(ctx context.Context, newroute *models.Route) (*m
if newroute.Path == "" {
return nil, models.ErrDatastoreEmptyRoutePath
}
return v.ds.UpdateRoute(ctx, newroute)
return v.Datastore.UpdateRoute(ctx, newroute)
}
// appName and routePath will never be empty.
func (v *validator) RemoveRoute(ctx context.Context, appName, routePath string) error {
if appName == "" {
return models.ErrDatastoreEmptyAppName
@@ -152,35 +125,13 @@ func (v *validator) RemoveRoute(ctx context.Context, appName, routePath string)
return models.ErrDatastoreEmptyRoutePath
}
return v.ds.RemoveRoute(ctx, appName, routePath)
}
func (v *validator) Put(ctx context.Context, key, value []byte) error {
if len(key) == 0 {
return models.ErrDatastoreEmptyKey
}
return v.ds.Put(ctx, key, value)
}
func (v *validator) Get(ctx context.Context, key []byte) ([]byte, error) {
if len(key) == 0 {
return nil, models.ErrDatastoreEmptyKey
}
return v.ds.Get(ctx, key)
}
func (v *validator) InsertTask(ctx context.Context, task *models.Task) error {
return v.ds.InsertTask(ctx, task)
return v.Datastore.RemoveRoute(ctx, appName, routePath)
}
// callID will never be empty.
func (v *validator) GetTask(ctx context.Context, callID string) (*models.FnCall, error) {
if callID == "" {
return nil, models.ErrDatastoreEmptyTaskID
}
return v.ds.GetTask(ctx, callID)
}
func (v *validator) GetTasks(ctx context.Context, filter *models.CallFilter) (models.FnCalls, error) {
return v.ds.GetTasks(ctx, filter)
return v.Datastore.GetTask(ctx, callID)
}
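
Not part of the diff: the rewritten validator embeds `models.Datastore` and overrides only the methods that need argument checks; every other call falls through to the embedded value. A self-contained sketch of that pattern, using hypothetical stand-in types:

```
package main

import (
	"errors"
	"fmt"
)

// Store stands in for models.Datastore in this sketch.
type Store interface {
	GetApp(name string) (string, error)
}

type memStore struct{}

func (memStore) GetApp(name string) (string, error) { return "app:" + name, nil }

// validator embeds Store: methods it does not override delegate automatically.
type validator struct {
	Store
}

// GetApp adds the argument check, then delegates to the embedded Store.
func (v *validator) GetApp(name string) (string, error) {
	if name == "" {
		return "", errors.New("empty app name")
	}
	return v.Store.GetApp(name)
}

func main() {
	var ds Store = &validator{memStore{}}
	fmt.Println(ds.GetApp("myapp")) // "app:myapp" and a nil error
	fmt.Println(ds.GetApp(""))      // "" and the validation error
}
```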

View File

@@ -4,6 +4,7 @@ import (
"context"
"gitlab-odx.oracle.com/odx/functions/api/datastore/internal/datastoreutil"
"gitlab-odx.oracle.com/odx/functions/api/logs"
"gitlab-odx.oracle.com/odx/functions/api/models"
)
@@ -12,13 +13,15 @@ type mock struct {
Routes models.Routes
Calls models.FnCalls
data map[string][]byte
models.FnLog
}
func NewMock() models.Datastore {
return NewMockInit(nil, nil, nil, nil)
}
func NewMockInit(apps models.Apps, routes models.Routes, calls models.FnCalls, logs []*models.FnCallLog) models.Datastore {
func NewMockInit(apps models.Apps, routes models.Routes, calls models.FnCalls, loggos []*models.FnCallLog) models.Datastore {
if apps == nil {
apps = models.Apps{}
}
@@ -28,10 +31,10 @@ func NewMockInit(apps models.Apps, routes models.Routes, calls models.FnCalls, l
if calls == nil {
calls = models.FnCalls{}
}
if logs == nil {
logs = []*models.FnCallLog{}
if loggos == nil {
loggos = []*models.FnCallLog{}
}
return datastoreutil.NewValidator(&mock{apps, routes, calls, make(map[string][]byte)})
return datastoreutil.NewValidator(&mock{apps, routes, calls, make(map[string][]byte), logs.NewMock()})
}
func (m *mock) GetApp(ctx context.Context, appName string) (app *models.App, err error) {

View File

@@ -1,453 +0,0 @@
package mysql
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"net/url"
"github.com/go-sql-driver/mysql"
_ "github.com/go-sql-driver/mysql"
"gitlab-odx.oracle.com/odx/functions/api/datastore/internal/datastoreutil"
"gitlab-odx.oracle.com/odx/functions/api/models"
)
const routesTableCreate = `CREATE TABLE IF NOT EXISTS routes (
app_name varchar(256) NOT NULL,
path varchar(256) NOT NULL,
image varchar(256) NOT NULL,
format varchar(16) NOT NULL,
memory int NOT NULL,
timeout int NOT NULL,
idle_timeout int NOT NULL,
type varchar(16) NOT NULL,
headers text NOT NULL,
config text NOT NULL,
PRIMARY KEY (app_name, path)
);`
const appsTableCreate = `CREATE TABLE IF NOT EXISTS apps (
name varchar(256) NOT NULL PRIMARY KEY,
config text NOT NULL
);`
const extrasTableCreate = `CREATE TABLE IF NOT EXISTS extras (
id varchar(256) NOT NULL PRIMARY KEY,
value varchar(256) NOT NULL
);`
const routeSelector = `SELECT app_name, path, image, format, memory, type, timeout, idle_timeout, headers, config FROM routes`
const callTableCreate = `CREATE TABLE IF NOT EXISTS calls (
created_at varchar(256) NOT NULL,
started_at varchar(256) NOT NULL,
completed_at varchar(256) NOT NULL,
status varchar(256) NOT NULL,
id varchar(256) NOT NULL,
app_name varchar(256) NOT NULL,
path varchar(256) NOT NULL,
PRIMARY KEY (id)
);`
const callSelector = `SELECT id, created_at, started_at, completed_at, status, app_name, path FROM calls`
/*
MySQLDatastore defines a basic MySQL Datastore struct.
*/
type MySQLDatastore struct {
db *sql.DB
}
/*
New creates a new MySQL Datastore.
*/
func New(url *url.URL) (models.Datastore, error) {
tables := []string{routesTableCreate, appsTableCreate,
extrasTableCreate, callTableCreate}
dialect := "mysql"
sqlDatastore := &MySQLDatastore{}
dataSourceName := fmt.Sprintf("%s@%s%s", url.User.String(), url.Host, url.Path)
db, err := datastoreutil.NewDatastore(dataSourceName, dialect, tables)
if err != nil {
return nil, err
}
sqlDatastore.db = db
return datastoreutil.NewValidator(sqlDatastore), nil
}
/*
InsertApp inserts an app into MySQL.
*/
func (ds *MySQLDatastore) InsertApp(ctx context.Context, app *models.App) (*models.App, error) {
var cbyte []byte
var err error
if app.Config != nil {
cbyte, err = json.Marshal(app.Config)
if err != nil {
return nil, err
}
}
stmt, err := ds.db.Prepare("INSERT apps SET name=?,config=?")
if err != nil {
return nil, err
}
_, err = stmt.Exec(app.Name, string(cbyte))
if err != nil {
mysqlErr := err.(*mysql.MySQLError)
if mysqlErr.Number == 1062 {
return nil, models.ErrAppsAlreadyExists
}
return nil, err
}
return app, nil
}
/*
UpdateApp updates an existing app on MySQL.
*/
func (ds *MySQLDatastore) UpdateApp(ctx context.Context, newapp *models.App) (*models.App, error) {
app := &models.App{Name: newapp.Name}
err := ds.Tx(func(tx *sql.Tx) error {
row := ds.db.QueryRow(`SELECT config FROM apps WHERE name=?`, app.Name)
var config string
if err := row.Scan(&config); err != nil {
if err == sql.ErrNoRows {
return models.ErrAppsNotFound
}
return err
}
if config != "" {
err := json.Unmarshal([]byte(config), &app.Config)
if err != nil {
return err
}
}
app.UpdateConfig(newapp.Config)
cbyte, err := json.Marshal(app.Config)
if err != nil {
return err
}
stmt, err := ds.db.Prepare(`UPDATE apps SET config=? WHERE name=?`)
if err != nil {
return err
}
res, err := stmt.Exec(string(cbyte), app.Name)
if err != nil {
return err
}
if n, err := res.RowsAffected(); err != nil {
return err
} else if n == 0 {
return models.ErrAppsNotFound
}
return nil
})
if err != nil {
return nil, err
}
return app, nil
}
/*
RemoveApp removes an existing app on MySQL.
*/
func (ds *MySQLDatastore) RemoveApp(ctx context.Context, appName string) error {
_, err := ds.db.Exec(`
DELETE FROM apps
WHERE name = ?
`, appName)
return err
}
/*
GetApp retrieves an app from MySQL.
*/
func (ds *MySQLDatastore) GetApp(ctx context.Context, name string) (*models.App, error) {
queryStr := `SELECT name, config FROM apps WHERE name=?`
queryArgs := []interface{}{name}
return datastoreutil.SQLGetApp(ds.db, queryStr, queryArgs...)
}
/*
GetApps retrieves an array of apps according to a specific filter.
*/
func (ds *MySQLDatastore) GetApps(ctx context.Context, filter *models.AppFilter) ([]*models.App, error) {
whereStm := "WHERE name LIKE ?"
selectStm := "SELECT DISTINCT name, config FROM apps %s"
return datastoreutil.SQLGetApps(ds.db, filter, whereStm, selectStm)
}
/*
InsertRoute inserts a route into MySQL.
*/
func (ds *MySQLDatastore) InsertRoute(ctx context.Context, route *models.Route) (*models.Route, error) {
hbyte, err := json.Marshal(route.Headers)
if err != nil {
return nil, err
}
cbyte, err := json.Marshal(route.Config)
if err != nil {
return nil, err
}
err = ds.Tx(func(tx *sql.Tx) error {
r := tx.QueryRow(`SELECT 1 FROM apps WHERE name=?`, route.AppName)
if err := r.Scan(new(int)); err != nil {
if err == sql.ErrNoRows {
return models.ErrAppsNotFound
}
}
same, err := tx.Query(`SELECT 1 FROM routes WHERE app_name=? AND path=?`,
route.AppName, route.Path)
if err != nil {
return err
}
defer same.Close()
if same.Next() {
return models.ErrRoutesAlreadyExists
}
_, err = tx.Exec(`
INSERT INTO routes (
app_name,
path,
image,
format,
memory,
type,
timeout,
idle_timeout,
headers,
config
)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?);`,
route.AppName,
route.Path,
route.Image,
route.Format,
route.Memory,
route.Type,
route.Timeout,
route.IdleTimeout,
string(hbyte),
string(cbyte),
)
return err
})
if err != nil {
return nil, err
}
return route, nil
}
/*
UpdateRoute updates an existing route on MySQL.
*/
func (ds *MySQLDatastore) UpdateRoute(ctx context.Context, newroute *models.Route) (*models.Route, error) {
var route models.Route
err := ds.Tx(func(tx *sql.Tx) error {
row := ds.db.QueryRow(fmt.Sprintf("%s WHERE app_name=? AND path=?", routeSelector), newroute.AppName, newroute.Path)
if err := datastoreutil.ScanRoute(row, &route); err == sql.ErrNoRows {
return models.ErrRoutesNotFound
} else if err != nil {
return err
}
route.Update(newroute)
hbyte, err := json.Marshal(route.Headers)
if err != nil {
return err
}
cbyte, err := json.Marshal(route.Config)
if err != nil {
return err
}
res, err := tx.Exec(`
UPDATE routes SET
image = ?,
format = ?,
memory = ?,
type = ?,
timeout = ?,
idle_timeout = ?,
headers = ?,
config = ?
WHERE app_name = ? AND path = ?;`,
route.Image,
route.Format,
route.Memory,
route.Type,
route.Timeout,
route.IdleTimeout,
string(hbyte),
string(cbyte),
route.AppName,
route.Path,
)
if err != nil {
return err
}
if n, err := res.RowsAffected(); err != nil {
return err
} else if n == 0 {
return models.ErrRoutesNotFound
}
return nil
})
if err != nil {
return nil, err
}
return &route, nil
}
/*
RemoveRoute removes an existing route on MySQL.
*/
func (ds *MySQLDatastore) RemoveRoute(ctx context.Context, appName, routePath string) error {
deleteStm := `DELETE FROM routes WHERE path = ? AND app_name = ?`
return datastoreutil.SQLRemoveRoute(ds.db, appName, routePath, deleteStm)
}
/*
GetRoute retrieves a route from MySQL.
*/
func (ds *MySQLDatastore) GetRoute(ctx context.Context, appName, routePath string) (*models.Route, error) {
rSelectCondition := "%s WHERE app_name=? AND path=?"
return datastoreutil.SQLGetRoute(ds.db, appName, routePath, rSelectCondition, routeSelector)
}
/*
GetRoutes retrieves an array of routes according to a specific filter.
*/
func (ds *MySQLDatastore) GetRoutes(ctx context.Context, filter *models.RouteFilter) ([]*models.Route, error) {
whereStm := "WHERE %s ?"
andStm := " AND %s ?"
return datastoreutil.SQLGetRoutes(ds.db, filter, routeSelector, whereStm, andStm)
}
/*
GetRoutesByApp retrieves the routes for a specific app name.
*/
func (ds *MySQLDatastore) GetRoutesByApp(ctx context.Context, appName string, filter *models.RouteFilter) ([]*models.Route, error) {
whereStm := "WHERE %s ?"
andStm := " AND %s ?"
defaultFilterQuery := "WHERE app_name = ?"
return datastoreutil.SQLGetRoutesByApp(ds.db, appName, filter, routeSelector, defaultFilterQuery, whereStm, andStm)
}
/*
Put inserts an extra into MySQL.
*/
func (ds *MySQLDatastore) Put(ctx context.Context, key, value []byte) error {
_, err := ds.db.Exec(`
INSERT INTO extras (
id,
value
)
VALUES (?, ?)
ON DUPLICATE KEY UPDATE
value = ?
`, string(key), string(value), string(value))
if err != nil {
return err
}
return nil
}
/*
Get retrieves the value of a specific extra from MySQL.
*/
func (ds *MySQLDatastore) Get(ctx context.Context, key []byte) ([]byte, error) {
row := ds.db.QueryRow("SELECT value FROM extras WHERE id=?", key)
var value string
err := row.Scan(&value)
if err == sql.ErrNoRows {
return nil, nil
} else if err != nil {
return nil, err
}
return []byte(value), nil
}
/*
Tx begins and commits a MySQL transaction.
*/
func (ds *MySQLDatastore) Tx(f func(*sql.Tx) error) error {
tx, err := ds.db.Begin()
if err != nil {
return err
}
err = f(tx)
if err != nil {
tx.Rollback()
return err
}
return tx.Commit()
}
func (ds *MySQLDatastore) InsertTask(ctx context.Context, task *models.Task) error {
stmt, err := ds.db.Prepare("INSERT calls SET id=?,created_at=?,started_at=?,completed_at=?,status=?,app_name=?,path=?")
if err != nil {
return err
}
_, err = stmt.Exec(task.ID, task.CreatedAt.String(),
task.StartedAt.String(), task.CompletedAt.String(),
task.Status, task.AppName, task.Path)
if err != nil {
return err
}
return nil
}
func (ds *MySQLDatastore) GetTask(ctx context.Context, callID string) (*models.FnCall, error) {
whereStm := "%s WHERE id=?"
return datastoreutil.SQLGetCall(ds.db, callSelector, callID, whereStm)
}
func (ds *MySQLDatastore) GetTasks(ctx context.Context, filter *models.CallFilter) (models.FnCalls, error) {
whereStm := "WHERE %s ?"
andStm := " AND %s ?"
return datastoreutil.SQLGetCalls(ds.db, callSelector, filter, whereStm, andStm)
}

View File

@@ -1,133 +0,0 @@
package mysql
import (
"bytes"
"database/sql"
"fmt"
"log"
"net/url"
"os"
"os/exec"
"strconv"
"testing"
"time"
"gitlab-odx.oracle.com/odx/functions/api/datastore/internal/datastoretest"
)
const tmpMysql = "mysql://root:root@tcp(%s:%d)/funcs"
var (
mysqlHost = func() string {
host := os.Getenv("MYSQL_HOST")
if host == "" {
host = "127.0.0.1"
}
return host
}()
mysqlPort = func() int {
port := os.Getenv("MYSQL_PORT")
if port == "" {
port = "3307"
}
p, err := strconv.Atoi(port)
if err != nil {
panic(err)
}
return p
}()
)
func prepareMysqlTest(logf, fatalf func(string, ...interface{})) (func(), func()) {
timeout := time.After(60 * time.Second)
wait := 2 * time.Second
var db *sql.DB
var err error
var buf bytes.Buffer
time.Sleep(time.Second * 25)
for {
db, err = sql.Open("mysql", fmt.Sprintf("root:root@tcp(%s:%v)/",
mysqlHost, mysqlPort))
if err != nil {
fmt.Fprintln(&buf, "failed to connect to mysql:", err)
fmt.Fprintln(&buf, "retrying in:", wait)
} else {
// Ping
if _, err = db.Exec("SELECT 1"); err == nil {
break
}
fmt.Fprintln(&buf, "failed to ping database:", err)
}
select {
case <-timeout:
fmt.Println(buf.String())
log.Fatal("timed out waiting for mysql")
case <-time.After(wait):
continue
}
}
_, err = db.Exec("DROP DATABASE IF EXISTS funcs;")
if err != nil {
fmt.Println("failed to drop database:", err)
}
_, err = db.Exec("CREATE DATABASE funcs;")
if err != nil {
fatalf("failed to create database: %s\n", err)
}
_, err = db.Exec(`GRANT ALL PRIVILEGES ON funcs.* TO root@localhost WITH GRANT OPTION;`)
if err != nil {
fatalf("failed to grant priviledges to user 'mysql: %s\n", err)
panic(err)
}
fmt.Println("mysql for test ready")
return func() {
db, err := sql.Open("mysql", fmt.Sprintf("root:root@tcp(%s:%d)/",
mysqlHost, mysqlPort))
if err != nil {
fatalf("failed to connect for truncation: %s\n", err)
}
for _, table := range []string{"routes", "apps", "extras"} {
_, err = db.Exec(`TRUNCATE TABLE ` + table)
if err != nil {
fatalf("failed to truncate table %q: %s\n", table, err)
}
}
},
func() {
tryRun(logf, "stop mysql container", exec.Command("docker", "rm", "-vf", "func-mysql-test"))
}
}
func TestDatastore(t *testing.T) {
_, close := prepareMysqlTest(t.Logf, t.Fatalf)
defer close()
u, err := url.Parse(fmt.Sprintf(tmpMysql, mysqlHost, mysqlPort))
if err != nil {
t.Fatalf("failed to parse url: %s\n", err)
}
ds, err := New(u)
if err != nil {
t.Fatalf("failed to create mysql datastore: %s\n", err)
}
datastoretest.Test(t, ds)
}
func tryRun(logf func(string, ...interface{}), desc string, cmd *exec.Cmd) {
var b bytes.Buffer
cmd.Stderr = &b
if err := cmd.Run(); err != nil {
logf("failed to %s: %s", desc, b.String())
}
}
func mustRun(fatalf func(string, ...interface{}), desc string, cmd *exec.Cmd) {
var b bytes.Buffer
cmd.Stderr = &b
if err := cmd.Run(); err != nil {
fatalf("failed to %s: %s", desc, b.String())
}
}

View File

@@ -1,402 +0,0 @@
package postgres
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"net/url"
"github.com/lib/pq"
_ "github.com/lib/pq"
"gitlab-odx.oracle.com/odx/functions/api/datastore/internal/datastoreutil"
"gitlab-odx.oracle.com/odx/functions/api/models"
)
const routesTableCreate = `
CREATE TABLE IF NOT EXISTS routes (
app_name character varying(256) NOT NULL,
path text NOT NULL,
image character varying(256) NOT NULL,
format character varying(16) NOT NULL,
memory integer NOT NULL,
timeout integer NOT NULL,
idle_timeout integer NOT NULL,
type character varying(16) NOT NULL,
headers text NOT NULL,
config text NOT NULL,
PRIMARY KEY (app_name, path)
);`
const appsTableCreate = `CREATE TABLE IF NOT EXISTS apps (
name character varying(256) NOT NULL PRIMARY KEY,
config text NOT NULL
);`
const extrasTableCreate = `CREATE TABLE IF NOT EXISTS extras (
key character varying(256) NOT NULL PRIMARY KEY,
value character varying(256) NOT NULL
);`
const routeSelector = `SELECT app_name, path, image, format, memory, type, timeout, idle_timeout, headers, config FROM routes`
const callsTableCreate = `CREATE TABLE IF NOT EXISTS calls (
created_at character varying(256) NOT NULL,
started_at character varying(256) NOT NULL,
completed_at character varying(256) NOT NULL,
status character varying(256) NOT NULL,
id character varying(256) NOT NULL,
app_name character varying(256) NOT NULL,
path character varying(256) NOT NULL,
PRIMARY KEY (id)
);`
const callSelector = `SELECT id, created_at, started_at, completed_at, status, app_name, path FROM calls`
type PostgresDatastore struct {
db *sql.DB
}
func New(url *url.URL) (models.Datastore, error) {
tables := []string{routesTableCreate, appsTableCreate, extrasTableCreate, callsTableCreate}
sqlDatastore := &PostgresDatastore{}
dialect := "postgres"
db, err := datastoreutil.NewDatastore(url.String(), dialect, tables)
if err != nil {
return nil, err
}
sqlDatastore.db = db
return datastoreutil.NewValidator(sqlDatastore), nil
}
func (ds *PostgresDatastore) InsertApp(ctx context.Context, app *models.App) (*models.App, error) {
var cbyte []byte
var err error
if app.Config != nil {
cbyte, err = json.Marshal(app.Config)
if err != nil {
return nil, err
}
}
_, err = ds.db.Exec(`INSERT INTO apps (name, config) VALUES ($1, $2);`,
app.Name,
string(cbyte),
)
if err != nil {
pqErr := err.(*pq.Error)
if pqErr.Code == "23505" {
return nil, models.ErrAppsAlreadyExists
}
return nil, err
}
return app, nil
}
func (ds *PostgresDatastore) UpdateApp(ctx context.Context, newapp *models.App) (*models.App, error) {
app := &models.App{Name: newapp.Name}
err := ds.Tx(func(tx *sql.Tx) error {
row := ds.db.QueryRow("SELECT config FROM apps WHERE name=$1", app.Name)
var config string
if err := row.Scan(&config); err != nil {
if err == sql.ErrNoRows {
return models.ErrAppsNotFound
}
return err
}
if len(config) > 0 {
err := json.Unmarshal([]byte(config), &app.Config)
if err != nil {
return err
}
}
app.UpdateConfig(newapp.Config)
cbyte, err := json.Marshal(app.Config)
if err != nil {
return err
}
res, err := ds.db.Exec(`UPDATE apps SET config = $2 WHERE name = $1;`, app.Name, string(cbyte))
if err != nil {
return err
}
if n, err := res.RowsAffected(); err != nil {
return err
} else if n == 0 {
return models.ErrAppsNotFound
}
return nil
})
if err != nil {
return nil, err
}
return app, nil
}
func (ds *PostgresDatastore) RemoveApp(ctx context.Context, appName string) error {
_, err := ds.db.Exec(`DELETE FROM apps WHERE name = $1`, appName)
return err
}
func (ds *PostgresDatastore) GetApp(ctx context.Context, name string) (*models.App, error) {
queryStr := "SELECT name, config FROM apps WHERE name=$1"
queryArgs := []interface{}{name}
return datastoreutil.SQLGetApp(ds.db, queryStr, queryArgs...)
}
func (ds *PostgresDatastore) GetApps(ctx context.Context, filter *models.AppFilter) ([]*models.App, error) {
whereStm := "WHERE name LIKE $1"
selectStm := "SELECT DISTINCT * FROM apps %s"
return datastoreutil.SQLGetApps(ds.db, filter, whereStm, selectStm)
}
func (ds *PostgresDatastore) InsertRoute(ctx context.Context, route *models.Route) (*models.Route, error) {
hbyte, err := json.Marshal(route.Headers)
if err != nil {
return nil, err
}
cbyte, err := json.Marshal(route.Config)
if err != nil {
return nil, err
}
err = ds.Tx(func(tx *sql.Tx) error {
r := tx.QueryRow(`SELECT 1 FROM apps WHERE name=$1`, route.AppName)
if err := r.Scan(new(int)); err != nil {
if err == sql.ErrNoRows {
return models.ErrAppsNotFound
}
return err
}
same, err := tx.Query(`SELECT 1 FROM routes WHERE app_name=$1 AND path=$2`,
route.AppName, route.Path)
if err != nil {
return err
}
defer same.Close()
if same.Next() {
return models.ErrRoutesAlreadyExists
}
_, err = tx.Exec(`
INSERT INTO routes (
app_name,
path,
image,
format,
memory,
type,
timeout,
idle_timeout,
headers,
config
)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10);`,
route.AppName,
route.Path,
route.Image,
route.Format,
route.Memory,
route.Type,
route.Timeout,
route.IdleTimeout,
string(hbyte),
string(cbyte),
)
return err
})
if err != nil {
return nil, err
}
return route, nil
}
func (ds *PostgresDatastore) UpdateRoute(ctx context.Context, newroute *models.Route) (*models.Route, error) {
var route models.Route
err := ds.Tx(func(tx *sql.Tx) error {
row := ds.db.QueryRow(fmt.Sprintf("%s WHERE app_name=$1 AND path=$2", routeSelector), newroute.AppName, newroute.Path)
if err := datastoreutil.ScanRoute(row, &route); err == sql.ErrNoRows {
return models.ErrRoutesNotFound
} else if err != nil {
return err
}
route.Update(newroute)
hbyte, err := json.Marshal(route.Headers)
if err != nil {
return err
}
cbyte, err := json.Marshal(route.Config)
if err != nil {
return err
}
res, err := tx.Exec(`
UPDATE routes SET
image = $3,
format = $4,
memory = $5,
type = $6,
timeout = $7,
idle_timeout = $8,
headers = $9,
config = $10
WHERE app_name = $1 AND path = $2;`,
route.AppName,
route.Path,
route.Image,
route.Format,
route.Memory,
route.Type,
route.Timeout,
route.IdleTimeout,
string(hbyte),
string(cbyte),
)
if err != nil {
return err
}
if n, err := res.RowsAffected(); err != nil {
return err
} else if n == 0 {
return models.ErrRoutesNotFound
}
return nil
})
if err != nil {
return nil, err
}
return &route, nil
}
func (ds *PostgresDatastore) RemoveRoute(ctx context.Context, appName, routePath string) error {
deleteStm := `DELETE FROM routes WHERE path = $1 AND app_name = $2`
return datastoreutil.SQLRemoveRoute(ds.db, appName, routePath, deleteStm)
}
func (ds *PostgresDatastore) GetRoute(ctx context.Context, appName, routePath string) (*models.Route, error) {
rSelectCondition := "%s WHERE app_name=$1 AND path=$2"
return datastoreutil.SQLGetRoute(ds.db, appName, routePath, rSelectCondition, routeSelector)
}
func (ds *PostgresDatastore) GetRoutes(ctx context.Context, filter *models.RouteFilter) ([]*models.Route, error) {
whereStm := "WHERE %s $1"
andStm := " AND %s $%d"
return datastoreutil.SQLGetRoutes(ds.db, filter, routeSelector, whereStm, andStm)
}
func (ds *PostgresDatastore) GetRoutesByApp(ctx context.Context, appName string, filter *models.RouteFilter) ([]*models.Route, error) {
defaultFilterQuery := "WHERE app_name = $1"
whereStm := "WHERE %s $1"
andStm := " AND %s $%d"
return datastoreutil.SQLGetRoutesByApp(ds.db, appName, filter, routeSelector, defaultFilterQuery, whereStm, andStm)
}
func (ds *PostgresDatastore) Put(ctx context.Context, key, value []byte) error {
_, err := ds.db.Exec(`
INSERT INTO extras (
key,
value
)
VALUES ($1, $2)
ON CONFLICT (key) DO UPDATE SET
value = $2;
`, string(key), string(value))
if err != nil {
return err
}
return nil
}
func (ds *PostgresDatastore) Get(ctx context.Context, key []byte) ([]byte, error) {
row := ds.db.QueryRow("SELECT value FROM extras WHERE key=$1", key)
var value string
err := row.Scan(&value)
if err == sql.ErrNoRows {
return nil, nil
} else if err != nil {
return nil, err
}
return []byte(value), nil
}
func (ds *PostgresDatastore) Tx(f func(*sql.Tx) error) error {
tx, err := ds.db.Begin()
if err != nil {
return err
}
err = f(tx)
if err != nil {
tx.Rollback()
return err
}
return tx.Commit()
}
func (ds *PostgresDatastore) InsertTask(ctx context.Context, task *models.Task) error {
err := ds.Tx(func(tx *sql.Tx) error {
_, err := tx.Exec(
`INSERT INTO calls (
id,
created_at,
started_at,
completed_at,
status,
app_name,
path) VALUES ($1, $2, $3, $4, $5, $6, $7);`,
task.ID,
task.CreatedAt.String(),
task.StartedAt.String(),
task.CompletedAt.String(),
task.Status,
task.AppName,
task.Path,
)
return err
})
return err
}
func (ds *PostgresDatastore) GetTask(ctx context.Context, callID string) (*models.FnCall, error) {
whereStm := "%s WHERE id=$1"
return datastoreutil.SQLGetCall(ds.db, callSelector, callID, whereStm)
}
func (ds *PostgresDatastore) GetTasks(ctx context.Context, filter *models.CallFilter) (models.FnCalls, error) {
whereStm := "WHERE %s $1"
andStm := " AND %s $2"
return datastoreutil.SQLGetCalls(ds.db, callSelector, filter, whereStm, andStm)
}

View File

@@ -1,122 +0,0 @@
package postgres
import (
"bytes"
"database/sql"
"fmt"
"log"
"net/url"
"os"
"os/exec"
"strconv"
"testing"
"time"
"gitlab-odx.oracle.com/odx/functions/api/datastore/internal/datastoretest"
)
const tmpPostgres = "postgres://postgres@%s:%d/funcs?sslmode=disable"
var (
postgresHost = func() string {
host := os.Getenv("POSTGRES_HOST")
if host == "" {
host = "127.0.0.1"
}
return host
}()
postgresPort = func() int {
port := os.Getenv("POSTGRES_PORT")
if port == "" {
port = "15432"
}
p, err := strconv.Atoi(port)
if err != nil {
panic(err)
}
return p
}()
)
func preparePostgresTest(logf, fatalf func(string, ...interface{})) (func(), func()) {
timeout := time.After(20 * time.Second)
wait := 500 * time.Millisecond
for {
db, err := sql.Open("postgres", fmt.Sprintf("postgres://postgres@%s:%d?sslmode=disable",
postgresHost, postgresPort))
if err != nil {
fmt.Println("failed to connect to postgres:", err)
fmt.Println("retrying in:", wait)
} else {
_, err = db.Exec(`CREATE DATABASE funcs;`)
if err != nil {
fmt.Println("failed to create database:", err)
fmt.Println("retrying in:", wait)
} else {
_, err = db.Exec(`GRANT ALL PRIVILEGES ON DATABASE funcs TO postgres;`)
if err == nil {
break
}
fmt.Println("failed to grant privileges:", err)
fmt.Println("retrying in:", wait)
}
}
select {
case <-timeout:
log.Fatal("timed out waiting for postgres")
case <-time.After(wait):
continue
}
}
fmt.Println("postgres for test ready")
return func() {
db, err := sql.Open("postgres", fmt.Sprintf(tmpPostgres, postgresHost, postgresPort))
if err != nil {
fatalf("failed to connect for truncation: %s\n", err)
}
for _, table := range []string{"routes", "apps", "extras"} {
_, err = db.Exec(`TRUNCATE TABLE ` + table)
if err != nil {
fatalf("failed to truncate table %q: %s\n", table, err)
}
}
},
func() {
tryRun(logf, "stop postgres container", exec.Command("docker", "rm", "-fv", "func-postgres-test"))
}
}
func TestDatastore(t *testing.T) {
_, close := preparePostgresTest(t.Logf, t.Fatalf)
defer close()
u, err := url.Parse(fmt.Sprintf(tmpPostgres, postgresHost, postgresPort))
if err != nil {
t.Fatalf("failed to parse url: %v", err)
}
ds, err := New(u)
if err != nil {
t.Fatalf("failed to create postgres datastore: %v", err)
}
datastoretest.Test(t, ds)
}
func tryRun(logf func(string, ...interface{}), desc string, cmd *exec.Cmd) {
var b bytes.Buffer
cmd.Stderr = &b
if err := cmd.Run(); err != nil {
logf("failed to %s: %s", desc, b.String())
}
}
func mustRun(fatalf func(string, ...interface{}), desc string, cmd *exec.Cmd) {
var b bytes.Buffer
cmd.Stderr = &b
if err := cmd.Run(); err != nil {
fatalf("failed to %s: %s", desc, b.String())
}
}

View File

@@ -1,399 +0,0 @@
package redis
import (
"context"
"encoding/json"
"fmt"
"net/url"
"regexp"
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/garyburd/redigo/redis"
"gitlab-odx.oracle.com/odx/functions/api/datastore/internal/datastoreutil"
"gitlab-odx.oracle.com/odx/functions/api/models"
)
type RedisDataStore struct {
pool *redis.Pool
}
func New(url *url.URL) (models.Datastore, error) {
pool := &redis.Pool{
MaxIdle: 512,
// I'm not sure if allowing the pool to block if more than 16 connections are required is a good idea.
MaxActive: 512,
Wait: true,
IdleTimeout: 300 * time.Second,
Dial: func() (redis.Conn, error) {
return redis.DialURL(url.String())
},
TestOnBorrow: func(c redis.Conn, t time.Time) error {
_, err := c.Do("PING")
return err
},
}
// Force a connection so we can fail in case of error.
conn := pool.Get()
if err := conn.Err(); err != nil {
logrus.WithError(err).Fatal("Error connecting to redis")
}
ds := &RedisDataStore{
pool: pool,
}
return datastoreutil.NewValidator(ds), nil
}
func (ds *RedisDataStore) setApp(app *models.App) (*models.App, error) {
appBytes, err := json.Marshal(app)
if err != nil {
return nil, err
}
conn := ds.pool.Get()
defer conn.Close()
if _, err := conn.Do("HSET", "apps", app.Name, appBytes); err != nil {
return nil, err
}
return app, nil
}
func (ds *RedisDataStore) InsertApp(ctx context.Context, app *models.App) (*models.App, error) {
conn := ds.pool.Get()
defer conn.Close()
reply, err := conn.Do("HEXISTS", "apps", app.Name)
if err != nil {
return nil, err
}
if exists, err := redis.Bool(reply, err); err != nil {
return nil, err
} else if exists {
return nil, models.ErrAppsAlreadyExists
}
return ds.setApp(app)
}
func (ds *RedisDataStore) UpdateApp(ctx context.Context, newapp *models.App) (*models.App, error) {
app, err := ds.GetApp(ctx, newapp.Name)
if err != nil {
return nil, err
}
app.UpdateConfig(newapp.Config)
return ds.setApp(app)
}
func (ds *RedisDataStore) RemoveApp(ctx context.Context, appName string) error {
conn := ds.pool.Get()
defer conn.Close()
if _, err := conn.Do("HDEL", "apps", appName); err != nil {
return err
}
return nil
}
func (ds *RedisDataStore) GetApp(ctx context.Context, name string) (*models.App, error) {
conn := ds.pool.Get()
defer conn.Close()
reply, err := conn.Do("HGET", "apps", name)
if err != nil {
return nil, err
} else if reply == nil {
return nil, models.ErrAppsNotFound
}
res := &models.App{}
if err := json.Unmarshal(reply.([]byte), res); err != nil {
return nil, err
}
return res, nil
}
func (ds *RedisDataStore) GetApps(ctx context.Context, filter *models.AppFilter) ([]*models.App, error) {
res := []*models.App{}
conn := ds.pool.Get()
defer conn.Close()
reply, err := conn.Do("HGETALL", "apps")
if err != nil {
return nil, err
}
apps, err := redis.StringMap(reply, err)
if err != nil {
return nil, err
}
for _, v := range apps {
var app models.App
if err := json.Unmarshal([]byte(v), &app); err != nil {
return nil, err
}
if applyAppFilter(&app, filter) {
res = append(res, &app)
}
}
return res, nil
}
func (ds *RedisDataStore) setRoute(set string, route *models.Route) (*models.Route, error) {
buf, err := json.Marshal(route)
if err != nil {
return nil, err
}
conn := ds.pool.Get()
defer conn.Close()
if _, err := conn.Do("HSET", set, route.Path, buf); err != nil {
return nil, err
}
return route, nil
}
func (ds *RedisDataStore) InsertRoute(ctx context.Context, route *models.Route) (*models.Route, error) {
conn := ds.pool.Get()
defer conn.Close()
reply, err := conn.Do("HEXISTS", "apps", route.AppName)
if err != nil {
return nil, err
}
if exists, err := redis.Bool(reply, err); err != nil {
return nil, err
} else if !exists {
return nil, models.ErrAppsNotFound
}
hset := fmt.Sprintf("routes:%s", route.AppName)
reply, err = conn.Do("HEXISTS", hset, route.Path)
if err != nil {
return nil, err
}
if exists, err := redis.Bool(reply, err); err != nil {
return nil, err
} else if exists {
return nil, models.ErrRoutesAlreadyExists
}
return ds.setRoute(hset, route)
}
func (ds *RedisDataStore) UpdateRoute(ctx context.Context, newroute *models.Route) (*models.Route, error) {
route, err := ds.GetRoute(ctx, newroute.AppName, newroute.Path)
if err != nil {
return nil, err
}
route.Update(newroute)
hset := fmt.Sprintf("routes:%s", route.AppName)
return ds.setRoute(hset, route)
}
func (ds *RedisDataStore) RemoveRoute(ctx context.Context, appName, routePath string) error {
hset := fmt.Sprintf("routes:%s", appName)
conn := ds.pool.Get()
defer conn.Close()
if n, err := conn.Do("HDEL", hset, routePath); err != nil {
return err
} else if n == 0 {
return models.ErrRoutesRemoving
}
return nil
}
func (ds *RedisDataStore) GetRoute(ctx context.Context, appName, routePath string) (*models.Route, error) {
hset := fmt.Sprintf("routes:%s", appName)
conn := ds.pool.Get()
defer conn.Close()
reply, err := conn.Do("HGET", hset, routePath)
if err != nil {
return nil, err
} else if reply == nil {
return nil, models.ErrRoutesNotFound
}
var route models.Route
if err := json.Unmarshal(reply.([]byte), &route); err != nil {
return nil, err
}
return &route, nil
}
func (ds *RedisDataStore) GetRoutes(ctx context.Context, filter *models.RouteFilter) ([]*models.Route, error) {
res := []*models.Route{}
conn := ds.pool.Get()
defer conn.Close()
reply, err := conn.Do("HKEYS", "apps")
if err != nil {
return nil, err
} else if reply == nil {
return nil, models.ErrRoutesNotFound
}
paths, err := redis.Strings(reply, err)
if err != nil {
return nil, err
}
for _, path := range paths {
hset := fmt.Sprintf("routes:%s", path)
reply, err := conn.Do("HGETALL", hset)
if err != nil {
return nil, err
} else if reply == nil {
return nil, models.ErrRoutesNotFound
}
routes, err := redis.StringMap(reply, err)
if err != nil {
return nil, err
}
for _, v := range routes {
var route models.Route
if err := json.Unmarshal([]byte(v), &route); err != nil {
return nil, err
}
if applyRouteFilter(&route, filter) {
res = append(res, &route)
}
}
}
return res, nil
}
func (ds *RedisDataStore) GetRoutesByApp(ctx context.Context, appName string, filter *models.RouteFilter) ([]*models.Route, error) {
if filter == nil {
filter = new(models.RouteFilter)
}
filter.AppName = appName
res := []*models.Route{}
hset := fmt.Sprintf("routes:%s", appName)
conn := ds.pool.Get()
defer conn.Close()
reply, err := conn.Do("HGETALL", hset)
if err != nil {
return nil, err
} else if reply == nil {
return nil, models.ErrRoutesNotFound
}
routes, err := redis.StringMap(reply, err)
if err != nil {
return nil, err
}
for _, v := range routes {
var route models.Route
if err := json.Unmarshal([]byte(v), &route); err != nil {
return nil, err
}
if applyRouteFilter(&route, filter) {
res = append(res, &route)
}
}
return res, nil
}
func (ds *RedisDataStore) Put(ctx context.Context, key, value []byte) error {
conn := ds.pool.Get()
defer conn.Close()
if _, err := conn.Do("HSET", "extras", key, value); err != nil {
return err
}
return nil
}
func (ds *RedisDataStore) Get(ctx context.Context, key []byte) ([]byte, error) {
conn := ds.pool.Get()
defer conn.Close()
value, err := conn.Do("HGET", "extras", key)
if err != nil {
return nil, err
}
return value.([]byte), nil
}
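// applyAppFilter treats a non-empty filter name as a SQL LIKE-style pattern,
// translating `%` wildcards into `.*` and regexp-matching against the app
// name; e.g. (illustrative) a filter of "my%" matches "myapp".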
func applyAppFilter(app *models.App, filter *models.AppFilter) bool {
if filter != nil && filter.Name != "" {
nameLike, err := regexp.MatchString(strings.Replace(filter.Name, "%", ".*", -1), app.Name)
return err == nil && nameLike
}
return true
}
func applyRouteFilter(route *models.Route, filter *models.RouteFilter) bool {
return filter == nil || (filter.Path == "" || route.Path == filter.Path) &&
(filter.AppName == "" || route.AppName == filter.AppName) &&
(filter.Image == "" || route.Image == filter.Image)
}
func applyCallFilter(call *models.FnCall, filter *models.CallFilter) bool {
return filter == nil || (filter.Path == "" || call.Path == filter.Path) &&
(filter.AppName == "" || call.AppName == filter.AppName)
}
func (ds *RedisDataStore) InsertTask(ctx context.Context, task *models.Task) error {
taskBytes, err := json.Marshal(task)
if err != nil {
return err
}
conn := ds.pool.Get()
defer conn.Close()
if _, err := conn.Do("HSET", "calls", task.ID, taskBytes); err != nil {
return err
}
return nil
}
func (ds *RedisDataStore) GetTask(ctx context.Context, callID string) (*models.FnCall, error) {
conn := ds.pool.Get()
defer conn.Close()
reply, err := conn.Do("HGET", "calls", callID)
if err != nil {
return nil, err
} else if reply == nil {
return nil, models.ErrCallNotFound
}
res := &models.FnCall{}
if err := json.Unmarshal(reply.([]byte), res); err != nil {
return nil, err
}
return res, nil
}
func (ds *RedisDataStore) GetTasks(ctx context.Context, filter *models.CallFilter) (models.FnCalls, error) {
res := models.FnCalls{}
conn := ds.pool.Get()
defer conn.Close()
reply, err := conn.Do("HGETALL", "calls")
if err != nil {
return nil, err
}
calls, err := redis.StringMap(reply, err)
if err != nil {
return nil, err
}
for _, v := range calls {
var call models.FnCall
if err := json.Unmarshal([]byte(v), &call); err != nil {
return nil, err
}
if applyCallFilter(&call, filter) {
res = append(res, &call)
}
}
return res, nil
}

View File

@@ -1,98 +0,0 @@
package redis
import (
"bytes"
"fmt"
"log"
"net/url"
"os"
"os/exec"
"strconv"
"testing"
"time"
"github.com/garyburd/redigo/redis"
"gitlab-odx.oracle.com/odx/functions/api/datastore/internal/datastoretest"
)
const tmpRedis = "redis://%s:%d/"
var (
redisHost = func() string {
host := os.Getenv("REDIS_HOST")
if host == "" {
host = "127.0.0.1"
}
return host
}()
redisPort = func() int {
port := os.Getenv("REDIS_PORT")
if port == "" {
port = "6301"
}
p, err := strconv.Atoi(port)
if err != nil {
panic(err)
}
return p
}()
)
func prepareRedisTest(logf, fatalf func(string, ...interface{})) (func(), func()) {
timeout := time.After(20 * time.Second)
for {
c, err := redis.DialURL(fmt.Sprintf(tmpRedis, redisHost, redisPort))
if err == nil {
_, err = c.Do("PING")
c.Close()
if err == nil {
break
}
}
fmt.Println("failed to PING redis:", err)
select {
case <-timeout:
log.Fatal("timed out waiting for redis")
case <-time.After(500 * time.Millisecond):
continue
}
}
fmt.Println("redis for test ready")
return func() {},
func() {
tryRun(logf, "stop redis container", exec.Command("docker", "rm", "-fv", "func-redis-test"))
}
}
func TestDatastore(t *testing.T) {
_, close := prepareRedisTest(t.Logf, t.Fatalf)
defer close()
u, err := url.Parse(fmt.Sprintf(tmpRedis, redisHost, redisPort))
if err != nil {
t.Fatal("failed to parse url: ", err)
}
ds, err := New(u)
if err != nil {
t.Fatal("failed to create redis datastore:", err)
}
datastoretest.Test(t, ds)
}
func tryRun(logf func(string, ...interface{}), desc string, cmd *exec.Cmd) {
var b bytes.Buffer
cmd.Stderr = &b
if err := cmd.Run(); err != nil {
logf("failed to %s: %s", desc, b.String())
}
}
func mustRun(fatalf func(string, ...interface{}), desc string, cmd *exec.Cmd) {
var b bytes.Buffer
cmd.Stderr = &b
if err := cmd.Run(); err != nil {
fatalf("failed to %s: %s", desc, b.String())
}
}

759
api/datastore/sql/sql.go Normal file
View File

@@ -0,0 +1,759 @@
package sql
import (
"bytes"
"context"
"database/sql"
"encoding/json"
"errors"
"fmt"
"net/url"
"os"
"path/filepath"
"strings"
"github.com/Sirupsen/logrus"
"github.com/go-sql-driver/mysql"
_ "github.com/go-sql-driver/mysql"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
_ "github.com/lib/pq"
"github.com/mattn/go-sqlite3"
_ "github.com/mattn/go-sqlite3"
"gitlab-odx.oracle.com/odx/functions/api/datastore/internal/datastoreutil"
"gitlab-odx.oracle.com/odx/functions/api/models"
)
// this aims to be an ANSI-SQL compliant package that uses only question
// mark syntax for variable placement, leaning on sqlx to rebind queries
// so they are compatible with the actual underlying datastore.
//
// currently tested and working are postgres, mysql and sqlite3.
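// For illustration only (not part of this package's API; db here is a
// *sqlx.DB): a query written once with `?` placeholders is rebound per
// driver by sqlx, e.g.
//
//	q := db.Rebind(`SELECT name FROM apps WHERE name=?`)
//	// postgres:        SELECT name FROM apps WHERE name=$1
//	// mysql / sqlite3: unchanged, `?` is the native placeholder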
var tables = [...]string{`CREATE TABLE IF NOT EXISTS routes (
app_name varchar(256) NOT NULL,
path varchar(256) NOT NULL,
image varchar(256) NOT NULL,
format varchar(16) NOT NULL,
memory int NOT NULL,
timeout int NOT NULL,
idle_timeout int NOT NULL,
type varchar(16) NOT NULL,
headers text NOT NULL,
config text NOT NULL,
PRIMARY KEY (app_name, path)
);`,
`CREATE TABLE IF NOT EXISTS apps (
name varchar(256) NOT NULL PRIMARY KEY,
config text NOT NULL
);`,
`CREATE TABLE IF NOT EXISTS calls (
created_at varchar(256) NOT NULL,
started_at varchar(256) NOT NULL,
completed_at varchar(256) NOT NULL,
status varchar(256) NOT NULL,
id varchar(256) NOT NULL,
app_name varchar(256) NOT NULL,
path varchar(256) NOT NULL,
PRIMARY KEY (id)
);`,
`CREATE TABLE IF NOT EXISTS logs (
id varchar(256) NOT NULL PRIMARY KEY,
log text NOT NULL
);`,
}
const (
routeSelector = `SELECT app_name, path, image, format, memory, type, timeout, idle_timeout, headers, config FROM routes`
callSelector = `SELECT id, created_at, started_at, completed_at, status, app_name, path FROM calls`
)
type sqlStore struct {
db *sqlx.DB
// TODO we should prepare all of the statements, rebind them
// and store them all here.
}
// New will open the db specified by url, create any tables necessary
// and return a models.Datastore safe for concurrent usage.
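// A minimal sketch of calling New (the sqlite3 path is an example only):
//
//	u, _ := url.Parse("sqlite3:///tmp/fn.db")
//	ds, err := New(u)
//	if err != nil {
//		// handle error
//	}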
func New(url *url.URL) (models.Datastore, error) {
driver := url.Scheme
// driver must be one of these for sqlx to work, double check:
switch driver {
case "postgres", "pgx", "mysql", "sqlite3", "oci8", "ora", "goracle":
default:
return nil, errors.New("invalid db driver, refer to the code")
}
if driver == "sqlite3" {
// make all the dirs so we can make the file..
dir := filepath.Dir(url.Path)
err := os.MkdirAll(dir, 0755)
if err != nil {
return nil, err
}
}
uri := url.String()
if driver != "postgres" {
// postgres seems to need this as a prefix in lib/pq, everyone else wants it stripped of scheme
uri = strings.TrimPrefix(url.String(), url.Scheme+"://")
}
sqldb, err := sql.Open(driver, uri)
if err != nil {
logrus.WithFields(logrus.Fields{"url": uri}).WithError(err).Error("couldn't open db")
return nil, err
}
db := sqlx.NewDb(sqldb, driver)
// force a connection and test that it worked
err = db.Ping()
if err != nil {
logrus.WithFields(logrus.Fields{"url": uri}).WithError(err).Error("couldn't ping db")
return nil, err
}
maxIdleConns := 30 // c.MaxIdleConnections
db.SetMaxIdleConns(maxIdleConns)
logrus.WithFields(logrus.Fields{"max_idle_connections": maxIdleConns, "datastore": driver}).Info("datastore dialed")
for _, v := range tables {
_, err = db.Exec(v)
if err != nil {
return nil, err
}
}
sqlDatastore := &sqlStore{db: db}
return datastoreutil.NewValidator(sqlDatastore), nil
}
func (ds *sqlStore) InsertApp(ctx context.Context, app *models.App) (*models.App, error) {
var cbyte []byte
var err error
if app.Config != nil {
cbyte, err = json.Marshal(app.Config)
if err != nil {
return nil, err
}
}
query := ds.db.Rebind("INSERT INTO apps (name, config) VALUES (?, ?);")
_, err = ds.db.Exec(query, app.Name, string(cbyte))
if err != nil {
switch err := err.(type) {
case *mysql.MySQLError:
if err.Number == 1062 {
return nil, models.ErrAppsAlreadyExists
}
case *pq.Error:
if err.Code == "23505" {
return nil, models.ErrAppsAlreadyExists
}
case sqlite3.Error:
if err.ExtendedCode == sqlite3.ErrConstraintUnique || err.ExtendedCode == sqlite3.ErrConstraintPrimaryKey {
return nil, models.ErrAppsAlreadyExists
}
}
return nil, err
}
return app, nil
}
func (ds *sqlStore) UpdateApp(ctx context.Context, newapp *models.App) (*models.App, error) {
app := &models.App{Name: newapp.Name}
err := ds.Tx(func(tx *sqlx.Tx) error {
query := tx.Rebind(`SELECT config FROM apps WHERE name=?`)
row := tx.QueryRow(query, app.Name)
var config string
if err := row.Scan(&config); err != nil {
if err == sql.ErrNoRows {
return models.ErrAppsNotFound
}
return err
}
if config != "" {
err := json.Unmarshal([]byte(config), &app.Config)
if err != nil {
return err
}
}
app.UpdateConfig(newapp.Config)
cbyte, err := json.Marshal(app.Config)
if err != nil {
return err
}
query = tx.Rebind(`UPDATE apps SET config=? WHERE name=?`)
res, err := tx.Exec(query, string(cbyte), app.Name)
if err != nil {
return err
}
if n, err := res.RowsAffected(); err != nil {
return err
} else if n == 0 {
return models.ErrAppsNotFound
}
return nil
})
if err != nil {
return nil, err
}
return app, nil
}
func (ds *sqlStore) RemoveApp(ctx context.Context, appName string) error {
query := ds.db.Rebind(`DELETE FROM apps WHERE name = ?`)
_, err := ds.db.Exec(query, appName)
return err
}
func (ds *sqlStore) GetApp(ctx context.Context, name string) (*models.App, error) {
query := ds.db.Rebind(`SELECT name, config FROM apps WHERE name=?`)
row := ds.db.QueryRow(query, name)
var resName, config string
err := row.Scan(&resName, &config)
if err != nil {
if err == sql.ErrNoRows {
return nil, models.ErrAppsNotFound
}
return nil, err
}
res := &models.App{
Name: resName,
}
if len(config) > 0 {
err := json.Unmarshal([]byte(config), &res.Config)
if err != nil {
return nil, err
}
}
return res, nil
}
// GetApps retrieves an array of apps according to a specific filter.
func (ds *sqlStore) GetApps(ctx context.Context, filter *models.AppFilter) ([]*models.App, error) {
res := []*models.App{}
query, args := buildFilterAppQuery(filter)
query = ds.db.Rebind(fmt.Sprintf("SELECT DISTINCT name, config FROM apps %s", query))
rows, err := ds.db.Query(query, args...)
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var app models.App
err := scanApp(rows, &app)
if err != nil {
if err == sql.ErrNoRows {
return res, nil
}
return res, err
}
res = append(res, &app)
}
if err := rows.Err(); err != nil {
return res, err
}
return res, nil
}
func (ds *sqlStore) InsertRoute(ctx context.Context, route *models.Route) (*models.Route, error) {
hbyte, err := json.Marshal(route.Headers)
if err != nil {
return nil, err
}
cbyte, err := json.Marshal(route.Config)
if err != nil {
return nil, err
}
err = ds.Tx(func(tx *sqlx.Tx) error {
query := tx.Rebind(`SELECT 1 FROM apps WHERE name=?`)
r := tx.QueryRow(query, route.AppName)
if err := r.Scan(new(int)); err != nil {
if err == sql.ErrNoRows {
return models.ErrAppsNotFound
}
return err
}
query = tx.Rebind(`SELECT 1 FROM routes WHERE app_name=? AND path=?`)
same, err := tx.Query(query, route.AppName, route.Path)
if err != nil {
return err
}
defer same.Close()
if same.Next() {
return models.ErrRoutesAlreadyExists
}
query = tx.Rebind(`INSERT INTO routes (
app_name,
path,
image,
format,
memory,
type,
timeout,
idle_timeout,
headers,
config
)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?);`)
_, err = tx.Exec(query,
route.AppName,
route.Path,
route.Image,
route.Format,
route.Memory,
route.Type,
route.Timeout,
route.IdleTimeout,
string(hbyte),
string(cbyte),
)
return err
})
return route, err
}
func (ds *sqlStore) UpdateRoute(ctx context.Context, newroute *models.Route) (*models.Route, error) {
var route models.Route
err := ds.Tx(func(tx *sqlx.Tx) error {
query := tx.Rebind(fmt.Sprintf("%s WHERE app_name=? AND path=?", routeSelector))
row := tx.QueryRow(query, newroute.AppName, newroute.Path)
if err := scanRoute(row, &route); err == sql.ErrNoRows {
return models.ErrRoutesNotFound
} else if err != nil {
return err
}
route.Update(newroute)
hbyte, err := json.Marshal(route.Headers)
if err != nil {
return err
}
cbyte, err := json.Marshal(route.Config)
if err != nil {
return err
}
query = tx.Rebind(`UPDATE routes SET
image = ?,
format = ?,
memory = ?,
type = ?,
timeout = ?,
idle_timeout = ?,
headers = ?,
config = ?
WHERE app_name=? AND path=?;`)
res, err := tx.Exec(query,
route.Image,
route.Format,
route.Memory,
route.Type,
route.Timeout,
route.IdleTimeout,
string(hbyte),
string(cbyte),
route.AppName,
route.Path,
)
if err != nil {
return err
}
if n, err := res.RowsAffected(); err != nil {
return err
} else if n == 0 {
return models.ErrRoutesNotFound
}
return nil
})
if err != nil {
return nil, err
}
return &route, nil
}
func (ds *sqlStore) RemoveRoute(ctx context.Context, appName, routePath string) error {
query := ds.db.Rebind(`DELETE FROM routes WHERE path = ? AND app_name = ?`)
res, err := ds.db.Exec(query, routePath, appName)
if err != nil {
return err
}
n, err := res.RowsAffected()
if err != nil {
return err
}
if n == 0 {
return models.ErrRoutesRemoving
}
return nil
}
func (ds *sqlStore) GetRoute(ctx context.Context, appName, routePath string) (*models.Route, error) {
rSelectCondition := "%s WHERE app_name=? AND path=?"
query := ds.db.Rebind(fmt.Sprintf(rSelectCondition, routeSelector))
row := ds.db.QueryRow(query, appName, routePath)
var route models.Route
err := scanRoute(row, &route)
if err == sql.ErrNoRows {
return nil, models.ErrRoutesNotFound
} else if err != nil {
return nil, err
}
return &route, nil
}
// GetRoutes retrieves an array of routes according to a specific filter.
func (ds *sqlStore) GetRoutes(ctx context.Context, filter *models.RouteFilter) ([]*models.Route, error) {
res := []*models.Route{}
query, args := buildFilterRouteQuery(filter)
query = fmt.Sprintf("%s %s", routeSelector, query)
query = ds.db.Rebind(query)
rows, err := ds.db.Query(query, args...)
// todo: check for no rows so we don't respond with a sql 500 err
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var route models.Route
err := scanRoute(rows, &route)
if err != nil {
continue
}
res = append(res, &route)
}
if err := rows.Err(); err != nil {
return nil, err
}
return res, nil
}
// GetRoutesByApp retrieves all routes for the given app name, applying the optional filter.
func (ds *sqlStore) GetRoutesByApp(ctx context.Context, appName string, filter *models.RouteFilter) ([]*models.Route, error) {
res := []*models.Route{}
var filterQuery string
var args []interface{}
if filter == nil {
filterQuery = "WHERE app_name = ?"
args = []interface{}{appName}
} else {
filter.AppName = appName
filterQuery, args = buildFilterRouteQuery(filter)
}
query := fmt.Sprintf("%s %s", routeSelector, filterQuery)
query = ds.db.Rebind(query)
rows, err := ds.db.Query(query, args...)
// todo: check for no rows so we don't respond with a sql 500 err
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var route models.Route
err := scanRoute(rows, &route)
if err != nil {
continue
}
res = append(res, &route)
}
if err := rows.Err(); err != nil {
return nil, err
}
return res, nil
}
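// Tx runs f inside a transaction, rolling back if f returns an error and
// committing otherwise. A minimal illustrative sketch (the query and someID
// below are examples only):
//
//	err := ds.Tx(func(tx *sqlx.Tx) error {
//		_, err := tx.Exec(tx.Rebind(`DELETE FROM logs WHERE id=?`), someID)
//		return err
//	})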
func (ds *sqlStore) Tx(f func(*sqlx.Tx) error) error {
tx, err := ds.db.Beginx()
if err != nil {
return err
}
err = f(tx)
if err != nil {
tx.Rollback()
return err
}
return tx.Commit()
}
func (ds *sqlStore) InsertTask(ctx context.Context, task *models.Task) error {
query := ds.db.Rebind(`INSERT INTO calls (
id,
created_at,
started_at,
completed_at,
status,
app_name,
path
)
VALUES (?, ?, ?, ?, ?, ?, ?);`)
_, err := ds.db.Exec(query, task.ID, task.CreatedAt.String(),
task.StartedAt.String(), task.CompletedAt.String(),
task.Status, task.AppName, task.Path)
if err != nil {
return err
}
return nil
}
func (ds *sqlStore) GetTask(ctx context.Context, callID string) (*models.FnCall, error) {
query := fmt.Sprintf(`%s WHERE id=?`, callSelector)
query = ds.db.Rebind(query)
row := ds.db.QueryRow(query, callID)
var call models.FnCall
err := scanCall(row, &call)
if err != nil {
return nil, err
}
return &call, nil
}
func (ds *sqlStore) GetTasks(ctx context.Context, filter *models.CallFilter) (models.FnCalls, error) {
res := models.FnCalls{}
query, args := buildFilterCallQuery(filter)
query = fmt.Sprintf("%s %s", callSelector, query)
query = ds.db.Rebind(query)
rows, err := ds.db.Query(query, args...)
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var call models.FnCall
err := scanCall(rows, &call)
if err != nil {
continue
}
res = append(res, &call)
}
if err := rows.Err(); err != nil {
return nil, err
}
return res, nil
}
func (ds *sqlStore) InsertLog(ctx context.Context, callID, callLog string) error {
query := ds.db.Rebind(`INSERT INTO logs (id, log) VALUES (?, ?);`)
_, err := ds.db.Exec(query, callID, callLog)
return err
}
func (ds *sqlStore) GetLog(ctx context.Context, callID string) (*models.FnCallLog, error) {
query := ds.db.Rebind(`SELECT log FROM logs WHERE id=?`)
row := ds.db.QueryRow(query, callID)
var log string
err := row.Scan(&log)
if err != nil {
if err == sql.ErrNoRows {
return nil, models.ErrCallLogNotFound
}
return nil, err
}
return &models.FnCallLog{
CallID: callID,
Log: log,
}, nil
}
func (ds *sqlStore) DeleteLog(ctx context.Context, callID string) error {
query := ds.db.Rebind(`DELETE FROM logs WHERE id=?`)
_, err := ds.db.Exec(query, callID)
return err
}
// TODO scrap for sqlx scanx ?? some things aren't perfect (e.g. config is a json string)
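// RowScanner is satisfied by both *sql.Row and *sql.Rows, so the scan helpers
// below can be shared between single-row and multi-row queries.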
type RowScanner interface {
Scan(dest ...interface{}) error
}
func ScanLog(scanner RowScanner, log *models.FnCallLog) error {
return scanner.Scan(
&log.CallID,
&log.Log,
)
}
func scanRoute(scanner RowScanner, route *models.Route) error {
var headerStr string
var configStr string
err := scanner.Scan(
&route.AppName,
&route.Path,
&route.Image,
&route.Format,
&route.Memory,
&route.Type,
&route.Timeout,
&route.IdleTimeout,
&headerStr,
&configStr,
)
if err != nil {
return err
}
if len(headerStr) > 0 {
err = json.Unmarshal([]byte(headerStr), &route.Headers)
if err != nil {
return err
}
}
if len(configStr) > 0 {
err = json.Unmarshal([]byte(configStr), &route.Config)
if err != nil {
return err
}
}
return nil
}
func scanApp(scanner RowScanner, app *models.App) error {
var configStr string
err := scanner.Scan(
&app.Name,
&configStr,
)
if err != nil {
return err
}
if len(configStr) > 0 {
err = json.Unmarshal([]byte(configStr), &app.Config)
if err != nil {
return err
}
}
return nil
}
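// buildFilterRouteQuery returns a WHERE clause using `?` placeholders plus its
// arguments for the given filter. For illustration, a filter with Path "/hello"
// and AppName "myapp" yields `WHERE path=? AND app_name=?` with args
// ["/hello", "myapp"].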
func buildFilterRouteQuery(filter *models.RouteFilter) (string, []interface{}) {
if filter == nil {
return "", nil
}
var b bytes.Buffer
var args []interface{}
where := func(colOp, val string) {
if val != "" {
args = append(args, val)
if len(args) == 1 {
fmt.Fprintf(&b, `WHERE %s?`, colOp)
} else {
fmt.Fprintf(&b, ` AND %s?`, colOp)
}
}
}
where("path=", filter.Path)
where("app_name=", filter.AppName)
where("image=", filter.Image)
return b.String(), args
}
func buildFilterAppQuery(filter *models.AppFilter) (string, []interface{}) {
if filter == nil || filter.Name == "" {
return "", nil
}
return "WHERE name LIKE ?", []interface{}{filter.Name}
}
func buildFilterCallQuery(filter *models.CallFilter) (string, []interface{}) {
if filter == nil {
return "", nil
}
var b bytes.Buffer
var args []interface{}
where := func(colOp, val string) {
if val != "" {
args = append(args, val)
if len(args) == 1 {
fmt.Fprintf(&b, `WHERE %s?`, colOp)
} else {
fmt.Fprintf(&b, ` AND %s?`, colOp)
}
}
}
where("path=", filter.Path)
where("app_name=", filter.AppName)
return b.String(), args
}
func scanCall(scanner RowScanner, call *models.FnCall) error {
err := scanner.Scan(
&call.ID,
&call.CreatedAt,
&call.StartedAt,
&call.CompletedAt,
&call.Status,
&call.AppName,
&call.Path,
)
if err == sql.ErrNoRows {
return models.ErrCallNotFound
} else if err != nil {
return err
}
return nil
}

View File

@@ -1,126 +0,0 @@
package logs
import (
"encoding/json"
"net/url"
"os"
"path/filepath"
"time"
"context"
"github.com/Sirupsen/logrus"
"github.com/boltdb/bolt"
"gitlab-odx.oracle.com/odx/functions/api/models"
)
type BoltLogDatastore struct {
callLogsBucket []byte
db *bolt.DB
log logrus.FieldLogger
datastore models.Datastore
}
func NewBolt(url *url.URL) (models.FnLog, error) {
dir := filepath.Dir(url.Path)
log := logrus.WithFields(logrus.Fields{"logdb": url.Scheme, "dir": dir})
err := os.MkdirAll(dir, 0755)
if err != nil {
log.WithError(err).Errorln("Could not create data directory for log.db")
return nil, err
}
log.WithFields(logrus.Fields{"path": url.Path}).Debug("Creating bolt log.db")
db, err := bolt.Open(url.Path, 0655, &bolt.Options{Timeout: 1 * time.Second})
if err != nil {
log.WithError(err).Errorln("Error on bolt.Open")
return nil, err
}
// I don't think we need a prefix here do we? Made it blank. If we do, we should call the query param "prefix" instead of bucket.
bucketPrefix := ""
if url.Query()["bucket"] != nil {
bucketPrefix = url.Query()["bucket"][0]
}
callLogsBucketName := []byte(bucketPrefix + "call_logs")
err = db.Update(func(tx *bolt.Tx) error {
for _, name := range [][]byte{callLogsBucketName} {
_, err := tx.CreateBucketIfNotExists(name)
if err != nil {
log.WithError(err).WithFields(logrus.Fields{"name": name}).Error("create bucket")
return err
}
}
return nil
})
if err != nil {
log.WithError(err).Errorln("Error creating bolt buckets")
return nil, err
}
fnl := &BoltLogDatastore{
callLogsBucket: callLogsBucketName,
db: db,
log: log,
}
log.WithFields(logrus.Fields{"prefix": bucketPrefix, "file": url.Path}).Debug("BoltDB initialized")
return NewValidator(fnl), nil
}
func (fnl *BoltLogDatastore) InsertLog(ctx context.Context, callID string, callLog string) error {
log := &models.FnCallLog{
CallID: callID,
Log: callLog,
}
id := []byte(callID)
err := fnl.db.Update(
func(tx *bolt.Tx) error {
bIm := tx.Bucket(fnl.callLogsBucket)
buf, err := json.Marshal(log)
if err != nil {
return err
}
err = bIm.Put(id, buf)
if err != nil {
return err
}
return nil
})
return err
}
func (fnl *BoltLogDatastore) GetLog(ctx context.Context, callID string) (*models.FnCallLog, error) {
var res *models.FnCallLog
err := fnl.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(fnl.callLogsBucket)
v := b.Get([]byte(callID))
if v != nil {
fnCall := &models.FnCallLog{}
err := json.Unmarshal(v, fnCall)
if err != nil {
return nil
}
res = fnCall
} else {
return models.ErrCallLogNotFound
}
return nil
})
return res, err
}
func (fnl *BoltLogDatastore) DeleteLog(ctx context.Context, callID string) error {
_, err := fnl.GetLog(ctx, callID)
//means object does not exist
if err != nil {
return nil
}
id := []byte(callID)
err = fnl.db.Update(func(tx *bolt.Tx) error {
bIm := tx.Bucket(fnl.callLogsBucket)
err := bIm.Delete(id)
return err
})
return err
}

View File

@@ -1,33 +0,0 @@
package logs
import (
"net/url"
"os"
"testing"
"gitlab-odx.oracle.com/odx/functions/api/datastore/bolt"
logTesting "gitlab-odx.oracle.com/odx/functions/api/logs/testing"
)
const tmpLogDb = "/tmp/func_test_log.db"
const tmpDatastore = "/tmp/func_test_datastore.db"
func TestDatastore(t *testing.T) {
os.Remove(tmpLogDb)
os.Remove(tmpDatastore)
uLog, err := url.Parse("bolt://" + tmpLogDb)
if err != nil {
t.Fatalf("failed to parse url: %v", err)
}
uDatastore, err := url.Parse("bolt://" + tmpDatastore)
fnl, err := NewBolt(uLog)
if err != nil {
t.Fatalf("failed to create bolt log datastore: %v", err)
}
ds, err := bolt.New(uDatastore)
if err != nil {
t.Fatalf("failed to create bolt datastore: %v", err)
}
logTesting.Test(t, fnl, ds)
}

View File

@@ -2,9 +2,11 @@ package logs
import (
"fmt"
"github.com/Sirupsen/logrus"
"gitlab-odx.oracle.com/odx/functions/api/models"
"net/url"
"github.com/Sirupsen/logrus"
"gitlab-odx.oracle.com/odx/functions/api/datastore/sql"
"gitlab-odx.oracle.com/odx/functions/api/models"
)
func New(dbURL string) (models.FnLog, error) {
@@ -12,10 +14,10 @@ func New(dbURL string) (models.FnLog, error) {
if err != nil {
logrus.WithError(err).WithFields(logrus.Fields{"url": dbURL}).Fatal("bad DB URL")
}
logrus.WithFields(logrus.Fields{"db": u.Scheme}).Debug("creating new datastore")
logrus.WithFields(logrus.Fields{"db": u.Scheme}).Debug("creating log store")
switch u.Scheme {
case "bolt":
return NewBolt(u)
case "sqlite3", "postgres", "mysql":
return sql.New(u)
default:
return nil, fmt.Errorf("db type not supported %v", u.Scheme)
}

26
api/logs/log_test.go Normal file
View File

@@ -0,0 +1,26 @@
package logs
import (
"net/url"
"os"
"testing"
"gitlab-odx.oracle.com/odx/functions/api/datastore/sql"
logTesting "gitlab-odx.oracle.com/odx/functions/api/logs/testing"
)
const tmpLogDb = "/tmp/func_test_log.db"
func TestDatastore(t *testing.T) {
os.Remove(tmpLogDb)
uLog, err := url.Parse("sqlite3://" + tmpLogDb)
if err != nil {
t.Fatalf("failed to parse url: %v", err)
}
ds, err := sql.New(uLog)
if err != nil {
t.Fatalf("failed to create sqlite3 datastore: %v", err)
}
logTesting.Test(t, ds, ds)
}

View File

@@ -6,7 +6,6 @@ import (
)
type Datastore interface {
// GetApp gets an App by name.
// Returns ErrDatastoreEmptyAppName for empty appName.
// Returns ErrAppsNotFound if no app is found.
@@ -28,7 +27,7 @@ type Datastore interface {
// RemoveApp removes the App named appName. Returns ErrDatastoreEmptyAppName if appName is empty.
// Returns ErrAppsNotFound if an App is not found.
//TODO remove routes automatically? #528
// TODO remove routes automatically? #528
RemoveApp(ctx context.Context, appName string) error
// GetRoute looks up a matching Route for appName and the literal request route routePath.
@@ -63,10 +62,8 @@ type Datastore interface {
GetTask(ctx context.Context, callID string) (*FnCall, error)
GetTasks(ctx context.Context, filter *CallFilter) (FnCalls, error)
// The following provide a generic key value store for arbitrary data; extensions can use it to store extra data
// todo: should we namespace these by app? Then when an app is deleted, it can delete any of this extra data too.
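// e.g. (illustrative): Put(ctx, []byte("my-extension:counter"), []byte("42"))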
Put(context.Context, []byte, []byte) error
Get(context.Context, []byte) ([]byte, error)
// Implement FnLog methods for convenience
FnLog
}
var (

View File

@@ -5,7 +5,15 @@ import (
)
type FnLog interface {
// InsertLog will insert the log at callID, overwriting if it previously
// existed.
InsertLog(ctx context.Context, callID string, callLog string) error
// GetLog will return the log at callID, an error will be returned if the log
// cannot be found.
GetLog(ctx context.Context, callID string) (*FnCallLog, error)
// DeleteLog will remove the log at callID, it will not return an error if
// the log does not exist before removal.
DeleteLog(ctx context.Context, callID string) error
}

View File

@@ -23,8 +23,8 @@ func init() {
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
viper.SetDefault(EnvLogLevel, "info")
viper.SetDefault(EnvMQURL, fmt.Sprintf("bolt://%s/data/worker_mq.db", cwd))
viper.SetDefault(EnvDBURL, fmt.Sprintf("bolt://%s/data/bolt.db?bucket=funcs", cwd))
viper.SetDefault(EnvLOGDBURL, fmt.Sprintf("bolt://%s/data/log.db?bucket=funcs", cwd))
viper.SetDefault(EnvDBURL, fmt.Sprintf("sqlite3://%s/data/fn.db", cwd))
viper.SetDefault(EnvLOGDBURL, "") // default to just using DB url
viper.SetDefault(EnvPort, 8080)
viper.SetDefault(EnvAPIURL, fmt.Sprintf("http://127.0.0.1:%d", viper.GetInt(EnvPort)))
viper.AutomaticEnv() // picks up env vars automatically

View File

@@ -70,9 +70,12 @@ func NewFromEnv(ctx context.Context) *Server {
logrus.WithError(err).Fatal("Error initializing message queue.")
}
logDB, err := logs.New(viper.GetString(EnvLOGDBURL))
if err != nil {
logrus.WithError(err).Fatal("Error initializing logs store.")
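// default to storing logs in the main datastore unless a distinct log DB URL is configured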
var logDB models.FnLog = ds
if ldb := viper.GetString(EnvLOGDBURL); ldb != "" && ldb != viper.GetString(EnvDBURL) {
logDB, err = logs.New(viper.GetString(EnvLOGDBURL))
if err != nil {
logrus.WithError(err).Fatal("Error initializing logs store.")
}
}
apiURL := viper.GetString(EnvAPIURL)

View File

@@ -13,15 +13,13 @@ import (
"github.com/gin-gonic/gin"
"gitlab-odx.oracle.com/odx/functions/api/datastore"
"gitlab-odx.oracle.com/odx/functions/api/logs"
"gitlab-odx.oracle.com/odx/functions/api/models"
"gitlab-odx.oracle.com/odx/functions/api/mqs"
"gitlab-odx.oracle.com/odx/functions/api/runner"
"gitlab-odx.oracle.com/odx/functions/api/server/internal/routecache"
)
var tmpDatastoreBolt = "/tmp/func_test_bolt_datastore.db"
var tmpLogBolt = "/tmp/func_test_bolt_log.db"
var tmpDatastoreTests = "/tmp/func_test_datastore.db"
func testServer(ds models.Datastore, mq models.MessageQueue, logDB models.FnLog, rnr *runner.Runner) *Server {
ctx := context.Background()
@@ -82,28 +80,23 @@ func getErrorResponse(t *testing.T, rec *httptest.ResponseRecorder) models.Error
return errResp
}
func prepareBolt(ctx context.Context, t *testing.T) (models.Datastore, models.FnLog, func()) {
os.Remove(tmpDatastoreBolt)
os.Remove(tmpLogBolt)
ds, err := datastore.New("bolt://" + tmpDatastoreBolt)
func prepareDB(ctx context.Context, t *testing.T) (models.Datastore, models.FnLog, func()) {
os.Remove(tmpDatastoreTests)
ds, err := datastore.New("sqlite3://" + tmpDatastoreTests)
if err != nil {
t.Fatalf("Error when creating datastore: %s", err)
}
logDB, err := logs.New("bolt://" + tmpLogBolt)
if err != nil {
t.Fatalf("Error when creating log store: %s", err)
}
logDB := ds
return ds, logDB, func() {
os.Remove(tmpDatastoreBolt)
os.Remove(tmpLogBolt)
os.Remove(tmpDatastoreTests)
}
}
func TestFullStack(t *testing.T) {
ctx := context.Background()
buf := setLogBuffer()
ds, logDB, closeBolt := prepareBolt(ctx, t)
defer closeBolt()
ds, logDB, close := prepareDB(ctx, t)
defer close()
rnr, rnrcancel := testRunner(t)
defer rnrcancel()

View File

@@ -1,9 +1,7 @@
set -ex
case "$1" in
"bolt" )
"sqlite3" )
docker rm -fv func-server || echo No prev func-server container
docker run --name func-server --privileged -v /var/run/docker.sock:/var/run/docker.sock -d -e NO_PROXY -e HTTP_PROXY -e DOCKER_HOST=${DOCKER_HOST} -e LOG_LEVEL=debug -p 8080:8080 funcy/functions
@@ -26,27 +24,13 @@ case "$1" in
docker rm -fv func-postgres-test || echo No prev test db container
docker rm -fv func-server || echo No prev func-server container
docker run --name func-postgres-test -e "POSTGRES_DB=funcs" -p 5432:5432 -d postgres
docker run --name func-postgres-test -e "POSTGRES_DB=funcs" -e "POSTGRES_PASSWORD=root" -p 5432:5432 -d postgres
sleep 8
export POSTGRES_HOST="$(docker inspect -f '{{.NetworkSettings.IPAddress}}' func-postgres-test)"
export POSTGRES_PORT=5432
docker run --name func-server --privileged -d -e NO_PROXY -e HTTP_PROXY -e DOCKER_HOST=${DOCKER_HOST} -e LOG_LEVEL=debug -e "DB_URL=postgres://postgres@${POSTGRES_HOST}:${POSTGRES_PORT}/funcs?sslmode=disable" -p 8080:8080 -v /var/run/docker.sock:/var/run/docker.sock funcy/functions
docker run --name func-server --privileged -d -e NO_PROXY -e HTTP_PROXY -e DOCKER_HOST=${DOCKER_HOST} -e LOG_LEVEL=debug -e "DB_URL=postgres://postgres:root@${POSTGRES_HOST}:${POSTGRES_PORT}/funcs?sslmode=disable" -p 8080:8080 -v /var/run/docker.sock:/var/run/docker.sock funcy/functions
;;
"redis" )
docker rm -fv func-redis-test|| echo No prev redis test db container
docker rm -fv func-server || echo No prev func-server container
docker run --name func-redis-test -p 6379:6379 -d redis
sleep 8
export REDIS_HOST="$(docker inspect -f '{{.NetworkSettings.IPAddress}}' func-redis-test)"
export REDIS_PORT=6379
docker run --name func-server --privileged -d -e NO_PROXY -e HTTP_PROXY -e DOCKER_HOST=${DOCKER_HOST} -e LOG_LEVEL=debug -e "DB_URL=redis://${REDIS_HOST}:${REDIS_PORT}/" -p 8080:8080 -v /var/run/docker.sock:/var/run/docker.sock funcy/functions
;;
esac
case ${DOCKER_LOCATION:-localhost} in

View File

@@ -26,7 +26,7 @@ function build () {
}
function run () {
docker run --rm --name functions -it -v /var/run/docker.sock:/var/run/docker.sock -e LOG_LEVEL=debug -e "DB_URL=bolt:///app/data/bolt.db" -v $PWD/data:/app/data -p 8080:8080 treeder/functions
docker run --rm --name functions -it -v /var/run/docker.sock:/var/run/docker.sock -e LOG_LEVEL=debug -e "DB_URL=sqlite3:///app/data/fn.db" -v $PWD/data:/app/data -p 8080:8080 treeder/functions
}
switch ($cmd)

View File

@@ -7,14 +7,12 @@ We currently support the following databases and they are passed in via the `DB_
docker run -e "DB_URL=postgres://user:pass@localhost:6212/mydb" ...
```
## [Bolt](https://github.com/boltdb/bolt) (default)
## sqlite3 (default)
URL: `bolt:///functions/data/functions.db`
URL: `sqlite3:///functions/data/functions.db`
Bolt is an embedded database which stores to disk. If you want to use this, be sure you don't lose the data directory by mounting
the directory on your host. eg: `docker run -v $PWD/data:/functions/data -e DB_URL=bolt:///functions/data/bolt.db ...`
[More on BoltDB](boltdb.md)
SQLite3 is an embedded database which stores to disk. If you use it, make sure you don't lose the data directory by mounting
it from your host, e.g.: `docker run -v $PWD/data:/functions/data -e DB_URL=sqlite3:///functions/data/fn.db ...`
## [PostgreSQL](http://www.postgresql.org/)

View File

@@ -1,11 +1,11 @@
# Oracle Functions using BoltDB
BoltDB is the default database, you just need to run the API.
SQLite3 is the default database; you just need to run the API.
## Persistent
To keep it persistent, add a volume flag to the command:
```
docker run --rm -it --privileged -v $PWD/bolt.db:/app/bolt.db -p 8080:8080 treeder/functions
docker run --rm -it --privileged -v $PWD/fn.db:/app/fn.db -p 8080:8080 treeder/functions
```

View File

@@ -1,19 +1,20 @@
# Function logs
We currently support the following function logs stores and they are passed in via the `LOGSTORE_URL` environment variable. For example:
Maximum size of a single log entry: 4 MB
We currently support the following function log stores; they are passed in
via the `LOGSTORE_URL` environment variable. For example:
```sh
docker run -e "LOGSTORE_URL=bolt:///functions/logs/bolt.db" ...
docker run -e "LOGSTORE_URL=sqlite3:///functions/logs/fn.db" ...
```
## [Bolt](https://github.com/boltdb/bolt) (default)
Setting `LOGSTORE_URL` to the same value as `DB_URL` will put logs in the same database as
other data; this is not recommended for production.
URL: `bolt:///functions/logs/bolt.db`
## sqlite3 (default)
Bolt is an embedded database which stores to disk. If you want to use this, be sure you don't lose the data directory by mounting
the directory on your host. eg: `docker run -v $PWD/data:/functions/data -e LOGSTORE_URL=bolt:///functions/data/bolt.db ...`
example URL: `sqlite3:///functions/logs/fn.db`
[More on BoltDB](../databases/boltdb.md)
sqlite3 is an embedded database which stores to disk. If you use it, make sure you don't lose the data directory by mounting
it from your host, e.g.: `docker run -v $PWD/data:/functions/data -e LOGSTORE_URL=sqlite3:///functions/data/fn.db ...`
sqlite3 isn't recommended for production environments.

View File

@@ -11,7 +11,7 @@ docker run -e VAR_NAME=VALUE ...
| Env Variables | Description | Default values |
| --------------|-------------|----------------|
| DB_URL | The database URL to use in URL format. See [Databases](databases/README.md) for more information. | bolt:///app/data/bolt.db |
| DB_URL | The database URL to use in URL format. See [Databases](databases/README.md) for more information. | sqlite3:///app/data/fn.db |
| MQ_URL | The message queue to use in URL format. See [Message Queues](mqs/README.md) for more information. | bolt:///app/data/worker_mq.db |
| API_URL | The primary Oracle Functions API URL to that this instance will talk to. In a production environment, this would be your load balancer URL. | N/A |
| PORT | Sets the port to run on | 8080 |

View File

@@ -65,7 +65,7 @@ func TestCalls(t *testing.T) {
t.Run("get-real-call", func(t *testing.T) {
callID := CallAsync(t, u, &bytes.Buffer{})
time.Sleep(time.Second * 2)
time.Sleep(time.Second * 5)
cfg := &call.GetCallsCallParams{
Call: callID,
Context: s.Context,

View File

@@ -204,7 +204,7 @@ func TestRouteExecutions(t *testing.T) {
u.Path = path.Join(u.Path, "r", s.AppName, routePath)
callID := CallAsync(t, u, &bytes.Buffer{})
time.Sleep(5 * time.Second)
time.Sleep(7 * time.Second)
cfg := &operations.GetCallsCallLogParams{
Call: callID,

View File

@@ -52,7 +52,7 @@ func CheckAppResponseError(t *testing.T, err error) {
case *apps.PostAppsDefault:
msg := err.(*apps.PostAppsDefault).Payload.Error.Message
code := err.(*apps.DeleteAppsAppDefault).Code()
code := err.(*apps.PostAppsDefault).Code()
t.Fatalf("Unexpected error occurred: %v. Status code: %v", msg, code)
return

10
glide.lock generated
View File

@@ -1,5 +1,5 @@
hash: c3a20e76f70802a5cb8ead0c4ac29c6bca272a3d738d9b95538d9fc238ad450c
updated: 2017-06-30T11:01:18.063490786-07:00
hash: 098ae92d825f7f80973ee99791e9e3f968f6f453b1da855d89f36cdbcdc27c6f
updated: 2017-07-06T18:45:39.5860504-07:00
imports:
- name: code.cloudfoundry.org/bytefmt
version: f4415fafc5619dd75599a54a7c91fb3948ad58bd
@@ -168,6 +168,10 @@ imports:
- mq
- name: github.com/jmoiron/jsonq
version: e874b168d07ecc7808bc950a17998a8aa3141d82
- name: github.com/jmoiron/sqlx
version: d9bd385d68c068f1fabb5057e3dedcbcbb039d0f
subpackages:
- reflectx
- name: github.com/lib/pq
version: 8837942c3e09574accbc5f150e2c5e057189cace
subpackages:
@@ -184,6 +188,8 @@ imports:
version: ee05b128a739a0fb76c7ebd3ae4810c1de808d6d
- name: github.com/mattn/go-isatty
version: fc9e8d8ef48496124e79ae0df75490096eccf6fe
- name: github.com/mattn/go-sqlite3
version: 9cbb0970444f7ee9ccc65376394a504150b25de1
- name: github.com/Microsoft/go-winio
version: f533f7a102197536779ea3a8cb881d639e21ec5a
- name: github.com/mitchellh/mapstructure

View File

@@ -68,5 +68,7 @@ import:
- package: gopkg.in/mgo.v2
subpackages:
- bson
- package: github.com/jmoiron/sqlx
- package: github.com/mattn/go-sqlite3
testImport:
- package: github.com/vrischmann/envconfig

11
test.sh
View File

@@ -6,8 +6,6 @@ docker rm -fv func-postgres-test || echo No prev test db container
docker run --name func-postgres-test -p 15432:5432 -d postgres
docker rm -fv func-mysql-test || echo No prev mysql test db container
docker run --name func-mysql-test -p 3307:3306 -e MYSQL_DATABASE=funcs -e MYSQL_ROOT_PASSWORD=root -d mysql
docker rm -fv func-redis-test|| echo No prev redis test db container
docker run --name func-redis-test -p 6301:6379 -d redis
sleep 5
case ${DOCKER_LOCATION:-localhost} in
localhost)
@@ -16,9 +14,6 @@ localhost)
export MYSQL_HOST=localhost
export MYSQL_PORT=3307
export REDIS_HOST=localhost
export REDIS_PORT=6301
;;
docker_ip)
if [[ ! -z ${DOCKER_HOST} ]]
@@ -30,9 +25,6 @@ docker_ip)
export MYSQL_HOST=${DOCKER_IP:-localhost}
export MYSQL_PORT=3307
export REDIS_HOST=${DOCKER_IP:-localhost}
export REDIS_PORT=6301
;;
container_ip)
export POSTGRES_HOST="$(docker inspect -f '{{.NetworkSettings.IPAddress}}' func-postgres-test)"
@@ -40,9 +32,6 @@ container_ip)
export MYSQL_HOST="$(docker inspect -f '{{.NetworkSettings.IPAddress}}' func-mysql-test)"
export MYSQL_PORT=3306
export REDIS_HOST="$(docker inspect -f '{{.NetworkSettings.IPAddress}}' func-redis-test)"
export REDIS_PORT=6379
;;
esac

24
vendor/github.com/jmoiron/sqlx/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
tags
environ

23
vendor/github.com/jmoiron/sqlx/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,23 @@
Copyright (c) 2013, Jason Moiron
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

183
vendor/github.com/jmoiron/sqlx/README.md generated vendored Normal file
View File

@@ -0,0 +1,183 @@
# sqlx
[![Build Status](https://drone.io/github.com/jmoiron/sqlx/status.png)](https://drone.io/github.com/jmoiron/sqlx/latest) [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/jmoiron/sqlx) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE)
sqlx is a library which provides a set of extensions on go's standard
`database/sql` library. The sqlx versions of `sql.DB`, `sql.TX`, `sql.Stmt`,
et al. all leave the underlying interfaces untouched, so that their interfaces
are a superset on the standard ones. This makes it relatively painless to
integrate existing codebases using database/sql with sqlx.
Major additional concepts are:
* Marshal rows into structs (with embedded struct support), maps, and slices
* Named parameter support including prepared statements
* `Get` and `Select` to go quickly from query to struct/slice
In addition to the [godoc API documentation](http://godoc.org/github.com/jmoiron/sqlx),
there is also some [standard documentation](http://jmoiron.github.io/sqlx/) that
explains how to use `database/sql` along with sqlx.
## Recent Changes
* sqlx/types.JsonText has been renamed to JSONText to follow Go naming conventions.
This breaks backwards compatibility, but it's in a way that is trivially fixable
(`s/JsonText/JSONText/g`). The `types` package is both experimental and not in
active development currently.
* Using Go 1.6 and below with `types.JSONText` and `types.GzippedText` can be _potentially unsafe_, **especially** when used with common auto-scan sqlx idioms like `Select` and `Get`. See [golang bug #13905](https://github.com/golang/go/issues/13905).
### Backwards Compatibility
There is no Go1-like promise of absolute stability, but I take the issue seriously
and will maintain the library in a compatible state unless vital bugs prevent me
from doing so. Since [#59](https://github.com/jmoiron/sqlx/issues/59) and
[#60](https://github.com/jmoiron/sqlx/issues/60) necessitated breaking behavior,
a wider API cleanup was done at the time of fixing. It's possible this will happen
in future; if it does, a git tag will be provided for users requiring the old
behavior to continue to use it until such a time as they can migrate.
## install
go get github.com/jmoiron/sqlx
## issues
Row headers can be ambiguous (`SELECT 1 AS a, 2 AS a`), and the result of
`Columns()` does not fully qualify column names in queries like:
```sql
SELECT a.id, a.name, b.id, b.name FROM foos AS a JOIN foos AS b ON a.parent = b.id;
```
making a struct or map destination ambiguous. Use `AS` in your queries
to give columns distinct names, `rows.Scan` to scan them manually, or
`SliceScan` to get a slice of results.
## usage
Below is an example which shows some common use cases for sqlx. Check
[sqlx_test.go](https://github.com/jmoiron/sqlx/blob/master/sqlx_test.go) for more
usage.
```go
package main
import (
_ "github.com/lib/pq"
"database/sql"
"fmt"
"github.com/jmoiron/sqlx"
"log"
)
var schema = `
CREATE TABLE person (
first_name text,
last_name text,
email text
);
CREATE TABLE place (
country text,
city text NULL,
telcode integer
)`
type Person struct {
FirstName string `db:"first_name"`
LastName string `db:"last_name"`
Email string
}
type Place struct {
Country string
City sql.NullString
TelCode int
}
func main() {
// this Pings the database trying to connect, panics on error
// use sqlx.Open() for sql.Open() semantics
db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable")
if err != nil {
log.Fatalln(err)
}
// exec the schema or fail; multi-statement Exec behavior varies between
// database drivers; pq will exec them all, sqlite3 won't, ymmv
db.MustExec(schema)
tx := db.MustBegin()
tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "Jason", "Moiron", "jmoiron@jmoiron.net")
tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "John", "Doe", "johndoeDNE@gmail.net")
tx.MustExec("INSERT INTO place (country, city, telcode) VALUES ($1, $2, $3)", "United States", "New York", "1")
tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Hong Kong", "852")
tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Singapore", "65")
// Named queries can use structs, so if you have an existing struct (i.e. person := &Person{}) that you have populated, you can pass it in as &person
tx.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", &Person{"Jane", "Citizen", "jane.citzen@example.com"})
tx.Commit()
// Query the database, storing results in a []Person (wrapped in []interface{})
people := []Person{}
db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC")
jason, john := people[0], people[1]
fmt.Printf("%#v\n%#v", jason, john)
// Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"}
// Person{FirstName:"John", LastName:"Doe", Email:"johndoeDNE@gmail.net"}
// You can also get a single result, a la QueryRow
jason = Person{}
err = db.Get(&jason, "SELECT * FROM person WHERE first_name=$1", "Jason")
fmt.Printf("%#v\n", jason)
// Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"}
// if you have null fields and use SELECT *, you must use sql.Null* in your struct
places := []Place{}
err = db.Select(&places, "SELECT * FROM place ORDER BY telcode ASC")
if err != nil {
fmt.Println(err)
return
}
usa, singsing, honkers := places[0], places[1], places[2]
fmt.Printf("%#v\n%#v\n%#v\n", usa, singsing, honkers)
// Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1}
// Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65}
// Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852}
// Loop through rows using only one struct
place := Place{}
rows, err := db.Queryx("SELECT * FROM place")
for rows.Next() {
err := rows.StructScan(&place)
if err != nil {
log.Fatalln(err)
}
fmt.Printf("%#v\n", place)
}
// Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1}
// Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852}
// Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65}
// Named queries, using `:name` as the bindvar. Automatic bindvar support
// which takes into account the dbtype based on the driverName on sqlx.Open/Connect
_, err = db.NamedExec(`INSERT INTO person (first_name,last_name,email) VALUES (:first,:last,:email)`,
map[string]interface{}{
"first": "Bin",
"last": "Smuth",
"email": "bensmith@allblacks.nz",
})
// Selects Mr. Smith from the database
rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:fn`, map[string]interface{}{"fn": "Bin"})
// Named queries can also use structs. Their bind names follow the same rules
// as the name -> db mapping, so struct fields are lowercased and the `db` tag
// is taken into consideration.
rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:first_name`, jason)
}
```

207
vendor/github.com/jmoiron/sqlx/bind.go generated vendored Normal file
View File

@@ -0,0 +1,207 @@
package sqlx
import (
"bytes"
"errors"
"reflect"
"strconv"
"strings"
"github.com/jmoiron/sqlx/reflectx"
)
// Bindvar types supported by Rebind, BindMap and BindStruct.
const (
UNKNOWN = iota
QUESTION
DOLLAR
NAMED
)
// BindType returns the bindtype for a given database given a drivername.
func BindType(driverName string) int {
switch driverName {
case "postgres", "pgx":
return DOLLAR
case "mysql":
return QUESTION
case "sqlite3":
return QUESTION
case "oci8", "ora", "goracle":
return NAMED
}
return UNKNOWN
}
// FIXME: this should be able to be tolerant of escaped ?'s in queries without
// losing much speed, and should be to avoid confusion.
// Rebind a query from the default bindtype (QUESTION) to the target bindtype.
func Rebind(bindType int, query string) string {
switch bindType {
case QUESTION, UNKNOWN:
return query
}
// Add space enough for 10 params before we have to allocate
rqb := make([]byte, 0, len(query)+10)
var i, j int
for i = strings.Index(query, "?"); i != -1; i = strings.Index(query, "?") {
rqb = append(rqb, query[:i]...)
switch bindType {
case DOLLAR:
rqb = append(rqb, '$')
case NAMED:
rqb = append(rqb, ':', 'a', 'r', 'g')
}
j++
rqb = strconv.AppendInt(rqb, int64(j), 10)
query = query[i+1:]
}
return string(append(rqb, query...))
}
// Experimental implementation of Rebind which uses a bytes.Buffer. The code is
// much simpler and should be more resistant to odd unicode, but it is twice as
// slow. Kept here for benchmarking purposes and to possibly replace Rebind if
// problems arise with its somewhat naive handling of unicode.
func rebindBuff(bindType int, query string) string {
if bindType != DOLLAR {
return query
}
b := make([]byte, 0, len(query))
rqb := bytes.NewBuffer(b)
j := 1
for _, r := range query {
if r == '?' {
rqb.WriteRune('$')
rqb.WriteString(strconv.Itoa(j))
j++
} else {
rqb.WriteRune(r)
}
}
return rqb.String()
}
// In expands slice values in args, returning the modified query string
// and a new arg list that can be executed by a database. The `query` should
// use the `?` bindVar. The return value uses the `?` bindVar.
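// For illustration (a sketch of typical usage):
//
//	q, args, err := sqlx.In("SELECT * FROM users WHERE level IN (?);", []int{4, 6, 7})
//	// q    == "SELECT * FROM users WHERE level IN (?, ?, ?);"
//	// args == []interface{}{4, 6, 7}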
func In(query string, args ...interface{}) (string, []interface{}, error) {
// argMeta stores reflect.Value and length for slices and
// the value itself for non-slice arguments
type argMeta struct {
v reflect.Value
i interface{}
length int
}
var flatArgsCount int
var anySlices bool
meta := make([]argMeta, len(args))
for i, arg := range args {
v := reflect.ValueOf(arg)
t := reflectx.Deref(v.Type())
if t.Kind() == reflect.Slice {
meta[i].length = v.Len()
meta[i].v = v
anySlices = true
flatArgsCount += meta[i].length
if meta[i].length == 0 {
return "", nil, errors.New("empty slice passed to 'in' query")
}
} else {
meta[i].i = arg
flatArgsCount++
}
}
// don't do any parsing if there aren't any slices; note that this means
// some errors that we might have caught below will not be returned.
if !anySlices {
return query, args, nil
}
newArgs := make([]interface{}, 0, flatArgsCount)
buf := bytes.NewBuffer(make([]byte, 0, len(query)+len(", ?")*flatArgsCount))
var arg, offset int
for i := strings.IndexByte(query[offset:], '?'); i != -1; i = strings.IndexByte(query[offset:], '?') {
if arg >= len(meta) {
// if an argument wasn't passed, lets return an error; this is
// not actually how database/sql Exec/Query works, but since we are
// creating an argument list programmatically, we want to be able
// to catch these programmer errors earlier.
return "", nil, errors.New("number of bindVars exceeds arguments")
}
argMeta := meta[arg]
arg++
// not a slice, continue.
// our questionmark will either be written before the next expansion
// of a slice or after the loop when writing the rest of the query
if argMeta.length == 0 {
offset = offset + i + 1
newArgs = append(newArgs, argMeta.i)
continue
}
// write everything up to and including our ? character
buf.WriteString(query[:offset+i+1])
for si := 1; si < argMeta.length; si++ {
buf.WriteString(", ?")
}
newArgs = appendReflectSlice(newArgs, argMeta.v, argMeta.length)
// slice the query and reset the offset. this avoids some bookkeeping for
// the write after the loop
query = query[offset+i+1:]
offset = 0
}
buf.WriteString(query)
if arg < len(meta) {
return "", nil, errors.New("number of bindVars less than number arguments")
}
return buf.String(), newArgs, nil
}
func appendReflectSlice(args []interface{}, v reflect.Value, vlen int) []interface{} {
switch val := v.Interface().(type) {
case []interface{}:
args = append(args, val...)
case []int:
for i := range val {
args = append(args, val[i])
}
case []string:
for i := range val {
args = append(args, val[i])
}
default:
for si := 0; si < vlen; si++ {
args = append(args, v.Index(si).Interface())
}
}
return args
}

12
vendor/github.com/jmoiron/sqlx/doc.go generated vendored Normal file
View File

@@ -0,0 +1,12 @@
// Package sqlx provides general purpose extensions to database/sql.
//
// It is intended to seamlessly wrap database/sql and provide convenience
// methods which are useful in the development of database driven applications.
// None of the underlying database/sql methods are changed. Instead all extended
// behavior is implemented through new methods defined on wrapper types.
//
// Additions include scanning into structs, named query support, rebinding
// queries for different drivers, convenient shorthands for common error handling
// and more.
//
package sqlx

344
vendor/github.com/jmoiron/sqlx/named.go generated vendored Normal file
View File

@@ -0,0 +1,344 @@
package sqlx
// Named Query Support
//
// * BindMap - bind query bindvars to map/struct args
// * NamedExec, NamedQuery - named query w/ struct or map
// * NamedStmt - a pre-compiled named query which is a prepared statement
//
// Internal Interfaces:
//
// * compileNamedQuery - rebind a named query, returning a query and list of names
// * bindArgs, bindMapArgs, bindAnyArgs - given a list of names, return an arglist
//
import (
"database/sql"
"errors"
"fmt"
"reflect"
"strconv"
"unicode"
"github.com/jmoiron/sqlx/reflectx"
)
// NamedStmt is a prepared statement that executes named queries. Prepare it
// how you would execute a NamedQuery, but pass in a struct or map when executing.
type NamedStmt struct {
Params []string
QueryString string
Stmt *Stmt
}
// Close closes the named statement.
func (n *NamedStmt) Close() error {
return n.Stmt.Close()
}
// Exec executes a named statement using the struct passed.
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) Exec(arg interface{}) (sql.Result, error) {
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
if err != nil {
return *new(sql.Result), err
}
return n.Stmt.Exec(args...)
}
// Query executes a named statement using the struct argument, returning rows.
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) Query(arg interface{}) (*sql.Rows, error) {
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
if err != nil {
return nil, err
}
return n.Stmt.Query(args...)
}
// QueryRow executes a named statement against the database. Because sqlx cannot
// create a *sql.Row with an error condition pre-set for binding errors, sqlx
// returns a *sqlx.Row instead.
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) QueryRow(arg interface{}) *Row {
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
if err != nil {
return &Row{err: err}
}
return n.Stmt.QueryRowx(args...)
}
// MustExec execs a NamedStmt, panicking on error.
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) MustExec(arg interface{}) sql.Result {
res, err := n.Exec(arg)
if err != nil {
panic(err)
}
return res
}
// Queryx using this NamedStmt
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) Queryx(arg interface{}) (*Rows, error) {
r, err := n.Query(arg)
if err != nil {
return nil, err
}
return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err
}
// QueryRowx this NamedStmt. Because of limitations with QueryRow, this is
// an alias for QueryRow.
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) QueryRowx(arg interface{}) *Row {
return n.QueryRow(arg)
}
// Select using this NamedStmt
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) Select(dest interface{}, arg interface{}) error {
rows, err := n.Queryx(arg)
if err != nil {
return err
}
// if something happens here, we want to make sure the rows are Closed
defer rows.Close()
return scanAll(rows, dest, false)
}
// Get using this NamedStmt
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) Get(dest interface{}, arg interface{}) error {
r := n.QueryRowx(arg)
return r.scanAny(dest, false)
}
// Unsafe creates an unsafe version of the NamedStmt
func (n *NamedStmt) Unsafe() *NamedStmt {
r := &NamedStmt{Params: n.Params, Stmt: n.Stmt, QueryString: n.QueryString}
r.Stmt.unsafe = true
return r
}
// A union interface of preparer and binder, required to be able to prepare
// named statements (as the bindtype must be determined).
type namedPreparer interface {
Preparer
binder
}
func prepareNamed(p namedPreparer, query string) (*NamedStmt, error) {
bindType := BindType(p.DriverName())
q, args, err := compileNamedQuery([]byte(query), bindType)
if err != nil {
return nil, err
}
stmt, err := Preparex(p, q)
if err != nil {
return nil, err
}
return &NamedStmt{
QueryString: q,
Params: args,
Stmt: stmt,
}, nil
}
func bindAnyArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
if maparg, ok := arg.(map[string]interface{}); ok {
return bindMapArgs(names, maparg)
}
return bindArgs(names, arg, m)
}
// private interface to generate a list of interfaces from a given struct
// type, given a list of names to pull out of the struct. Used by public
// BindStruct interface.
func bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
arglist := make([]interface{}, 0, len(names))
// grab the indirected value of arg
v := reflect.ValueOf(arg)
for v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; {
v = v.Elem()
}
fields := m.TraversalsByName(v.Type(), names)
for i, t := range fields {
if len(t) == 0 {
return arglist, fmt.Errorf("could not find name %s in %#v", names[i], arg)
}
val := reflectx.FieldByIndexesReadOnly(v, t)
arglist = append(arglist, val.Interface())
}
return arglist, nil
}
// like bindArgs, but for maps.
func bindMapArgs(names []string, arg map[string]interface{}) ([]interface{}, error) {
arglist := make([]interface{}, 0, len(names))
for _, name := range names {
val, ok := arg[name]
if !ok {
return arglist, fmt.Errorf("could not find name %s in %#v", name, arg)
}
arglist = append(arglist, val)
}
return arglist, nil
}
// bindStruct binds a named parameter query with fields from a struct argument.
// The rules for binding field names to parameter names follow the same
// conventions as for StructScan, including obeying the `db` struct tags.
func bindStruct(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
bound, names, err := compileNamedQuery([]byte(query), bindType)
if err != nil {
return "", []interface{}{}, err
}
arglist, err := bindArgs(names, arg, m)
if err != nil {
return "", []interface{}{}, err
}
return bound, arglist, nil
}
// bindMap binds a named parameter query with a map of arguments.
func bindMap(bindType int, query string, args map[string]interface{}) (string, []interface{}, error) {
bound, names, err := compileNamedQuery([]byte(query), bindType)
if err != nil {
return "", []interface{}{}, err
}
arglist, err := bindMapArgs(names, args)
return bound, arglist, err
}
// -- Compilation of Named Queries
// Allow digits and letters in bind params; additionally runes are
// checked against underscores, meaning that bind params can be
// alphanumeric with underscores. Mind the difference between unicode
// digits and numbers, where '5' is a digit but '五' is not.
var allowedBindRunes = []*unicode.RangeTable{unicode.Letter, unicode.Digit}
// FIXME: this function isn't safe for unicode named params, as a failing test
// can testify. This is not a regression but a failure of the original code
// as well. It should be modified to range over runes in a string rather than
// bytes, even though this is less convenient and slower. Hopefully the
// addition of the prepared NamedStmt (which will only do this once) will make
// up for the slightly slower ad-hoc NamedExec/NamedQuery.
// compile a NamedQuery into an unbound query (using the '?' bindvar) and
// a list of names.
func compileNamedQuery(qs []byte, bindType int) (query string, names []string, err error) {
names = make([]string, 0, 10)
rebound := make([]byte, 0, len(qs))
inName := false
last := len(qs) - 1
currentVar := 1
name := make([]byte, 0, 10)
for i, b := range qs {
// a ':' while we're in a name is an error
if b == ':' {
// if this is the second ':' in a '::' escape sequence, append a ':'
if inName && i > 0 && qs[i-1] == ':' {
rebound = append(rebound, ':')
inName = false
continue
} else if inName {
err = errors.New("unexpected `:` while reading named param at " + strconv.Itoa(i))
return query, names, err
}
inName = true
name = []byte{}
// if we're in a name, and this is an allowed character, continue
} else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_' || b == '.') && i != last {
// append the byte to the name if we are in a name and not on the last byte
name = append(name, b)
// if we're in a name and it's not an allowed character, the name is done
} else if inName {
inName = false
// if this is the final byte of the string and it is part of the name, then
// make sure to add it to the name
if i == last && unicode.IsOneOf(allowedBindRunes, rune(b)) {
name = append(name, b)
}
// add the string representation to the names list
names = append(names, string(name))
// add a proper bindvar for the bindType
switch bindType {
// oracle only supports named type bind vars even for positional
case NAMED:
rebound = append(rebound, ':')
rebound = append(rebound, name...)
case QUESTION, UNKNOWN:
rebound = append(rebound, '?')
case DOLLAR:
rebound = append(rebound, '$')
for _, b := range strconv.Itoa(currentVar) {
rebound = append(rebound, byte(b))
}
currentVar++
}
// add this byte to string unless it was not part of the name
if i != last {
rebound = append(rebound, b)
} else if !unicode.IsOneOf(allowedBindRunes, rune(b)) {
rebound = append(rebound, b)
}
} else {
// this is a normal byte and should just go onto the rebound query
rebound = append(rebound, b)
}
}
return string(rebound), names, err
}
// BindNamed binds a struct or a map to a query with named parameters.
// DEPRECATED: use sqlx.Named instead of this; it may be removed in the future.
func BindNamed(bindType int, query string, arg interface{}) (string, []interface{}, error) {
return bindNamedMapper(bindType, query, arg, mapper())
}
// Named takes a query using named parameters and an argument and
// returns a new query with a list of args that can be executed by
// a database. The return value uses the `?` bindvar.
func Named(query string, arg interface{}) (string, []interface{}, error) {
return bindNamedMapper(QUESTION, query, arg, mapper())
}
func bindNamedMapper(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
if maparg, ok := arg.(map[string]interface{}); ok {
return bindMap(bindType, query, maparg)
}
return bindStruct(bindType, query, arg, m)
}
// NamedQuery binds a named query and then runs Query on the result using the
// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with
// map[string]interface{} types.
func NamedQuery(e Ext, query string, arg interface{}) (*Rows, error) {
q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
if err != nil {
return nil, err
}
return e.Queryx(q, args...)
}
// NamedExec uses BindStruct to get a query executable by the driver and
// then runs Exec on the result. Returns an error from the binding
// or the query execution itself.
func NamedExec(e Ext, query string, arg interface{}) (sql.Result, error) {
q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
if err != nil {
return nil, err
}
return e.Exec(q, args...)
}
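
A short usage sketch of the named-query entry points defined above; on *sqlx.DB these are exposed as NamedExec/NamedQuery methods that delegate to the functions shown here. The person table mirrors the one used in the tests below and is an illustrative assumption:

package example

import (
	"log"

	"github.com/jmoiron/sqlx"
)

func namedDemo(db *sqlx.DB) {
	// NamedExec binds :first_name, :last_name, and :email from the map, then Execs.
	_, err := db.NamedExec(
		`INSERT INTO person (first_name, last_name, email)
		 VALUES (:first_name, :last_name, :email)`,
		map[string]interface{}{
			"first_name": "Ardie",
			"last_name":  "Savea",
			"email":      "asavea@ab.co.nz",
		})
	if err != nil {
		log.Fatal(err)
	}

	// NamedQuery performs the same binding but returns *sqlx.Rows for scanning.
	rows, err := db.NamedQuery(
		`SELECT * FROM person WHERE first_name=:first_name`,
		map[string]interface{}{"first_name": "Ardie"})
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
}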

132
vendor/github.com/jmoiron/sqlx/named_context.go generated vendored Normal file

@@ -0,0 +1,132 @@
// +build go1.8
package sqlx
import (
"context"
"database/sql"
)
// A union interface of contextPreparer and binder, required to be able to
// prepare named statements with context (as the bindtype must be determined).
type namedPreparerContext interface {
PreparerContext
binder
}
func prepareNamedContext(ctx context.Context, p namedPreparerContext, query string) (*NamedStmt, error) {
bindType := BindType(p.DriverName())
q, args, err := compileNamedQuery([]byte(query), bindType)
if err != nil {
return nil, err
}
stmt, err := PreparexContext(ctx, p, q)
if err != nil {
return nil, err
}
return &NamedStmt{
QueryString: q,
Params: args,
Stmt: stmt,
}, nil
}
// ExecContext executes a named statement using the struct passed.
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) ExecContext(ctx context.Context, arg interface{}) (sql.Result, error) {
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
if err != nil {
return *new(sql.Result), err
}
return n.Stmt.ExecContext(ctx, args...)
}
// QueryContext executes a named statement using the struct argument, returning rows.
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) QueryContext(ctx context.Context, arg interface{}) (*sql.Rows, error) {
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
if err != nil {
return nil, err
}
return n.Stmt.QueryContext(ctx, args...)
}
// QueryRowContext executes a named statement against the database. Because sqlx cannot
// create a *sql.Row with an error condition pre-set for binding errors, sqlx
// returns a *sqlx.Row instead.
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) QueryRowContext(ctx context.Context, arg interface{}) *Row {
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
if err != nil {
return &Row{err: err}
}
return n.Stmt.QueryRowxContext(ctx, args...)
}
// MustExecContext execs a NamedStmt, panicking on error.
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) MustExecContext(ctx context.Context, arg interface{}) sql.Result {
res, err := n.ExecContext(ctx, arg)
if err != nil {
panic(err)
}
return res
}
// QueryxContext using this NamedStmt
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) QueryxContext(ctx context.Context, arg interface{}) (*Rows, error) {
r, err := n.QueryContext(ctx, arg)
if err != nil {
return nil, err
}
return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err
}
// QueryRowxContext this NamedStmt. Because of limitations with QueryRow, this is
// an alias for QueryRow.
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) QueryRowxContext(ctx context.Context, arg interface{}) *Row {
return n.QueryRowContext(ctx, arg)
}
// SelectContext using this NamedStmt
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) SelectContext(ctx context.Context, dest interface{}, arg interface{}) error {
rows, err := n.QueryxContext(ctx, arg)
if err != nil {
return err
}
// if something happens here, we want to make sure the rows are Closed
defer rows.Close()
return scanAll(rows, dest, false)
}
// GetContext using this NamedStmt
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) GetContext(ctx context.Context, dest interface{}, arg interface{}) error {
r := n.QueryRowxContext(ctx, arg)
return r.scanAny(dest, false)
}
// NamedQueryContext binds a named query and then runs Query on the result using the
// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with
// map[string]interface{} types.
func NamedQueryContext(ctx context.Context, e ExtContext, query string, arg interface{}) (*Rows, error) {
q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
if err != nil {
return nil, err
}
return e.QueryxContext(ctx, q, args...)
}
// NamedExecContext uses BindStruct to get a query executable by the driver and
// then runs Exec on the result. Returns an error from the binding
// or the query execution itself.
func NamedExecContext(ctx context.Context, e ExtContext, query string, arg interface{}) (sql.Result, error) {
q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
if err != nil {
return nil, err
}
return e.ExecContext(ctx, q, args...)
}
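
The context-aware variants mirror the API above. A brief sketch with a deadline, reusing the illustrative Person struct and person table from the earlier sketches:

package example

import (
	"context"
	"log"
	"time"

	"github.com/jmoiron/sqlx"
)

func namedContextDemo(db *sqlx.DB, p Person) {
	// The deadline on ctx cancels the statement if it runs longer than two seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	_, err := db.NamedExecContext(ctx,
		`INSERT INTO person (first_name, last_name, email)
		 VALUES (:first_name, :last_name, :email)`, p)
	if err != nil {
		log.Fatal(err)
	}
}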

136
vendor/github.com/jmoiron/sqlx/named_context_test.go generated vendored Normal file

@@ -0,0 +1,136 @@
// +build go1.8
package sqlx
import (
"context"
"database/sql"
"testing"
)
func TestNamedContextQueries(t *testing.T) {
RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
loadDefaultFixture(db, t)
test := Test{t}
var ns *NamedStmt
var err error
ctx := context.Background()
// Check that invalid preparations fail
ns, err = db.PrepareNamedContext(ctx, "SELECT * FROM person WHERE first_name=:first:name")
if err == nil {
t.Error("Expected an error with invalid prepared statement.")
}
ns, err = db.PrepareNamedContext(ctx, "invalid sql")
if err == nil {
t.Error("Expected an error with invalid prepared statement.")
}
// Check closing works as anticipated
ns, err = db.PrepareNamedContext(ctx, "SELECT * FROM person WHERE first_name=:first_name")
test.Error(err)
err = ns.Close()
test.Error(err)
ns, err = db.PrepareNamedContext(ctx, `
SELECT first_name, last_name, email
FROM person WHERE first_name=:first_name AND email=:email`)
test.Error(err)
// test Queryx w/ uses Query
p := Person{FirstName: "Jason", LastName: "Moiron", Email: "jmoiron@jmoiron.net"}
rows, err := ns.QueryxContext(ctx, p)
test.Error(err)
for rows.Next() {
var p2 Person
rows.StructScan(&p2)
if p.FirstName != p2.FirstName {
t.Errorf("got %s, expected %s", p.FirstName, p2.FirstName)
}
if p.LastName != p2.LastName {
t.Errorf("got %s, expected %s", p.LastName, p2.LastName)
}
if p.Email != p2.Email {
t.Errorf("got %s, expected %s", p.Email, p2.Email)
}
}
// test Select
people := make([]Person, 0, 5)
err = ns.SelectContext(ctx, &people, p)
test.Error(err)
if len(people) != 1 {
t.Errorf("got %d results, expected %d", len(people), 1)
}
if p.FirstName != people[0].FirstName {
t.Errorf("got %s, expected %s", p.FirstName, people[0].FirstName)
}
if p.LastName != people[0].LastName {
t.Errorf("got %s, expected %s", p.LastName, people[0].LastName)
}
if p.Email != people[0].Email {
t.Errorf("got %s, expected %s", p.Email, people[0].Email)
}
// test Exec
ns, err = db.PrepareNamedContext(ctx, `
INSERT INTO person (first_name, last_name, email)
VALUES (:first_name, :last_name, :email)`)
test.Error(err)
js := Person{
FirstName: "Julien",
LastName: "Savea",
Email: "jsavea@ab.co.nz",
}
_, err = ns.ExecContext(ctx, js)
test.Error(err)
// Make sure we can pull him out again
p2 := Person{}
db.GetContext(ctx, &p2, db.Rebind("SELECT * FROM person WHERE email=?"), js.Email)
if p2.Email != js.Email {
t.Errorf("expected %s, got %s", js.Email, p2.Email)
}
// test Txn NamedStmts
tx := db.MustBeginTx(ctx, nil)
txns := tx.NamedStmtContext(ctx, ns)
// We're going to add Steven in this txn
sl := Person{
FirstName: "Steven",
LastName: "Luatua",
Email: "sluatua@ab.co.nz",
}
_, err = txns.ExecContext(ctx, sl)
test.Error(err)
// then rollback...
tx.Rollback()
// looking for Steven after a rollback should fail
err = db.GetContext(ctx, &p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email)
if err != sql.ErrNoRows {
t.Errorf("expected no rows error, got %v", err)
}
// now do the same, but commit
tx = db.MustBeginTx(ctx, nil)
txns = tx.NamedStmtContext(ctx, ns)
_, err = txns.ExecContext(ctx, sl)
test.Error(err)
tx.Commit()
// looking for Steven after a Commit should succeed
err = db.GetContext(ctx, &p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email)
test.Error(err)
if p2.Email != sl.Email {
t.Errorf("expected %s, got %s", sl.Email, p2.Email)
}
})
}

227
vendor/github.com/jmoiron/sqlx/named_test.go generated vendored Normal file

@@ -0,0 +1,227 @@
package sqlx
import (
"database/sql"
"testing"
)
func TestCompileQuery(t *testing.T) {
table := []struct {
Q, R, D, N string
V []string
}{
// basic test for named parameters, invalid char ',' terminating
{
Q: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last)`,
R: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?)`,
D: `INSERT INTO foo (a,b,c,d) VALUES ($1, $2, $3, $4)`,
N: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last)`,
V: []string{"name", "age", "first", "last"},
},
// This query tests a named parameter ending the string as well as numbers
{
Q: `SELECT * FROM a WHERE first_name=:name1 AND last_name=:name2`,
R: `SELECT * FROM a WHERE first_name=? AND last_name=?`,
D: `SELECT * FROM a WHERE first_name=$1 AND last_name=$2`,
N: `SELECT * FROM a WHERE first_name=:name1 AND last_name=:name2`,
V: []string{"name1", "name2"},
},
{
Q: `SELECT "::foo" FROM a WHERE first_name=:name1 AND last_name=:name2`,
R: `SELECT ":foo" FROM a WHERE first_name=? AND last_name=?`,
D: `SELECT ":foo" FROM a WHERE first_name=$1 AND last_name=$2`,
N: `SELECT ":foo" FROM a WHERE first_name=:name1 AND last_name=:name2`,
V: []string{"name1", "name2"},
},
{
Q: `SELECT 'a::b::c' || first_name, '::::ABC::_::' FROM person WHERE first_name=:first_name AND last_name=:last_name`,
R: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=? AND last_name=?`,
D: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=$1 AND last_name=$2`,
N: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=:first_name AND last_name=:last_name`,
V: []string{"first_name", "last_name"},
},
/* This unicode awareness test sadly fails, because of our byte-wise worldview.
* We could certainly iterate by Rune instead, though it's a great deal slower,
* it's probably the RightWay(tm)
{
Q: `INSERT INTO foo (a,b,c,d) VALUES (:あ, :b, :キコ, :名前)`,
R: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?)`,
D: `INSERT INTO foo (a,b,c,d) VALUES ($1, $2, $3, $4)`,
N: []string{"name", "age", "first", "last"},
},
*/
}
for _, test := range table {
qr, names, err := compileNamedQuery([]byte(test.Q), QUESTION)
if err != nil {
t.Error(err)
}
if qr != test.R {
t.Errorf("expected %s, got %s", test.R, qr)
}
if len(names) != len(test.V) {
t.Errorf("expected %#v, got %#v", test.V, names)
} else {
for i, name := range names {
if name != test.V[i] {
t.Errorf("expected %dth name to be %s, got %s", i+1, test.V[i], name)
}
}
}
qd, _, _ := compileNamedQuery([]byte(test.Q), DOLLAR)
if qd != test.D {
t.Errorf("\nexpected: `%s`\ngot: `%s`", test.D, qd)
}
qq, _, _ := compileNamedQuery([]byte(test.Q), NAMED)
if qq != test.N {
t.Errorf("\nexpected: `%s`\ngot: `%s`\n(len: %d vs %d)", test.N, qq, len(test.N), len(qq))
}
}
}
type Test struct {
t *testing.T
}
func (t Test) Error(err error, msg ...interface{}) {
if err != nil {
if len(msg) == 0 {
t.t.Error(err)
} else {
t.t.Error(msg...)
}
}
}
func (t Test) Errorf(err error, format string, args ...interface{}) {
if err != nil {
t.t.Errorf(format, args...)
}
}
func TestNamedQueries(t *testing.T) {
RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
loadDefaultFixture(db, t)
test := Test{t}
var ns *NamedStmt
var err error
// Check that invalid preparations fail
ns, err = db.PrepareNamed("SELECT * FROM person WHERE first_name=:first:name")
if err == nil {
t.Error("Expected an error with invalid prepared statement.")
}
ns, err = db.PrepareNamed("invalid sql")
if err == nil {
t.Error("Expected an error with invalid prepared statement.")
}
// Check closing works as anticipated
ns, err = db.PrepareNamed("SELECT * FROM person WHERE first_name=:first_name")
test.Error(err)
err = ns.Close()
test.Error(err)
ns, err = db.PrepareNamed(`
SELECT first_name, last_name, email
FROM person WHERE first_name=:first_name AND email=:email`)
test.Error(err)
// test Queryx w/ uses Query
p := Person{FirstName: "Jason", LastName: "Moiron", Email: "jmoiron@jmoiron.net"}
rows, err := ns.Queryx(p)
test.Error(err)
for rows.Next() {
var p2 Person
rows.StructScan(&p2)
if p.FirstName != p2.FirstName {
t.Errorf("got %s, expected %s", p.FirstName, p2.FirstName)
}
if p.LastName != p2.LastName {
t.Errorf("got %s, expected %s", p.LastName, p2.LastName)
}
if p.Email != p2.Email {
t.Errorf("got %s, expected %s", p.Email, p2.Email)
}
}
// test Select
people := make([]Person, 0, 5)
err = ns.Select(&people, p)
test.Error(err)
if len(people) != 1 {
t.Errorf("got %d results, expected %d", len(people), 1)
}
if p.FirstName != people[0].FirstName {
t.Errorf("got %s, expected %s", p.FirstName, people[0].FirstName)
}
if p.LastName != people[0].LastName {
t.Errorf("got %s, expected %s", p.LastName, people[0].LastName)
}
if p.Email != people[0].Email {
t.Errorf("got %s, expected %s", p.Email, people[0].Email)
}
// test Exec
ns, err = db.PrepareNamed(`
INSERT INTO person (first_name, last_name, email)
VALUES (:first_name, :last_name, :email)`)
test.Error(err)
js := Person{
FirstName: "Julien",
LastName: "Savea",
Email: "jsavea@ab.co.nz",
}
_, err = ns.Exec(js)
test.Error(err)
// Make sure we can pull him out again
p2 := Person{}
db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), js.Email)
if p2.Email != js.Email {
t.Errorf("expected %s, got %s", js.Email, p2.Email)
}
// test Txn NamedStmts
tx := db.MustBegin()
txns := tx.NamedStmt(ns)
// We're going to add Steven in this txn
sl := Person{
FirstName: "Steven",
LastName: "Luatua",
Email: "sluatua@ab.co.nz",
}
_, err = txns.Exec(sl)
test.Error(err)
// then rollback...
tx.Rollback()
// looking for Steven after a rollback should fail
err = db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email)
if err != sql.ErrNoRows {
t.Errorf("expected no rows error, got %v", err)
}
// now do the same, but commit
tx = db.MustBegin()
txns = tx.NamedStmt(ns)
_, err = txns.Exec(sl)
test.Error(err)
tx.Commit()
// looking for Steven after a Commit should succeed
err = db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email)
test.Error(err)
if p2.Email != sl.Email {
t.Errorf("expected %s, got %s", sl.Email, p2.Email)
}
})
}

17
vendor/github.com/jmoiron/sqlx/reflectx/README.md generated vendored Normal file

@@ -0,0 +1,17 @@
# reflectx
The sqlx package has special reflect needs. In particular, it needs to:
* be able to map a name to a field
* understand embedded structs
* understand mapping names to fields by a particular tag
* user specified name -> field mapping functions
These behaviors mimic the behavior of the standard library marshallers and also the
behavior of standard Go accessors.
The first two are amply taken care of by `reflect.Value.FieldByName`, and the third is
addressed by `reflect.Value.FieldByNameFunc`, but these don't quite understand struct
tags in the ways that are vital to most marshallers, and they are slow.
This reflectx package extends reflect to achieve these goals.
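
A small sketch of the Mapper this package provides, resolving field names by `db` tag and falling back to a lower-cased field name; the Account type is purely illustrative:

package example

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/jmoiron/sqlx/reflectx"
)

type Account struct {
	ID    int    `db:"id"`
	Owner string // no tag: mapped to "owner" by the lower-casing func
}

func mapperDemo() {
	m := reflectx.NewMapperFunc("db", strings.ToLower)
	a := Account{ID: 1, Owner: "jmoiron"}

	// FieldMap keys are the mapped names; values are the reflect.Values behind them.
	fm := m.FieldMap(reflect.ValueOf(a))
	fmt.Println(fm["id"].Interface(), fm["owner"].Interface()) // 1 jmoiron
}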

422
vendor/github.com/jmoiron/sqlx/reflectx/reflect.go generated vendored Normal file

@@ -0,0 +1,422 @@
// Package reflectx implements extensions to the standard reflect lib suitable
// for implementing marshalling and unmarshalling packages. The main Mapper type
// allows for Go-compatible named attribute access, including accessing embedded
// struct attributes and the ability to use functions and struct tags to
// customize field names.
//
package reflectx
import (
"reflect"
"runtime"
"strings"
"sync"
)
// A FieldInfo is metadata for a struct field.
type FieldInfo struct {
Index []int
Path string
Field reflect.StructField
Zero reflect.Value
Name string
Options map[string]string
Embedded bool
Children []*FieldInfo
Parent *FieldInfo
}
// A StructMap is an index of field metadata for a struct.
type StructMap struct {
Tree *FieldInfo
Index []*FieldInfo
Paths map[string]*FieldInfo
Names map[string]*FieldInfo
}
// GetByPath returns a *FieldInfo for a given string path.
func (f StructMap) GetByPath(path string) *FieldInfo {
return f.Paths[path]
}
// GetByTraversal returns a *FieldInfo for a given integer path. It is
// analogous to reflect.FieldByIndex, but using the cached traversal
// rather than re-executing the reflect machinery each time.
func (f StructMap) GetByTraversal(index []int) *FieldInfo {
if len(index) == 0 {
return nil
}
tree := f.Tree
for _, i := range index {
if i >= len(tree.Children) || tree.Children[i] == nil {
return nil
}
tree = tree.Children[i]
}
return tree
}
// Mapper is a general purpose mapper of names to struct fields. A Mapper
// behaves like most marshallers in the standard library, obeying a field tag
// for name mapping but also providing a basic transform function.
type Mapper struct {
cache map[reflect.Type]*StructMap
tagName string
tagMapFunc func(string) string
mapFunc func(string) string
mutex sync.Mutex
}
// NewMapper returns a new mapper using the tagName as its struct field tag.
// If tagName is the empty string, it is ignored.
func NewMapper(tagName string) *Mapper {
return &Mapper{
cache: make(map[reflect.Type]*StructMap),
tagName: tagName,
}
}
// NewMapperTagFunc returns a new mapper which contains a mapper for field names
// AND a mapper for tag values. This is useful for tags like json which can
// have values like "name,omitempty".
func NewMapperTagFunc(tagName string, mapFunc, tagMapFunc func(string) string) *Mapper {
return &Mapper{
cache: make(map[reflect.Type]*StructMap),
tagName: tagName,
mapFunc: mapFunc,
tagMapFunc: tagMapFunc,
}
}
// NewMapperFunc returns a new mapper which optionally obeys a field tag and
// a struct field name mapper func given by f. Tags will take precedence, but
// for any other field, the mapped name will be f(field.Name)
func NewMapperFunc(tagName string, f func(string) string) *Mapper {
return &Mapper{
cache: make(map[reflect.Type]*StructMap),
tagName: tagName,
mapFunc: f,
}
}
// TypeMap returns a mapping of field strings to int slices representing
// the traversal down the struct to reach the field.
func (m *Mapper) TypeMap(t reflect.Type) *StructMap {
m.mutex.Lock()
mapping, ok := m.cache[t]
if !ok {
mapping = getMapping(t, m.tagName, m.mapFunc, m.tagMapFunc)
m.cache[t] = mapping
}
m.mutex.Unlock()
return mapping
}
// FieldMap returns the mapper's mapping of field names to reflect values. Panics
// if v's Kind is not Struct, or v is not Indirectable to a struct kind.
func (m *Mapper) FieldMap(v reflect.Value) map[string]reflect.Value {
v = reflect.Indirect(v)
mustBe(v, reflect.Struct)
r := map[string]reflect.Value{}
tm := m.TypeMap(v.Type())
for tagName, fi := range tm.Names {
r[tagName] = FieldByIndexes(v, fi.Index)
}
return r
}
// FieldByName returns a field by its mapped name as a reflect.Value.
// Panics if v's Kind is not Struct or v is not Indirectable to a struct Kind.
// Returns zero Value if the name is not found.
func (m *Mapper) FieldByName(v reflect.Value, name string) reflect.Value {
v = reflect.Indirect(v)
mustBe(v, reflect.Struct)
tm := m.TypeMap(v.Type())
fi, ok := tm.Names[name]
if !ok {
return v
}
return FieldByIndexes(v, fi.Index)
}
// FieldsByName returns a slice of values corresponding to the slice of names
// for the value. Panics if v's Kind is not Struct or v is not Indirectable
// to a struct Kind. Returns zero Value for each name not found.
func (m *Mapper) FieldsByName(v reflect.Value, names []string) []reflect.Value {
v = reflect.Indirect(v)
mustBe(v, reflect.Struct)
tm := m.TypeMap(v.Type())
vals := make([]reflect.Value, 0, len(names))
for _, name := range names {
fi, ok := tm.Names[name]
if !ok {
vals = append(vals, *new(reflect.Value))
} else {
vals = append(vals, FieldByIndexes(v, fi.Index))
}
}
return vals
}
// TraversalsByName returns a slice of int slices which represent the struct
// traversals for each mapped name. Panics if t is not a struct or Indirectable
// to a struct. Returns empty int slice for each name not found.
func (m *Mapper) TraversalsByName(t reflect.Type, names []string) [][]int {
t = Deref(t)
mustBe(t, reflect.Struct)
tm := m.TypeMap(t)
r := make([][]int, 0, len(names))
for _, name := range names {
fi, ok := tm.Names[name]
if !ok {
r = append(r, []int{})
} else {
r = append(r, fi.Index)
}
}
return r
}
// FieldByIndexes returns a value for the field given by the struct traversal
// for the given value.
func FieldByIndexes(v reflect.Value, indexes []int) reflect.Value {
for _, i := range indexes {
v = reflect.Indirect(v).Field(i)
// if this is a pointer and it's nil, allocate a new value and set it
if v.Kind() == reflect.Ptr && v.IsNil() {
alloc := reflect.New(Deref(v.Type()))
v.Set(alloc)
}
if v.Kind() == reflect.Map && v.IsNil() {
v.Set(reflect.MakeMap(v.Type()))
}
}
return v
}
// FieldByIndexesReadOnly returns a value for a particular struct traversal,
// but is not concerned with allocating nil pointers because the value is
// going to be used for reading and not setting.
func FieldByIndexesReadOnly(v reflect.Value, indexes []int) reflect.Value {
for _, i := range indexes {
v = reflect.Indirect(v).Field(i)
}
return v
}
// Deref is Indirect for reflect.Types
func Deref(t reflect.Type) reflect.Type {
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
return t
}
// -- helpers & utilities --
type kinder interface {
Kind() reflect.Kind
}
// mustBe checks a value against a kind, panicking with a reflect.ValueError
// if the kind isn't that which is required.
func mustBe(v kinder, expected reflect.Kind) {
if k := v.Kind(); k != expected {
panic(&reflect.ValueError{Method: methodName(), Kind: k})
}
}
// methodName returns the caller of the function calling methodName
func methodName() string {
pc, _, _, _ := runtime.Caller(2)
f := runtime.FuncForPC(pc)
if f == nil {
return "unknown method"
}
return f.Name()
}
type typeQueue struct {
t reflect.Type
fi *FieldInfo
pp string // Parent path
}
// A copying append that creates a new slice each time.
func apnd(is []int, i int) []int {
x := make([]int, len(is)+1)
for p, n := range is {
x[p] = n
}
x[len(x)-1] = i
return x
}
type mapf func(string) string
// parseName parses the tag and the target name for the given field using
// the tagName (eg 'json' for `json:"foo"` tags), mapFunc for mapping the
// field's name to a target name, and tagMapFunc for mapping the tag to
// a target name.
func parseName(field reflect.StructField, tagName string, mapFunc, tagMapFunc mapf) (tag, fieldName string) {
// first, set the fieldName to the field's name
fieldName = field.Name
// if a mapFunc is set, use that to override the fieldName
if mapFunc != nil {
fieldName = mapFunc(fieldName)
}
// if there's no tag to look for, return the field name
if tagName == "" {
return "", fieldName
}
// if this tag is not set using the normal convention in the tag,
// then return the fieldname; this check is done because according
// to the reflect documentation:
// If the tag does not have the conventional format,
// the value returned by Get is unspecified.
// which doesn't sound great.
if !strings.Contains(string(field.Tag), tagName+":") {
return "", fieldName
}
// at this point we're fairly sure that we have a tag, so lets pull it out
tag = field.Tag.Get(tagName)
// if we have a mapper function, call it on the whole tag
// XXX: this is a change from the old version, which pulled out the name
// before the tagMapFunc could be run, but I think this is the right way
if tagMapFunc != nil {
tag = tagMapFunc(tag)
}
// finally, split the options from the name
parts := strings.Split(tag, ",")
fieldName = parts[0]
return tag, fieldName
}
// parseOptions parses options out of a tag string, skipping the name
func parseOptions(tag string) map[string]string {
parts := strings.Split(tag, ",")
options := make(map[string]string, len(parts))
if len(parts) > 1 {
for _, opt := range parts[1:] {
// short circuit potentially expensive split op
if strings.Contains(opt, "=") {
kv := strings.Split(opt, "=")
options[kv[0]] = kv[1]
continue
}
options[opt] = ""
}
}
return options
}
// getMapping returns a mapping for the t type, using the tagName, mapFunc and
// tagMapFunc to determine the canonical names of fields.
func getMapping(t reflect.Type, tagName string, mapFunc, tagMapFunc mapf) *StructMap {
m := []*FieldInfo{}
root := &FieldInfo{}
queue := []typeQueue{}
queue = append(queue, typeQueue{Deref(t), root, ""})
QueueLoop:
for len(queue) != 0 {
// pop the first item off of the queue
tq := queue[0]
queue = queue[1:]
// ignore recursive field
for p := tq.fi.Parent; p != nil; p = p.Parent {
if tq.fi.Field.Type == p.Field.Type {
continue QueueLoop
}
}
nChildren := 0
if tq.t.Kind() == reflect.Struct {
nChildren = tq.t.NumField()
}
tq.fi.Children = make([]*FieldInfo, nChildren)
// iterate through all of its fields
for fieldPos := 0; fieldPos < nChildren; fieldPos++ {
f := tq.t.Field(fieldPos)
// parse the tag and the target name using the mapping options for this field
tag, name := parseName(f, tagName, mapFunc, tagMapFunc)
// if the name is "-", disabled via a tag, skip it
if name == "-" {
continue
}
fi := FieldInfo{
Field: f,
Name: name,
Zero: reflect.New(f.Type).Elem(),
Options: parseOptions(tag),
}
// if the path is empty this path is just the name
if tq.pp == "" {
fi.Path = fi.Name
} else {
fi.Path = tq.pp + "." + fi.Name
}
// skip unexported fields
if len(f.PkgPath) != 0 && !f.Anonymous {
continue
}
// bfs search of anonymous embedded structs
if f.Anonymous {
pp := tq.pp
if tag != "" {
pp = fi.Path
}
fi.Embedded = true
fi.Index = apnd(tq.fi.Index, fieldPos)
nChildren := 0
ft := Deref(f.Type)
if ft.Kind() == reflect.Struct {
nChildren = ft.NumField()
}
fi.Children = make([]*FieldInfo, nChildren)
queue = append(queue, typeQueue{Deref(f.Type), &fi, pp})
} else if fi.Zero.Kind() == reflect.Struct || (fi.Zero.Kind() == reflect.Ptr && fi.Zero.Type().Elem().Kind() == reflect.Struct) {
fi.Index = apnd(tq.fi.Index, fieldPos)
fi.Children = make([]*FieldInfo, Deref(f.Type).NumField())
queue = append(queue, typeQueue{Deref(f.Type), &fi, fi.Path})
}
fi.Index = apnd(tq.fi.Index, fieldPos)
fi.Parent = tq.fi
tq.fi.Children[fieldPos] = &fi
m = append(m, &fi)
}
}
flds := &StructMap{Index: m, Tree: root, Paths: map[string]*FieldInfo{}, Names: map[string]*FieldInfo{}}
for _, fi := range flds.Index {
flds.Paths[fi.Path] = fi
if fi.Name != "" && !fi.Embedded {
flds.Names[fi.Path] = fi
}
}
return flds
}
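
To make the traversal machinery above concrete, a sketch of how a caller resolves embedded fields through TraversalsByName and FieldByIndexesReadOnly (sqlx does this when binding named parameters); the Outer/Inner types are illustrative:

package example

import (
	"fmt"
	"reflect"

	"github.com/jmoiron/sqlx/reflectx"
)

type Inner struct {
	N int `db:"n"`
}

type Outer struct {
	Inner        // embedded: "n" becomes reachable at the top level
	S     string `db:"s"`
}

func traversalDemo() {
	m := reflectx.NewMapper("db")

	// Each entry is the index path to follow to reach the field:
	// "n" -> {0, 0} (through the embedded Inner), "s" -> {1}.
	trs := m.TraversalsByName(reflect.TypeOf(Outer{}), []string{"n", "s"})

	v := reflect.ValueOf(Outer{Inner: Inner{N: 7}, S: "x"})
	fmt.Println(reflectx.FieldByIndexesReadOnly(v, trs[0]).Interface()) // 7
	fmt.Println(reflectx.FieldByIndexesReadOnly(v, trs[1]).Interface()) // x
}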

905
vendor/github.com/jmoiron/sqlx/reflectx/reflect_test.go generated vendored Normal file

@@ -0,0 +1,905 @@
package reflectx
import (
"reflect"
"strings"
"testing"
)
func ival(v reflect.Value) int {
return v.Interface().(int)
}
func TestBasic(t *testing.T) {
type Foo struct {
A int
B int
C int
}
f := Foo{1, 2, 3}
fv := reflect.ValueOf(f)
m := NewMapperFunc("", func(s string) string { return s })
v := m.FieldByName(fv, "A")
if ival(v) != f.A {
t.Errorf("Expecting %d, got %d", ival(v), f.A)
}
v = m.FieldByName(fv, "B")
if ival(v) != f.B {
t.Errorf("Expecting %d, got %d", f.B, ival(v))
}
v = m.FieldByName(fv, "C")
if ival(v) != f.C {
t.Errorf("Expecting %d, got %d", f.C, ival(v))
}
}
func TestBasicEmbedded(t *testing.T) {
type Foo struct {
A int
}
type Bar struct {
Foo // `db:""` is implied for an embedded struct
B int
C int `db:"-"`
}
type Baz struct {
A int
Bar `db:"Bar"`
}
m := NewMapperFunc("db", func(s string) string { return s })
z := Baz{}
z.A = 1
z.B = 2
z.C = 4
z.Bar.Foo.A = 3
zv := reflect.ValueOf(z)
fields := m.TypeMap(reflect.TypeOf(z))
if len(fields.Index) != 5 {
t.Errorf("Expecting 5 fields")
}
// for _, fi := range fields.Index {
// log.Println(fi)
// }
v := m.FieldByName(zv, "A")
if ival(v) != z.A {
t.Errorf("Expecting %d, got %d", z.A, ival(v))
}
v = m.FieldByName(zv, "Bar.B")
if ival(v) != z.Bar.B {
t.Errorf("Expecting %d, got %d", z.Bar.B, ival(v))
}
v = m.FieldByName(zv, "Bar.A")
if ival(v) != z.Bar.Foo.A {
t.Errorf("Expecting %d, got %d", z.Bar.Foo.A, ival(v))
}
v = m.FieldByName(zv, "Bar.C")
if _, ok := v.Interface().(int); ok {
t.Errorf("Expecting Bar.C to not exist")
}
fi := fields.GetByPath("Bar.C")
if fi != nil {
t.Errorf("Bar.C should not exist")
}
}
func TestEmbeddedSimple(t *testing.T) {
type UUID [16]byte
type MyID struct {
UUID
}
type Item struct {
ID MyID
}
z := Item{}
m := NewMapper("db")
m.TypeMap(reflect.TypeOf(z))
}
func TestBasicEmbeddedWithTags(t *testing.T) {
type Foo struct {
A int `db:"a"`
}
type Bar struct {
Foo // `db:""` is implied for an embedded struct
B int `db:"b"`
}
type Baz struct {
A int `db:"a"`
Bar // `db:""` is implied for an embedded struct
}
m := NewMapper("db")
z := Baz{}
z.A = 1
z.B = 2
z.Bar.Foo.A = 3
zv := reflect.ValueOf(z)
fields := m.TypeMap(reflect.TypeOf(z))
if len(fields.Index) != 5 {
t.Errorf("Expecting 5 fields")
}
// for _, fi := range fields.index {
// log.Println(fi)
// }
v := m.FieldByName(zv, "a")
if ival(v) != z.Bar.Foo.A { // the dominant field
t.Errorf("Expecting %d, got %d", z.Bar.Foo.A, ival(v))
}
v = m.FieldByName(zv, "b")
if ival(v) != z.B {
t.Errorf("Expecting %d, got %d", z.B, ival(v))
}
}
func TestFlatTags(t *testing.T) {
m := NewMapper("db")
type Asset struct {
Title string `db:"title"`
}
type Post struct {
Author string `db:"author,required"`
Asset Asset `db:""`
}
// Post columns: (author title)
post := Post{Author: "Joe", Asset: Asset{Title: "Hello"}}
pv := reflect.ValueOf(post)
v := m.FieldByName(pv, "author")
if v.Interface().(string) != post.Author {
t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string))
}
v = m.FieldByName(pv, "title")
if v.Interface().(string) != post.Asset.Title {
t.Errorf("Expecting %s, got %s", post.Asset.Title, v.Interface().(string))
}
}
func TestNestedStruct(t *testing.T) {
m := NewMapper("db")
type Details struct {
Active bool `db:"active"`
}
type Asset struct {
Title string `db:"title"`
Details Details `db:"details"`
}
type Post struct {
Author string `db:"author,required"`
Asset `db:"asset"`
}
// Post columns: (author asset.title asset.details.active)
post := Post{
Author: "Joe",
Asset: Asset{Title: "Hello", Details: Details{Active: true}},
}
pv := reflect.ValueOf(post)
v := m.FieldByName(pv, "author")
if v.Interface().(string) != post.Author {
t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string))
}
v = m.FieldByName(pv, "title")
if _, ok := v.Interface().(string); ok {
t.Errorf("Expecting field to not exist")
}
v = m.FieldByName(pv, "asset.title")
if v.Interface().(string) != post.Asset.Title {
t.Errorf("Expecting %s, got %s", post.Asset.Title, v.Interface().(string))
}
v = m.FieldByName(pv, "asset.details.active")
if v.Interface().(bool) != post.Asset.Details.Active {
t.Errorf("Expecting %v, got %v", post.Asset.Details.Active, v.Interface().(bool))
}
}
func TestInlineStruct(t *testing.T) {
m := NewMapperTagFunc("db", strings.ToLower, nil)
type Employee struct {
Name string
ID int
}
type Boss Employee
type person struct {
Employee `db:"employee"`
Boss `db:"boss"`
}
// employees columns: (employee.name employee.id boss.name boss.id)
em := person{Employee: Employee{Name: "Joe", ID: 2}, Boss: Boss{Name: "Dick", ID: 1}}
ev := reflect.ValueOf(em)
fields := m.TypeMap(reflect.TypeOf(em))
if len(fields.Index) != 6 {
t.Errorf("Expecting 6 fields")
}
v := m.FieldByName(ev, "employee.name")
if v.Interface().(string) != em.Employee.Name {
t.Errorf("Expecting %s, got %s", em.Employee.Name, v.Interface().(string))
}
v = m.FieldByName(ev, "boss.id")
if ival(v) != em.Boss.ID {
t.Errorf("Expecting %v, got %v", em.Boss.ID, ival(v))
}
}
func TestRecursiveStruct(t *testing.T) {
type Person struct {
Parent *Person
}
m := NewMapperFunc("db", strings.ToLower)
var p *Person
m.TypeMap(reflect.TypeOf(p))
}
func TestFieldsEmbedded(t *testing.T) {
m := NewMapper("db")
type Person struct {
Name string `db:"name,size=64"`
}
type Place struct {
Name string `db:"name"`
}
type Article struct {
Title string `db:"title"`
}
type PP struct {
Person `db:"person,required"`
Place `db:",someflag"`
Article `db:",required"`
}
// PP columns: (person.name name title)
pp := PP{}
pp.Person.Name = "Peter"
pp.Place.Name = "Toronto"
pp.Article.Title = "Best city ever"
fields := m.TypeMap(reflect.TypeOf(pp))
// for i, f := range fields {
// log.Println(i, f)
// }
ppv := reflect.ValueOf(pp)
v := m.FieldByName(ppv, "person.name")
if v.Interface().(string) != pp.Person.Name {
t.Errorf("Expecting %s, got %s", pp.Person.Name, v.Interface().(string))
}
v = m.FieldByName(ppv, "name")
if v.Interface().(string) != pp.Place.Name {
t.Errorf("Expecting %s, got %s", pp.Place.Name, v.Interface().(string))
}
v = m.FieldByName(ppv, "title")
if v.Interface().(string) != pp.Article.Title {
t.Errorf("Expecting %s, got %s", pp.Article.Title, v.Interface().(string))
}
fi := fields.GetByPath("person")
if _, ok := fi.Options["required"]; !ok {
t.Errorf("Expecting required option to be set")
}
if !fi.Embedded {
t.Errorf("Expecting field to be embedded")
}
if len(fi.Index) != 1 || fi.Index[0] != 0 {
t.Errorf("Expecting index to be [0]")
}
fi = fields.GetByPath("person.name")
if fi == nil {
t.Errorf("Expecting person.name to exist")
}
if fi.Path != "person.name" {
t.Errorf("Expecting %s, got %s", "person.name", fi.Path)
}
if fi.Options["size"] != "64" {
t.Errorf("Expecting %s, got %s", "64", fi.Options["size"])
}
fi = fields.GetByTraversal([]int{1, 0})
if fi == nil {
t.Errorf("Expecting traveral to exist")
}
if fi.Path != "name" {
t.Errorf("Expecting %s, got %s", "name", fi.Path)
}
fi = fields.GetByTraversal([]int{2})
if fi == nil {
t.Errorf("Expecting traversal to exist")
}
if _, ok := fi.Options["required"]; !ok {
t.Errorf("Expecting required option to be set")
}
trs := m.TraversalsByName(reflect.TypeOf(pp), []string{"person.name", "name", "title"})
if !reflect.DeepEqual(trs, [][]int{{0, 0}, {1, 0}, {2, 0}}) {
t.Errorf("Expecting traversal: %v", trs)
}
}
func TestPtrFields(t *testing.T) {
m := NewMapperTagFunc("db", strings.ToLower, nil)
type Asset struct {
Title string
}
type Post struct {
*Asset `db:"asset"`
Author string
}
post := &Post{Author: "Joe", Asset: &Asset{Title: "Hiyo"}}
pv := reflect.ValueOf(post)
fields := m.TypeMap(reflect.TypeOf(post))
if len(fields.Index) != 3 {
t.Errorf("Expecting 3 fields")
}
v := m.FieldByName(pv, "asset.title")
if v.Interface().(string) != post.Asset.Title {
t.Errorf("Expecting %s, got %s", post.Asset.Title, v.Interface().(string))
}
v = m.FieldByName(pv, "author")
if v.Interface().(string) != post.Author {
t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string))
}
}
func TestNamedPtrFields(t *testing.T) {
m := NewMapperTagFunc("db", strings.ToLower, nil)
type User struct {
Name string
}
type Asset struct {
Title string
Owner *User `db:"owner"`
}
type Post struct {
Author string
Asset1 *Asset `db:"asset1"`
Asset2 *Asset `db:"asset2"`
}
post := &Post{Author: "Joe", Asset1: &Asset{Title: "Hiyo", Owner: &User{"Username"}}} // Let Asset2 be nil
pv := reflect.ValueOf(post)
fields := m.TypeMap(reflect.TypeOf(post))
if len(fields.Index) != 9 {
t.Errorf("Expecting 9 fields")
}
v := m.FieldByName(pv, "asset1.title")
if v.Interface().(string) != post.Asset1.Title {
t.Errorf("Expecting %s, got %s", post.Asset1.Title, v.Interface().(string))
}
v = m.FieldByName(pv, "asset1.owner.name")
if v.Interface().(string) != post.Asset1.Owner.Name {
t.Errorf("Expecting %s, got %s", post.Asset1.Owner.Name, v.Interface().(string))
}
v = m.FieldByName(pv, "asset2.title")
if v.Interface().(string) != post.Asset2.Title {
t.Errorf("Expecting %s, got %s", post.Asset2.Title, v.Interface().(string))
}
v = m.FieldByName(pv, "asset2.owner.name")
if v.Interface().(string) != post.Asset2.Owner.Name {
t.Errorf("Expecting %s, got %s", post.Asset2.Owner.Name, v.Interface().(string))
}
v = m.FieldByName(pv, "author")
if v.Interface().(string) != post.Author {
t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string))
}
}
func TestFieldMap(t *testing.T) {
type Foo struct {
A int
B int
C int
}
f := Foo{1, 2, 3}
m := NewMapperFunc("db", strings.ToLower)
fm := m.FieldMap(reflect.ValueOf(f))
if len(fm) != 3 {
t.Errorf("Expecting %d keys, got %d", 3, len(fm))
}
if fm["a"].Interface().(int) != 1 {
t.Errorf("Expecting %d, got %d", 1, ival(fm["a"]))
}
if fm["b"].Interface().(int) != 2 {
t.Errorf("Expecting %d, got %d", 2, ival(fm["b"]))
}
if fm["c"].Interface().(int) != 3 {
t.Errorf("Expecting %d, got %d", 3, ival(fm["c"]))
}
}
func TestTagNameMapping(t *testing.T) {
type Strategy struct {
StrategyID string `protobuf:"bytes,1,opt,name=strategy_id" json:"strategy_id,omitempty"`
StrategyName string
}
m := NewMapperTagFunc("json", strings.ToUpper, func(value string) string {
if strings.Contains(value, ",") {
return strings.Split(value, ",")[0]
}
return value
})
strategy := Strategy{"1", "Alpah"}
mapping := m.TypeMap(reflect.TypeOf(strategy))
for _, key := range []string{"strategy_id", "STRATEGYNAME"} {
if fi := mapping.GetByPath(key); fi == nil {
t.Errorf("Expecting to find key %s in mapping but did not.", key)
}
}
}
func TestMapping(t *testing.T) {
type Person struct {
ID int
Name string
WearsGlasses bool `db:"wears_glasses"`
}
m := NewMapperFunc("db", strings.ToLower)
p := Person{1, "Jason", true}
mapping := m.TypeMap(reflect.TypeOf(p))
for _, key := range []string{"id", "name", "wears_glasses"} {
if fi := mapping.GetByPath(key); fi == nil {
t.Errorf("Expecting to find key %s in mapping but did not.", key)
}
}
type SportsPerson struct {
Weight int
Age int
Person
}
s := SportsPerson{Weight: 100, Age: 30, Person: p}
mapping = m.TypeMap(reflect.TypeOf(s))
for _, key := range []string{"id", "name", "wears_glasses", "weight", "age"} {
if fi := mapping.GetByPath(key); fi == nil {
t.Errorf("Expecting to find key %s in mapping but did not.", key)
}
}
type RugbyPlayer struct {
Position int
IsIntense bool `db:"is_intense"`
IsAllBlack bool `db:"-"`
SportsPerson
}
r := RugbyPlayer{12, true, false, s}
mapping = m.TypeMap(reflect.TypeOf(r))
for _, key := range []string{"id", "name", "wears_glasses", "weight", "age", "position", "is_intense"} {
if fi := mapping.GetByPath(key); fi == nil {
t.Errorf("Expecting to find key %s in mapping but did not.", key)
}
}
if fi := mapping.GetByPath("isallblack"); fi != nil {
t.Errorf("Expecting to ignore `IsAllBlack` field")
}
}
func TestGetByTraversal(t *testing.T) {
type C struct {
C0 int
C1 int
}
type B struct {
B0 string
B1 *C
}
type A struct {
A0 int
A1 B
}
testCases := []struct {
Index []int
ExpectedName string
ExpectNil bool
}{
{
Index: []int{0},
ExpectedName: "A0",
},
{
Index: []int{1, 0},
ExpectedName: "B0",
},
{
Index: []int{1, 1, 1},
ExpectedName: "C1",
},
{
Index: []int{3, 4, 5},
ExpectNil: true,
},
{
Index: []int{},
ExpectNil: true,
},
{
Index: nil,
ExpectNil: true,
},
}
m := NewMapperFunc("db", func(n string) string { return n })
tm := m.TypeMap(reflect.TypeOf(A{}))
for i, tc := range testCases {
fi := tm.GetByTraversal(tc.Index)
if tc.ExpectNil {
if fi != nil {
t.Errorf("%d: expected nil, got %v", i, fi)
}
continue
}
if fi == nil {
t.Errorf("%d: expected %s, got nil", i, tc.ExpectedName)
continue
}
if fi.Name != tc.ExpectedName {
t.Errorf("%d: expected %s, got %s", i, tc.ExpectedName, fi.Name)
}
}
}
// TestMapperMethodsByName tests Mapper methods FieldByName and TraversalsByName
func TestMapperMethodsByName(t *testing.T) {
type C struct {
C0 string
C1 int
}
type B struct {
B0 *C `db:"B0"`
B1 C `db:"B1"`
B2 string `db:"B2"`
}
type A struct {
A0 *B `db:"A0"`
B `db:"A1"`
A2 int
a3 int
}
val := &A{
A0: &B{
B0: &C{C0: "0", C1: 1},
B1: C{C0: "2", C1: 3},
B2: "4",
},
B: B{
B0: nil,
B1: C{C0: "5", C1: 6},
B2: "7",
},
A2: 8,
}
testCases := []struct {
Name string
ExpectInvalid bool
ExpectedValue interface{}
ExpectedIndexes []int
}{
{
Name: "A0.B0.C0",
ExpectedValue: "0",
ExpectedIndexes: []int{0, 0, 0},
},
{
Name: "A0.B0.C1",
ExpectedValue: 1,
ExpectedIndexes: []int{0, 0, 1},
},
{
Name: "A0.B1.C0",
ExpectedValue: "2",
ExpectedIndexes: []int{0, 1, 0},
},
{
Name: "A0.B1.C1",
ExpectedValue: 3,
ExpectedIndexes: []int{0, 1, 1},
},
{
Name: "A0.B2",
ExpectedValue: "4",
ExpectedIndexes: []int{0, 2},
},
{
Name: "A1.B0.C0",
ExpectedValue: "",
ExpectedIndexes: []int{1, 0, 0},
},
{
Name: "A1.B0.C1",
ExpectedValue: 0,
ExpectedIndexes: []int{1, 0, 1},
},
{
Name: "A1.B1.C0",
ExpectedValue: "5",
ExpectedIndexes: []int{1, 1, 0},
},
{
Name: "A1.B1.C1",
ExpectedValue: 6,
ExpectedIndexes: []int{1, 1, 1},
},
{
Name: "A1.B2",
ExpectedValue: "7",
ExpectedIndexes: []int{1, 2},
},
{
Name: "A2",
ExpectedValue: 8,
ExpectedIndexes: []int{2},
},
{
Name: "XYZ",
ExpectInvalid: true,
ExpectedIndexes: []int{},
},
{
Name: "a3",
ExpectInvalid: true,
ExpectedIndexes: []int{},
},
}
// build the names array from the test cases
names := make([]string, len(testCases))
for i, tc := range testCases {
names[i] = tc.Name
}
m := NewMapperFunc("db", func(n string) string { return n })
v := reflect.ValueOf(val)
values := m.FieldsByName(v, names)
if len(values) != len(testCases) {
t.Errorf("expected %d values, got %d", len(testCases), len(values))
t.FailNow()
}
indexes := m.TraversalsByName(v.Type(), names)
if len(indexes) != len(testCases) {
t.Errorf("expected %d traversals, got %d", len(testCases), len(indexes))
t.FailNow()
}
for i, val := range values {
tc := testCases[i]
traversal := indexes[i]
if !reflect.DeepEqual(tc.ExpectedIndexes, traversal) {
t.Errorf("expected %v, got %v", tc.ExpectedIndexes, traversal)
t.FailNow()
}
val = reflect.Indirect(val)
if tc.ExpectInvalid {
if val.IsValid() {
t.Errorf("%d: expected zero value, got %v", i, val)
}
continue
}
if !val.IsValid() {
t.Errorf("%d: expected valid value, got %v", i, val)
continue
}
actualValue := reflect.Indirect(val).Interface()
if !reflect.DeepEqual(tc.ExpectedValue, actualValue) {
t.Errorf("%d: expected %v, got %v", i, tc.ExpectedValue, actualValue)
}
}
}
func TestFieldByIndexes(t *testing.T) {
type C struct {
C0 bool
C1 string
C2 int
C3 map[string]int
}
type B struct {
B1 C
B2 *C
}
type A struct {
A1 B
A2 *B
}
testCases := []struct {
value interface{}
indexes []int
expectedValue interface{}
readOnly bool
}{
{
value: A{
A1: B{B1: C{C0: true}},
},
indexes: []int{0, 0, 0},
expectedValue: true,
readOnly: true,
},
{
value: A{
A2: &B{B2: &C{C1: "answer"}},
},
indexes: []int{1, 1, 1},
expectedValue: "answer",
readOnly: true,
},
{
value: &A{},
indexes: []int{1, 1, 3},
expectedValue: map[string]int{},
},
}
for i, tc := range testCases {
checkResults := func(v reflect.Value) {
if tc.expectedValue == nil {
if !v.IsNil() {
t.Errorf("%d: expected nil, actual %v", i, v.Interface())
}
} else {
if !reflect.DeepEqual(tc.expectedValue, v.Interface()) {
t.Errorf("%d: expected %v, actual %v", i, tc.expectedValue, v.Interface())
}
}
}
checkResults(FieldByIndexes(reflect.ValueOf(tc.value), tc.indexes))
if tc.readOnly {
checkResults(FieldByIndexesReadOnly(reflect.ValueOf(tc.value), tc.indexes))
}
}
}
func TestMustBe(t *testing.T) {
typ := reflect.TypeOf(E1{})
mustBe(typ, reflect.Struct)
defer func() {
if r := recover(); r != nil {
valueErr, ok := r.(*reflect.ValueError)
if !ok {
t.Errorf("unexpected Method: %s", valueErr.Method)
t.Error("expected panic with *reflect.ValueError")
return
}
if valueErr.Method != "github.com/jmoiron/sqlx/reflectx.TestMustBe" {
t.Errorf("unexpected Method: %s", valueErr.Method)
}
if valueErr.Kind != reflect.String {
t.Errorf("unexpected Kind: %s", valueErr.Kind)
}
} else {
t.Error("expected panic")
}
}()
typ = reflect.TypeOf("string")
mustBe(typ, reflect.Struct)
t.Error("got here, didn't expect to")
}
type E1 struct {
A int
}
type E2 struct {
E1
B int
}
type E3 struct {
E2
C int
}
type E4 struct {
E3
D int
}
func BenchmarkFieldNameL1(b *testing.B) {
e4 := E4{D: 1}
for i := 0; i < b.N; i++ {
v := reflect.ValueOf(e4)
f := v.FieldByName("D")
if f.Interface().(int) != 1 {
b.Fatal("Wrong value.")
}
}
}
func BenchmarkFieldNameL4(b *testing.B) {
e4 := E4{}
e4.A = 1
for i := 0; i < b.N; i++ {
v := reflect.ValueOf(e4)
f := v.FieldByName("A")
if f.Interface().(int) != 1 {
b.Fatal("Wrong value.")
}
}
}
func BenchmarkFieldPosL1(b *testing.B) {
e4 := E4{D: 1}
for i := 0; i < b.N; i++ {
v := reflect.ValueOf(e4)
f := v.Field(1)
if f.Interface().(int) != 1 {
b.Fatal("Wrong value.")
}
}
}
func BenchmarkFieldPosL4(b *testing.B) {
e4 := E4{}
e4.A = 1
for i := 0; i < b.N; i++ {
v := reflect.ValueOf(e4)
f := v.Field(0)
f = f.Field(0)
f = f.Field(0)
f = f.Field(0)
if f.Interface().(int) != 1 {
b.Fatal("Wrong value.")
}
}
}
func BenchmarkFieldByIndexL4(b *testing.B) {
e4 := E4{}
e4.A = 1
idx := []int{0, 0, 0, 0}
for i := 0; i < b.N; i++ {
v := reflect.ValueOf(e4)
f := FieldByIndexes(v, idx)
if f.Interface().(int) != 1 {
b.Fatal("Wrong value.")
}
}
}

1035
vendor/github.com/jmoiron/sqlx/sqlx.go generated vendored Normal file

File diff suppressed because it is too large

335
vendor/github.com/jmoiron/sqlx/sqlx_context.go generated vendored Normal file

@@ -0,0 +1,335 @@
// +build go1.8
package sqlx
import (
"context"
"database/sql"
"fmt"
"io/ioutil"
"path/filepath"
"reflect"
)
// ConnectContext to a database and verify with a ping.
func ConnectContext(ctx context.Context, driverName, dataSourceName string) (*DB, error) {
db, err := Open(driverName, dataSourceName)
if err != nil {
return db, err
}
err = db.PingContext(ctx)
return db, err
}
// QueryerContext is an interface used by GetContext and SelectContext
type QueryerContext interface {
QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error)
QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row
}
// PreparerContext is an interface used by PreparexContext.
type PreparerContext interface {
PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
}
// ExecerContext is an interface used by MustExecContext and LoadFileContext
type ExecerContext interface {
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
}
// ExtContext is a union interface which can bind, query, and exec, with Context
// used by NamedQueryContext and NamedExecContext.
type ExtContext interface {
binder
QueryerContext
ExecerContext
}
// SelectContext executes a query using the provided Queryer, and StructScans
// each row into dest, which must be a slice. If the slice elements are
// scannable, then the result set must have only one column. Otherwise,
// StructScan is used. The *sql.Rows are closed automatically.
// Any placeholder parameters are replaced with supplied args.
func SelectContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error {
rows, err := q.QueryxContext(ctx, query, args...)
if err != nil {
return err
}
// if something happens here, we want to make sure the rows are Closed
defer rows.Close()
return scanAll(rows, dest, false)
}
// PreparexContext prepares a statement.
//
// The provided context is used for the preparation of the statement, not for
// the execution of the statement.
func PreparexContext(ctx context.Context, p PreparerContext, query string) (*Stmt, error) {
s, err := p.PrepareContext(ctx, query)
if err != nil {
return nil, err
}
return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err
}
// GetContext does a QueryRow using the provided Queryer, and scans the
// resulting row to dest. If dest is scannable, the result must only have one
// column. Otherwise, StructScan is used. Get will return sql.ErrNoRows like
// row.Scan would. Any placeholder parameters are replaced with supplied args.
// An error is returned if the result set is empty.
func GetContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error {
r := q.QueryRowxContext(ctx, query, args...)
return r.scanAny(dest, false)
}
// LoadFileContext exec's every statement in a file (as a single call to Exec).
// LoadFileContext may return a nil *sql.Result if errors are encountered
// locating or reading the file at path. LoadFile reads the entire file into
// memory, so it is not suitable for loading large data dumps, but can be useful
// for initializing schemas or loading indexes.
//
// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3
// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting
// this by requiring something with DriverName() and then attempting to split the
// queries will be difficult to get right, and its current driver-specific behavior
// is deemed at least not complex in its incorrectness.
func LoadFileContext(ctx context.Context, e ExecerContext, path string) (*sql.Result, error) {
realpath, err := filepath.Abs(path)
if err != nil {
return nil, err
}
contents, err := ioutil.ReadFile(realpath)
if err != nil {
return nil, err
}
res, err := e.ExecContext(ctx, string(contents))
return &res, err
}
// MustExecContext execs the query using e and panics if there was an error.
// Any placeholder parameters are replaced with supplied args.
func MustExecContext(ctx context.Context, e ExecerContext, query string, args ...interface{}) sql.Result {
res, err := e.ExecContext(ctx, query, args...)
if err != nil {
panic(err)
}
return res
}
// PrepareNamedContext returns an sqlx.NamedStmt
func (db *DB) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) {
return prepareNamedContext(ctx, db, query)
}
// NamedQueryContext using this DB.
// Any named placeholder parameters are replaced with fields from arg.
func (db *DB) NamedQueryContext(ctx context.Context, query string, arg interface{}) (*Rows, error) {
return NamedQueryContext(ctx, db, query, arg)
}
// NamedExecContext using this DB.
// Any named placeholder parameters are replaced with fields from arg.
func (db *DB) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) {
return NamedExecContext(ctx, db, query, arg)
}
// SelectContext using this DB.
// Any placeholder parameters are replaced with supplied args.
func (db *DB) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
return SelectContext(ctx, db, dest, query, args...)
}
// GetContext using this DB.
// Any placeholder parameters are replaced with supplied args.
// An error is returned if the result set is empty.
func (db *DB) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
return GetContext(ctx, db, dest, query, args...)
}
// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt.
//
// The provided context is used for the preparation of the statement, not for
// the execution of the statement.
func (db *DB) PreparexContext(ctx context.Context, query string) (*Stmt, error) {
return PreparexContext(ctx, db, query)
}
// QueryxContext queries the database and returns an *sqlx.Rows.
// Any placeholder parameters are replaced with supplied args.
func (db *DB) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
r, err := db.DB.QueryContext(ctx, query, args...)
if err != nil {
return nil, err
}
return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err
}
// QueryRowxContext queries the database and returns an *sqlx.Row.
// Any placeholder parameters are replaced with supplied args.
func (db *DB) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row {
rows, err := db.DB.QueryContext(ctx, query, args...)
return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper}
}
// MustBeginTx starts a transaction, and panics on error. Returns an *sqlx.Tx instead
// of an *sql.Tx.
//
// The provided context is used until the transaction is committed or rolled
// back. If the context is canceled, the sql package will roll back the
// transaction. Tx.Commit will return an error if the context provided to
// MustBeginContext is canceled.
func (db *DB) MustBeginTx(ctx context.Context, opts *sql.TxOptions) *Tx {
tx, err := db.BeginTxx(ctx, opts)
if err != nil {
panic(err)
}
return tx
}
// MustExecContext (panic) runs MustExec using this database.
// Any placeholder parameters are replaced with supplied args.
func (db *DB) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result {
return MustExecContext(ctx, db, query, args...)
}
// BeginTxx begins a transaction and returns an *sqlx.Tx instead of an
// *sql.Tx.
//
// The provided context is used until the transaction is committed or rolled
// back. If the context is canceled, the sql package will roll back the
// transaction. Tx.Commit will return an error if the context provided to
// BeginxContext is canceled.
func (db *DB) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) {
tx, err := db.DB.BeginTx(ctx, opts)
if err != nil {
return nil, err
}
return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err
}
// StmtxContext returns a version of the prepared statement which runs within a
// transaction. Provided stmt can be either *sql.Stmt or *sqlx.Stmt.
func (tx *Tx) StmtxContext(ctx context.Context, stmt interface{}) *Stmt {
var s *sql.Stmt
switch v := stmt.(type) {
case Stmt:
s = v.Stmt
case *Stmt:
s = v.Stmt
case sql.Stmt:
s = &v
case *sql.Stmt:
s = v
default:
panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type()))
}
return &Stmt{Stmt: tx.StmtContext(ctx, s), Mapper: tx.Mapper}
}
// NamedStmtContext returns a version of the prepared statement which runs
// within a transaction.
func (tx *Tx) NamedStmtContext(ctx context.Context, stmt *NamedStmt) *NamedStmt {
return &NamedStmt{
QueryString: stmt.QueryString,
Params: stmt.Params,
Stmt: tx.StmtxContext(ctx, stmt.Stmt),
}
}
// MustExecContext runs MustExecContext within a transaction.
// Any placeholder parameters are replaced with supplied args.
func (tx *Tx) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result {
return MustExecContext(ctx, tx, query, args...)
}
// QueryxContext within a transaction and context.
// Any placeholder parameters are replaced with supplied args.
func (tx *Tx) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
r, err := tx.Tx.QueryContext(ctx, query, args...)
if err != nil {
return nil, err
}
return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err
}
// SelectContext within a transaction and context.
// Any placeholder parameters are replaced with supplied args.
func (tx *Tx) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
return SelectContext(ctx, tx, dest, query, args...)
}
// GetContext within a transaction and context.
// Any placeholder parameters are replaced with supplied args.
// An error is returned if the result set is empty.
func (tx *Tx) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
return GetContext(ctx, tx, dest, query, args...)
}
// QueryRowxContext within a transaction and context.
// Any placeholder parameters are replaced with supplied args.
func (tx *Tx) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row {
rows, err := tx.Tx.QueryContext(ctx, query, args...)
return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper}
}
// NamedExecContext using this Tx.
// Any named placeholder parameters are replaced with fields from arg.
func (tx *Tx) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) {
return NamedExecContext(ctx, tx, query, arg)
}
// SelectContext using the prepared statement.
// Any placeholder parameters are replaced with supplied args.
func (s *Stmt) SelectContext(ctx context.Context, dest interface{}, args ...interface{}) error {
return SelectContext(ctx, &qStmt{s}, dest, "", args...)
}
// GetContext using the prepared statement.
// Any placeholder parameters are replaced with supplied args.
// An error is returned if the result set is empty.
func (s *Stmt) GetContext(ctx context.Context, dest interface{}, args ...interface{}) error {
return GetContext(ctx, &qStmt{s}, dest, "", args...)
}
// MustExecContext (panic) using this statement. Note that the query portion of
// the error output will be blank, as Stmt does not expose its query.
// Any placeholder parameters are replaced with supplied args.
func (s *Stmt) MustExecContext(ctx context.Context, args ...interface{}) sql.Result {
return MustExecContext(ctx, &qStmt{s}, "", args...)
}
// QueryRowxContext using this statement.
// Any placeholder parameters are replaced with supplied args.
func (s *Stmt) QueryRowxContext(ctx context.Context, args ...interface{}) *Row {
qs := &qStmt{s}
return qs.QueryRowxContext(ctx, "", args...)
}
// QueryxContext using this statement.
// Any placeholder parameters are replaced with supplied args.
func (s *Stmt) QueryxContext(ctx context.Context, args ...interface{}) (*Rows, error) {
qs := &qStmt{s}
return qs.QueryxContext(ctx, "", args...)
}
func (q *qStmt) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
return q.Stmt.QueryContext(ctx, args...)
}
func (q *qStmt) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
r, err := q.Stmt.QueryContext(ctx, args...)
if err != nil {
return nil, err
}
return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err
}
func (q *qStmt) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row {
rows, err := q.Stmt.QueryContext(ctx, args...)
return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}
}
func (q *qStmt) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
return q.Stmt.ExecContext(ctx, args...)
}
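
To make the context-aware helpers above concrete, here is a minimal sketch of how they might be used together. It is not part of the vendored file; the `users` table and the inserted values are assumptions made only for the example, and it relies on the `sqlite3` driver vendored elsewhere in this merge.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3"
)

// User maps a row of the hypothetical users table via `db` struct tags.
type User struct {
	ID   int    `db:"id"`
	Name string `db:"name"`
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// ConnectContext opens the database and verifies it with PingContext.
	db, err := sqlx.ConnectContext(ctx, "sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	db.MustExecContext(ctx, `CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)`)
	db.MustExecContext(ctx, `INSERT INTO users (name) VALUES (?), (?)`, "alice", "bob")

	// NamedExecContext binds :name-style parameters from a map or struct.
	if _, err := db.NamedExecContext(ctx, `INSERT INTO users (name) VALUES (:name)`,
		map[string]interface{}{"name": "carol"}); err != nil {
		log.Fatal(err)
	}

	// SelectContext StructScans every row into the destination slice.
	var users []User
	if err := db.SelectContext(ctx, &users, `SELECT id, name FROM users`); err != nil {
		log.Fatal(err)
	}

	// GetContext scans a single row and returns sql.ErrNoRows if none match.
	var u User
	if err := db.GetContext(ctx, &u, `SELECT id, name FROM users WHERE name = ?`, "alice"); err != nil {
		log.Fatal(err)
	}
	log.Printf("loaded %d users; alice has id %d", len(users), u.ID)
}
```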

1344
vendor/github.com/jmoiron/sqlx/sqlx_context_test.go generated vendored Normal file

File diff suppressed because it is too large

1792
vendor/github.com/jmoiron/sqlx/sqlx_test.go generated vendored Normal file

File diff suppressed because it is too large

5
vendor/github.com/jmoiron/sqlx/types/README.md generated vendored Normal file
View File

@@ -0,0 +1,5 @@
# types
The types package provides some useful types which implement the `sql.Scanner`
and `driver.Valuer` interfaces, suitable for use as scan and value targets with
database/sql.
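
As a quick, hypothetical illustration of the description above (not part of the vendored package), the sketch below round-trips a `types.JSONText` value through `database/sql`; the `docs` table and the choice of the sqlite3 driver are assumptions made only for the example.

```go
package main

import (
	"database/sql"
	"log"

	"github.com/jmoiron/sqlx/types"
	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE TABLE docs (id INTEGER PRIMARY KEY, payload TEXT)`); err != nil {
		log.Fatal(err)
	}

	// JSONText implements driver.Valuer, so it can be passed directly as a query argument.
	in := types.JSONText(`{"foo": 1}`)
	if _, err := db.Exec(`INSERT INTO docs (payload) VALUES (?)`, in); err != nil {
		log.Fatal(err)
	}

	// It also implements sql.Scanner, so it can be used as a scan destination.
	var out types.JSONText
	if err := db.QueryRow(`SELECT payload FROM docs WHERE id = 1`).Scan(&out); err != nil {
		log.Fatal(err)
	}

	var m map[string]interface{}
	if err := out.Unmarshal(&m); err != nil {
		log.Fatal(err)
	}
	log.Println("foo =", m["foo"])
}
```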

172
vendor/github.com/jmoiron/sqlx/types/types.go generated vendored Normal file
View File

@@ -0,0 +1,172 @@
package types
import (
"bytes"
"compress/gzip"
"database/sql/driver"
"encoding/json"
"errors"
"io/ioutil"
)
// GzippedText is a []byte which transparently gzips data being submitted to
// a database and ungzips data being Scanned from a database.
type GzippedText []byte
// Value implements the driver.Valuer interface, gzipping the raw value of
// this GzippedText.
func (g GzippedText) Value() (driver.Value, error) {
b := make([]byte, 0, len(g))
buf := bytes.NewBuffer(b)
w := gzip.NewWriter(buf)
w.Write(g)
w.Close()
return buf.Bytes(), nil
}
// Scan implements the sql.Scanner interface, ungzipping the value coming off
// the wire and storing the raw result in the GzippedText.
func (g *GzippedText) Scan(src interface{}) error {
var source []byte
switch src.(type) {
case string:
source = []byte(src.(string))
case []byte:
source = src.([]byte)
default:
return errors.New("Incompatible type for GzippedText")
}
reader, err := gzip.NewReader(bytes.NewReader(source))
if err != nil {
return err
}
defer reader.Close()
b, err := ioutil.ReadAll(reader)
if err != nil {
return err
}
*g = GzippedText(b)
return nil
}
// JSONText is a json.RawMessage, which is a []byte underneath.
// Value() validates the json format in the source, and returns an error if
// the json is not valid. Scan does no validation. JSONText additionally
// implements `Unmarshal`, which unmarshals the json within to an interface{}.
type JSONText json.RawMessage
var emptyJSON = JSONText("{}")
// MarshalJSON returns the *j as the JSON encoding of j.
func (j JSONText) MarshalJSON() ([]byte, error) {
if len(j) == 0 {
return emptyJSON, nil
}
return j, nil
}
// UnmarshalJSON sets *j to a copy of data
func (j *JSONText) UnmarshalJSON(data []byte) error {
if j == nil {
return errors.New("JSONText: UnmarshalJSON on nil pointer")
}
*j = append((*j)[0:0], data...)
return nil
}
// Value returns j as a value. This does a validating unmarshal into another
// RawMessage. If j is invalid json, it returns an error.
func (j JSONText) Value() (driver.Value, error) {
var m json.RawMessage
var err = j.Unmarshal(&m)
if err != nil {
return []byte{}, err
}
return []byte(j), nil
}
// Scan stores the src in *j. No validation is done.
func (j *JSONText) Scan(src interface{}) error {
var source []byte
switch t := src.(type) {
case string:
source = []byte(t)
case []byte:
if len(t) == 0 {
source = emptyJSON
} else {
source = t
}
case nil:
*j = emptyJSON
default:
return errors.New("Incompatible type for JSONText")
}
*j = JSONText(append((*j)[0:0], source...))
return nil
}
// Unmarshal unmarshals the JSON in j to v, as in json.Unmarshal.
func (j *JSONText) Unmarshal(v interface{}) error {
if len(*j) == 0 {
*j = emptyJSON
}
return json.Unmarshal([]byte(*j), v)
}
// String supports pretty printing for JSONText types.
func (j JSONText) String() string {
return string(j)
}
// NullJSONText represents a JSONText that may be null.
// NullJSONText implements the scanner interface so
// it can be used as a scan destination, similar to NullString.
type NullJSONText struct {
JSONText
Valid bool // Valid is true if JSONText is not NULL
}
// Scan implements the Scanner interface.
func (n *NullJSONText) Scan(value interface{}) error {
if value == nil {
n.JSONText, n.Valid = emptyJSON, false
return nil
}
n.Valid = true
return n.JSONText.Scan(value)
}
// Value implements the driver Valuer interface.
func (n NullJSONText) Value() (driver.Value, error) {
if !n.Valid {
return nil, nil
}
return n.JSONText.Value()
}
// BitBool is an implementation of a bool for the MySQL type BIT(1).
// This type allows you to avoid wasting an entire byte for MySQL's boolean type TINYINT.
type BitBool bool
// Value implements the driver.Valuer interface,
// and turns the BitBool into a bitfield (BIT(1)) for MySQL storage.
func (b BitBool) Value() (driver.Value, error) {
if b {
return []byte{1}, nil
}
return []byte{0}, nil
}
// Scan implements the sql.Scanner interface,
// and turns the bitfield incoming from MySQL into a BitBool
func (b *BitBool) Scan(src interface{}) error {
v, ok := src.([]byte)
if !ok {
return errors.New("bad []byte type assertion")
}
*b = v[0] == 1
return nil
}

127
vendor/github.com/jmoiron/sqlx/types/types_test.go generated vendored Normal file
View File

@@ -0,0 +1,127 @@
package types
import "testing"
func TestGzipText(t *testing.T) {
g := GzippedText("Hello, world")
v, err := g.Value()
if err != nil {
t.Errorf("Was not expecting an error")
}
err = (&g).Scan(v)
if err != nil {
t.Errorf("Was not expecting an error")
}
if string(g) != "Hello, world" {
t.Errorf("Was expecting the string we sent in (Hello World), got %s", string(g))
}
}
func TestJSONText(t *testing.T) {
j := JSONText(`{"foo": 1, "bar": 2}`)
v, err := j.Value()
if err != nil {
t.Errorf("Was not expecting an error")
}
err = (&j).Scan(v)
if err != nil {
t.Errorf("Was not expecting an error")
}
m := map[string]interface{}{}
j.Unmarshal(&m)
if m["foo"].(float64) != 1 || m["bar"].(float64) != 2 {
t.Errorf("Expected valid json but got some garbage instead? %#v", m)
}
j = JSONText(`{"foo": 1, invalid, false}`)
v, err = j.Value()
if err == nil {
t.Errorf("Was expecting invalid json to fail!")
}
j = JSONText("")
v, err = j.Value()
if err != nil {
t.Errorf("Was not expecting an error")
}
err = (&j).Scan(v)
if err != nil {
t.Errorf("Was not expecting an error")
}
j = JSONText(nil)
v, err = j.Value()
if err != nil {
t.Errorf("Was not expecting an error")
}
err = (&j).Scan(v)
if err != nil {
t.Errorf("Was not expecting an error")
}
}
func TestNullJSONText(t *testing.T) {
j := NullJSONText{}
err := j.Scan(`{"foo": 1, "bar": 2}`)
if err != nil {
t.Errorf("Was not expecting an error")
}
v, err := j.Value()
if err != nil {
t.Errorf("Was not expecting an error")
}
err = (&j).Scan(v)
if err != nil {
t.Errorf("Was not expecting an error")
}
m := map[string]interface{}{}
j.Unmarshal(&m)
if m["foo"].(float64) != 1 || m["bar"].(float64) != 2 {
t.Errorf("Expected valid json but got some garbage instead? %#v", m)
}
j = NullJSONText{}
err = j.Scan(nil)
if err != nil {
t.Errorf("Was not expecting an error")
}
if j.Valid != false {
t.Errorf("Expected valid to be false, but got true")
}
}
func TestBitBool(t *testing.T) {
// Test true value
var b BitBool = true
v, err := b.Value()
if err != nil {
t.Errorf("Cannot return error")
}
err = (&b).Scan(v)
if err != nil {
t.Errorf("Was not expecting an error")
}
if !b {
t.Errorf("Was expecting the bool we sent in (true), got %v", b)
}
// Test false value
b = false
v, err = b.Value()
if err != nil {
t.Errorf("Cannot return error")
}
err = (&b).Scan(v)
if err != nil {
t.Errorf("Was not expecting an error")
}
if b {
t.Errorf("Was expecting the bool we sent in (false), got %v", b)
}
}

4
vendor/github.com/mattn/go-sqlite3/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,4 @@
*.db
*.exe
*.dll
*.o

18
vendor/github.com/mattn/go-sqlite3/.travis.yml generated vendored Normal file
View File

@@ -0,0 +1,18 @@
language: go
sudo: required
dist: trusty
env:
- GOTAGS=
- GOTAGS=libsqlite3
- GOTAGS=trace
- GOTAGS=vtable
go:
- 1.7
- 1.8
- tip
before_install:
- go get github.com/mattn/goveralls
- go get golang.org/x/tools/cmd/cover
script:
- $HOME/gopath/bin/goveralls -repotoken 3qJVUE0iQwqnCbmNcDsjYu1nh4J4KIFXx
- go test -race -v . -tags "$GOTAGS"

21
vendor/github.com/mattn/go-sqlite3/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Yasuhiro Matsumoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

97
vendor/github.com/mattn/go-sqlite3/README.md generated vendored Normal file
View File

@@ -0,0 +1,97 @@
go-sqlite3
==========
[![GoDoc Reference](https://godoc.org/github.com/mattn/go-sqlite3?status.svg)](http://godoc.org/github.com/mattn/go-sqlite3)
[![Build Status](https://travis-ci.org/mattn/go-sqlite3.svg?branch=master)](https://travis-ci.org/mattn/go-sqlite3)
[![Coverage Status](https://coveralls.io/repos/mattn/go-sqlite3/badge.svg?branch=master)](https://coveralls.io/r/mattn/go-sqlite3?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-sqlite3)](https://goreportcard.com/report/github.com/mattn/go-sqlite3)
Description
-----------
sqlite3 driver conforming to the built-in database/sql interface
Installation
------------
This package can be installed with the go get command:
go get github.com/mattn/go-sqlite3
_go-sqlite3_ is a *cgo* package.
If you want to build your app using go-sqlite3, you need gcc.
However, if you install _go-sqlite3_ with `go install github.com/mattn/go-sqlite3`, you don't need gcc to build your app anymore.
Documentation
-------------
API documentation can be found here: http://godoc.org/github.com/mattn/go-sqlite3
Examples can be found under the `./_example` directory
FAQ
---
* Want to build go-sqlite3 with libsqlite3 on Linux?
Use `go build --tags "libsqlite3 linux"`
* Want to build go-sqlite3 with libsqlite3 on OS X?
Install sqlite3 from homebrew: `brew install sqlite3`
Use `go build --tags "libsqlite3 darwin"`
* Want to build go-sqlite3 with the icu extension?
Use `go build --tags "icu"`
Available extensions: `json1`, `fts5`, `icu`
* Can't build go-sqlite3 on 64-bit Windows?
> You are probably using Go 1.0, which has a problem compiling/linking on 64-bit Windows.
> See: [#27](https://github.com/mattn/go-sqlite3/issues/27)
* Getting an insert error while a query is open?
> You can pass arguments in the connection string, for example as a URI.
> See: [#39](https://github.com/mattn/go-sqlite3/issues/39)
* Want to cross-compile, for example with mingw on Linux or Mac?
> See: [#106](https://github.com/mattn/go-sqlite3/issues/106)
> See also: http://www.limitlessfx.com/cross-compile-golang-app-for-windows-from-linux.html
* Want to get time.Time in the current locale?
Use `loc=auto` in the SQLite3 filename, like `file:foo.db?loc=auto`.
* Can I use this in multiple goroutines concurrently?
Yes for read-only use, but no for writes. See [#50](https://github.com/mattn/go-sqlite3/issues/50), [#51](https://github.com/mattn/go-sqlite3/issues/51), [#209](https://github.com/mattn/go-sqlite3/issues/209).
* Why is it racy if I use a `sql.Open("sqlite3", ":memory:")` database?
Each connection to ":memory:" opens a brand new in-memory SQL database, so if
the stdlib's sql engine happens to open another connection and you've only
specified ":memory:", that connection will see a brand new database. A
workaround is to use "file::memory:?mode=memory&cache=shared"; every
connection to this string will point to the same in-memory database (a short
sketch of this follows below). See [#204](https://github.com/mattn/go-sqlite3/issues/204) for more info.
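
As a minimal sketch of the shared-cache workaround mentioned in the last FAQ item (not part of the vendored README), the following assumes nothing beyond the driver itself; the `kv` table is invented for the example.

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	// Every connection opened from this pool points at the same in-memory database.
	db, err := sql.Open("sqlite3", "file::memory:?mode=memory&cache=shared")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE TABLE kv (k TEXT PRIMARY KEY, v TEXT)`); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec(`INSERT INTO kv (k, v) VALUES (?, ?)`, "greeting", "hello"); err != nil {
		log.Fatal(err)
	}

	// A later query, possibly served by another pooled connection, still sees the table.
	var v string
	if err := db.QueryRow(`SELECT v FROM kv WHERE k = ?`, "greeting").Scan(&v); err != nil {
		log.Fatal(err)
	}
	log.Println(v)
}
```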
License
-------
MIT: http://mattn.mit-license.org/2012
sqlite3-binding.c, sqlite3-binding.h, sqlite3ext.h
The -binding suffix was added to avoid build failures under gccgo.
In this repository, those files are an amalgamation of code that was copied from SQLite3. The license of that code is the same as the license of SQLite3.
Author
------
Yasuhiro Matsumoto (a.k.a mattn)

View File

@@ -0,0 +1,133 @@
package main
import (
"database/sql"
"fmt"
"log"
"math"
"math/rand"
sqlite "github.com/mattn/go-sqlite3"
)
// Computes x^y
func pow(x, y int64) int64 {
return int64(math.Pow(float64(x), float64(y)))
}
// Computes the bitwise exclusive-or of all its arguments
func xor(xs ...int64) int64 {
var ret int64
for _, x := range xs {
ret ^= x
}
return ret
}
// Returns a random number. It's actually deterministic here because
// we don't seed the RNG, but it's an example of a non-pure function
// from SQLite's POV.
func getrand() int64 {
return rand.Int63()
}
// Computes the standard deviation of a GROUPed BY set of values
type stddev struct {
xs []int64
// Running average calculation
sum int64
n int64
}
func newStddev() *stddev { return &stddev{} }
func (s *stddev) Step(x int64) {
s.xs = append(s.xs, x)
s.sum += x
s.n++
}
func (s *stddev) Done() float64 {
mean := float64(s.sum) / float64(s.n)
var sqDiff []float64
for _, x := range s.xs {
sqDiff = append(sqDiff, math.Pow(float64(x)-mean, 2))
}
var dev float64
for _, x := range sqDiff {
dev += x
}
dev /= float64(len(sqDiff))
return math.Sqrt(dev)
}
func main() {
sql.Register("sqlite3_custom", &sqlite.SQLiteDriver{
ConnectHook: func(conn *sqlite.SQLiteConn) error {
if err := conn.RegisterFunc("pow", pow, true); err != nil {
return err
}
if err := conn.RegisterFunc("xor", xor, true); err != nil {
return err
}
if err := conn.RegisterFunc("rand", getrand, false); err != nil {
return err
}
if err := conn.RegisterAggregator("stddev", newStddev, true); err != nil {
return err
}
return nil
},
})
db, err := sql.Open("sqlite3_custom", ":memory:")
if err != nil {
log.Fatal("Failed to open database:", err)
}
defer db.Close()
var i int64
err = db.QueryRow("SELECT pow(2,3)").Scan(&i)
if err != nil {
log.Fatal("POW query error:", err)
}
fmt.Println("pow(2,3) =", i) // 8
err = db.QueryRow("SELECT xor(1,2,3,4,5,6)").Scan(&i)
if err != nil {
log.Fatal("XOR query error:", err)
}
fmt.Println("xor(1,2,3,4,5) =", i) // 7
err = db.QueryRow("SELECT rand()").Scan(&i)
if err != nil {
log.Fatal("RAND query error:", err)
}
fmt.Println("rand() =", i) // pseudorandom
_, err = db.Exec("create table foo (department integer, profits integer)")
if err != nil {
log.Fatal("Failed to create table:", err)
}
_, err = db.Exec("insert into foo values (1, 10), (1, 20), (1, 45), (2, 42), (2, 115)")
if err != nil {
log.Fatal("Failed to insert records:", err)
}
rows, err := db.Query("select department, stddev(profits) from foo group by department")
if err != nil {
log.Fatal("STDDEV query error:", err)
}
defer rows.Close()
for rows.Next() {
var dept int64
var dev float64
if err := rows.Scan(&dept, &dev); err != nil {
log.Fatal(err)
}
fmt.Printf("dept=%d stddev=%f\n", dept, dev)
}
if err := rows.Err(); err != nil {
log.Fatal(err)
}
}

View File

@@ -0,0 +1,78 @@
package main
import (
"database/sql"
"log"
"os"
"github.com/mattn/go-sqlite3"
)
func main() {
sqlite3conn := []*sqlite3.SQLiteConn{}
sql.Register("sqlite3_with_hook_example",
&sqlite3.SQLiteDriver{
ConnectHook: func(conn *sqlite3.SQLiteConn) error {
sqlite3conn = append(sqlite3conn, conn)
conn.RegisterUpdateHook(func(op int, db string, table string, rowid int64) {
switch op {
case sqlite3.SQLITE_INSERT:
log.Println("Notified of insert on db", db, "table", table, "rowid", rowid)
}
})
return nil
},
})
os.Remove("./foo.db")
os.Remove("./bar.db")
srcDb, err := sql.Open("sqlite3_with_hook_example", "./foo.db")
if err != nil {
log.Fatal(err)
}
defer srcDb.Close()
srcDb.Ping()
_, err = srcDb.Exec("create table foo(id int, value text)")
if err != nil {
log.Fatal(err)
}
_, err = srcDb.Exec("insert into foo values(1, 'foo')")
if err != nil {
log.Fatal(err)
}
_, err = srcDb.Exec("insert into foo values(2, 'bar')")
if err != nil {
log.Fatal(err)
}
_, err = srcDb.Query("select * from foo")
if err != nil {
log.Fatal(err)
}
destDb, err := sql.Open("sqlite3_with_hook_example", "./bar.db")
if err != nil {
log.Fatal(err)
}
defer destDb.Close()
destDb.Ping()
bk, err := sqlite3conn[1].Backup("main", sqlite3conn[0], "main")
if err != nil {
log.Fatal(err)
}
_, err = bk.Step(-1)
if err != nil {
log.Fatal(err)
}
_, err = destDb.Query("select * from foo")
if err != nil {
log.Fatal(err)
}
_, err = destDb.Exec("insert into foo values(3, 'bar')")
if err != nil {
log.Fatal(err)
}
bk.Finish()
}

View File

@@ -0,0 +1,22 @@
ifeq ($(OS),Windows_NT)
EXE=extension.exe
EXT=sqlite3_mod_regexp.dll
RM=cmd /c del
LDFLAG=
else
EXE=extension
EXT=sqlite3_mod_regexp.so
RM=rm
LDFLAG=-fPIC
endif
all : $(EXE) $(EXT)
$(EXE) : extension.go
go build $<
$(EXT) : sqlite3_mod_regexp.c
gcc $(LDFLAG) -shared -o $@ $< -lsqlite3 -lpcre
clean :
@-$(RM) $(EXE) $(EXT)

View File

@@ -0,0 +1,43 @@
package main
import (
"database/sql"
"fmt"
"github.com/mattn/go-sqlite3"
"log"
)
func main() {
sql.Register("sqlite3_with_extensions",
&sqlite3.SQLiteDriver{
Extensions: []string{
"sqlite3_mod_regexp",
},
})
db, err := sql.Open("sqlite3_with_extensions", ":memory:")
if err != nil {
log.Fatal(err)
}
defer db.Close()
// Force db to make a new connection in pool
// by putting the original in a transaction
tx, err := db.Begin()
if err != nil {
log.Fatal(err)
}
defer tx.Commit()
// New connection works (hopefully!)
rows, err := db.Query("select 'hello world' where 'hello world' regexp '^hello.*d$'")
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
var helloworld string
rows.Scan(&helloworld)
fmt.Println(helloworld)
}
}

View File

@@ -0,0 +1,31 @@
#include <pcre.h>
#include <string.h>
#include <stdio.h>
#include <sqlite3ext.h>
SQLITE_EXTENSION_INIT1
static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) {
if (argc >= 2) {
const char *target = (const char *)sqlite3_value_text(argv[1]);
const char *pattern = (const char *)sqlite3_value_text(argv[0]);
const char* errstr = NULL;
int erroff = 0;
int vec[500];
int n, rc;
pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL);
rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500);
if (rc <= 0) {
sqlite3_result_error(context, errstr, 0);
return;
}
sqlite3_result_int(context, 1);
}
}
#ifdef _WIN32
__declspec(dllexport)
#endif
int sqlite3_extension_init(sqlite3 *db, char **errmsg, const sqlite3_api_routines *api) {
SQLITE_EXTENSION_INIT2(api);
return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8, (void*)db, regexp_func, NULL, NULL);
}

View File

@@ -0,0 +1,24 @@
ifeq ($(OS),Windows_NT)
EXE=extension.exe
EXT=sqlite3_mod_vtable.dll
RM=cmd /c del
LIBCURL=-lcurldll
LDFLAG=
else
EXE=extension
EXT=sqlite3_mod_vtable.so
RM=rm
LDFLAG=-fPIC
LIBCURL=-lcurl
endif
all : $(EXE) $(EXT)
$(EXE) : extension.go
go build $<
$(EXT) : sqlite3_mod_vtable.cc
g++ $(LDFLAG) -shared -o $@ $< -lsqlite3 $(LIBCURL)
clean :
@-$(RM) $(EXE) $(EXT)

View File

@@ -0,0 +1,37 @@
package main
import (
"database/sql"
"fmt"
"log"
"github.com/mattn/go-sqlite3"
)
func main() {
sql.Register("sqlite3_with_extensions",
&sqlite3.SQLiteDriver{
Extensions: []string{
"sqlite3_mod_vtable",
},
})
db, err := sql.Open("sqlite3_with_extensions", ":memory:")
if err != nil {
log.Fatal(err)
}
defer db.Close()
db.Exec("create virtual table repo using github(id, full_name, description, html_url)")
rows, err := db.Query("select id, full_name, description, html_url from repo")
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
var id, fullName, description, htmlURL string
rows.Scan(&id, &fullName, &description, &htmlURL)
fmt.Printf("%s: %s\n\t%s\n\t%s\n\n", id, fullName, description, htmlURL)
}
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,238 @@
#include <string>
#include <sstream>
#include <sqlite3-binding.h>
#include <sqlite3ext.h>
#include <curl/curl.h>
#include "picojson.h"
#ifdef _WIN32
# define EXPORT __declspec(dllexport)
#else
# define EXPORT
#endif
SQLITE_EXTENSION_INIT1;
typedef struct {
char* data; // response data from server
size_t size; // response size of data
} MEMFILE;
MEMFILE*
memfopen() {
MEMFILE* mf = (MEMFILE*) malloc(sizeof(MEMFILE));
if (mf) {
mf->data = NULL;
mf->size = 0;
}
return mf;
}
void
memfclose(MEMFILE* mf) {
if (mf->data) free(mf->data);
free(mf);
}
size_t
memfwrite(char* ptr, size_t size, size_t nmemb, void* stream) {
MEMFILE* mf = (MEMFILE*) stream;
int block = size * nmemb;
if (!mf) return block; // through
if (!mf->data)
mf->data = (char*) malloc(block);
else
mf->data = (char*) realloc(mf->data, mf->size + block);
if (mf->data) {
memcpy(mf->data + mf->size, ptr, block);
mf->size += block;
}
return block;
}
char*
memfstrdup(MEMFILE* mf) {
char* buf;
if (mf->size == 0) return NULL;
buf = (char*) malloc(mf->size + 1);
memcpy(buf, mf->data, mf->size);
buf[mf->size] = 0;
return buf;
}
static int
my_connect(sqlite3 *db, void *pAux, int argc, const char * const *argv, sqlite3_vtab **ppVTab, char **c) {
std::stringstream ss;
ss << "CREATE TABLE " << argv[0]
<< "(id int, full_name text, description text, html_url text)";
int rc = sqlite3_declare_vtab(db, ss.str().c_str());
*ppVTab = (sqlite3_vtab *) sqlite3_malloc(sizeof(sqlite3_vtab));
memset(*ppVTab, 0, sizeof(sqlite3_vtab));
return rc;
}
static int
my_create(sqlite3 *db, void *pAux, int argc, const char * const * argv, sqlite3_vtab **ppVTab, char **c) {
return my_connect(db, pAux, argc, argv, ppVTab, c);
}
static int my_disconnect(sqlite3_vtab *pVTab) {
sqlite3_free(pVTab);
return SQLITE_OK;
}
static int
my_destroy(sqlite3_vtab *pVTab) {
sqlite3_free(pVTab);
return SQLITE_OK;
}
typedef struct {
sqlite3_vtab_cursor base;
int index;
picojson::value* rows;
} cursor;
static int
my_open(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor) {
MEMFILE* mf;
CURL* curl;
char* json;
CURLcode res = CURLE_OK;
char error[CURL_ERROR_SIZE] = {0};
char* cert_file = getenv("SSL_CERT_FILE");
mf = memfopen();
curl = curl_easy_init();
curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 1);
curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 2);
curl_easy_setopt(curl, CURLOPT_USERAGENT, "curl/7.29.0");
curl_easy_setopt(curl, CURLOPT_URL, "https://api.github.com/repositories");
if (cert_file)
curl_easy_setopt(curl, CURLOPT_CAINFO, cert_file);
curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, error);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, mf);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, memfwrite);
res = curl_easy_perform(curl);
curl_easy_cleanup(curl);
if (res != CURLE_OK) {
std::cerr << error << std::endl;
return SQLITE_FAIL;
}
picojson::value* v = new picojson::value;
std::string err;
picojson::parse(*v, mf->data, mf->data + mf->size, &err);
memfclose(mf);
if (!err.empty()) {
delete v;
std::cerr << err << std::endl;
return SQLITE_FAIL;
}
cursor *c = (cursor *)sqlite3_malloc(sizeof(cursor));
c->rows = v;
c->index = 0;
*ppCursor = &c->base;
return SQLITE_OK;
}
static int
my_close(cursor *c) {
delete c->rows;
sqlite3_free(c);
return SQLITE_OK;
}
static int
my_filter(cursor *c, int idxNum, const char *idxStr, int argc, sqlite3_value **argv) {
c->index = 0;
return SQLITE_OK;
}
static int
my_next(cursor *c) {
c->index++;
return SQLITE_OK;
}
static int
my_eof(cursor *c) {
return c->index >= c->rows->get<picojson::array>().size() ? 1 : 0;
}
static int
my_column(cursor *c, sqlite3_context *ctxt, int i) {
picojson::value v = c->rows->get<picojson::array>()[c->index];
picojson::object row = v.get<picojson::object>();
const char* p = NULL;
switch (i) {
case 0:
p = row["id"].to_str().c_str();
break;
case 1:
p = row["full_name"].to_str().c_str();
break;
case 2:
p = row["description"].to_str().c_str();
break;
case 3:
p = row["html_url"].to_str().c_str();
break;
}
sqlite3_result_text(ctxt, strdup(p), strlen(p), free);
return SQLITE_OK;
}
static int
my_rowid(cursor *c, sqlite3_int64 *pRowid) {
*pRowid = c->index;
return SQLITE_OK;
}
static int
my_bestindex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo) {
return SQLITE_OK;
}
static const sqlite3_module module = {
0,
my_create,
my_connect,
my_bestindex,
my_disconnect,
my_destroy,
my_open,
(int (*)(sqlite3_vtab_cursor *)) my_close,
(int (*)(sqlite3_vtab_cursor *, int, char const *, int, sqlite3_value **)) my_filter,
(int (*)(sqlite3_vtab_cursor *)) my_next,
(int (*)(sqlite3_vtab_cursor *)) my_eof,
(int (*)(sqlite3_vtab_cursor *, sqlite3_context *, int)) my_column,
(int (*)(sqlite3_vtab_cursor *, sqlite3_int64 *)) my_rowid,
NULL, // my_update
NULL, // my_begin
NULL, // my_sync
NULL, // my_commit
NULL, // my_rollback
NULL, // my_findfunction
NULL, // my_rename
};
static void
destructor(void *arg) {
return;
}
extern "C" {
EXPORT int
sqlite3_extension_init(sqlite3 *db, char **errmsg, const sqlite3_api_routines *api) {
SQLITE_EXTENSION_INIT2(api);
sqlite3_create_module_v2(db, "github", &module, NULL, destructor);
return 0;
}
}

View File

@@ -0,0 +1,106 @@
package main
import (
"database/sql"
"fmt"
_ "github.com/mattn/go-sqlite3"
"log"
"os"
)
func main() {
os.Remove("./foo.db")
db, err := sql.Open("sqlite3", "./foo.db")
if err != nil {
log.Fatal(err)
}
defer db.Close()
sqlStmt := `
create table foo (id integer not null primary key, name text);
delete from foo;
`
_, err = db.Exec(sqlStmt)
if err != nil {
log.Printf("%q: %s\n", err, sqlStmt)
return
}
tx, err := db.Begin()
if err != nil {
log.Fatal(err)
}
stmt, err := tx.Prepare("insert into foo(id, name) values(?, ?)")
if err != nil {
log.Fatal(err)
}
defer stmt.Close()
for i := 0; i < 100; i++ {
_, err = stmt.Exec(i, fmt.Sprintf("こんにちわ世界%03d", i))
if err != nil {
log.Fatal(err)
}
}
tx.Commit()
rows, err := db.Query("select id, name from foo")
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
var id int
var name string
err = rows.Scan(&id, &name)
if err != nil {
log.Fatal(err)
}
fmt.Println(id, name)
}
err = rows.Err()
if err != nil {
log.Fatal(err)
}
stmt, err = db.Prepare("select name from foo where id = ?")
if err != nil {
log.Fatal(err)
}
defer stmt.Close()
var name string
err = stmt.QueryRow("3").Scan(&name)
if err != nil {
log.Fatal(err)
}
fmt.Println(name)
_, err = db.Exec("delete from foo")
if err != nil {
log.Fatal(err)
}
_, err = db.Exec("insert into foo(id, name) values(1, 'foo'), (2, 'bar'), (3, 'baz')")
if err != nil {
log.Fatal(err)
}
rows, err = db.Query("select id, name from foo")
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
var id int
var name string
err = rows.Scan(&id, &name)
if err != nil {
log.Fatal(err)
}
fmt.Println(id, name)
}
err = rows.Err()
if err != nil {
log.Fatal(err)
}
}

View File

@@ -0,0 +1,264 @@
package main
import (
"database/sql"
"fmt"
"log"
"os"
sqlite3 "github.com/mattn/go-sqlite3"
)
func traceCallback(info sqlite3.TraceInfo) int {
// Not very readable but may be useful; uncomment next line in case of doubt:
//fmt.Printf("Trace: %#v\n", info)
var dbErrText string
if info.DBError.Code != 0 || info.DBError.ExtendedCode != 0 {
dbErrText = fmt.Sprintf("; DB error: %#v", info.DBError)
} else {
dbErrText = "."
}
// Show the Statement-or-Trigger text in curly braces ('{', '}')
// since from the *paired* ASCII characters they are
// the least used in SQL syntax, therefore better visual delimiters.
// Maybe show 'ExpandedSQL' the same way as 'StmtOrTrigger'.
//
// A known use of curly braces (outside strings) is
// for ODBC escape sequences. Not likely to appear here.
//
// Template languages, etc. don't matter, we should see their *result*
// at *this* level.
// Strange curly braces in SQL code that reached the database driver
// suggest that there is a bug in the application.
// The braces are likely to be either template syntax or
// a programming language's string interpolation syntax.
var expandedText string
if info.ExpandedSQL != "" {
if info.ExpandedSQL == info.StmtOrTrigger {
expandedText = " = exp"
} else {
expandedText = fmt.Sprintf(" expanded {%q}", info.ExpandedSQL)
}
} else {
expandedText = ""
}
// SQLite docs as of September 6, 2016: Tracing and Profiling Functions
// https://www.sqlite.org/c3ref/profile.html
//
// The profile callback time is in units of nanoseconds, however
// the current implementation is only capable of millisecond resolution
// so the six least significant digits in the time are meaningless.
// Future versions of SQLite might provide greater resolution on the profiler callback.
var runTimeText string
if info.RunTimeNanosec == 0 {
if info.EventCode == sqlite3.TraceProfile {
//runTimeText = "; no time" // seems confusing
runTimeText = "; time 0" // no measurement unit
} else {
//runTimeText = "; no time" // seems useless and confusing
}
} else {
const nanosPerMillisec = 1000000
if info.RunTimeNanosec%nanosPerMillisec == 0 {
runTimeText = fmt.Sprintf("; time %d ms", info.RunTimeNanosec/nanosPerMillisec)
} else {
// unexpected: better than millisecond resolution
runTimeText = fmt.Sprintf("; time %d ns!!!", info.RunTimeNanosec)
}
}
var modeText string
if info.AutoCommit {
modeText = "-AC-"
} else {
modeText = "+Tx+"
}
fmt.Printf("Trace: ev %d %s conn 0x%x, stmt 0x%x {%q}%s%s%s\n",
info.EventCode, modeText, info.ConnHandle, info.StmtHandle,
info.StmtOrTrigger, expandedText,
runTimeText,
dbErrText)
return 0
}
func main() {
eventMask := sqlite3.TraceStmt | sqlite3.TraceProfile | sqlite3.TraceRow | sqlite3.TraceClose
sql.Register("sqlite3_tracing",
&sqlite3.SQLiteDriver{
ConnectHook: func(conn *sqlite3.SQLiteConn) error {
err := conn.SetTrace(&sqlite3.TraceConfig{
Callback: traceCallback,
EventMask: uint(eventMask),
WantExpandedSQL: true,
})
return err
},
})
os.Exit(dbMain())
}
// Harder to do DB work in main().
// It's better with a separate function because
// 'defer' and 'os.Exit' don't go well together.
//
// DO NOT use 'log.Fatal...' below: remember that it's equivalent to
// Print() followed by a call to os.Exit(1) --- and
// we want to avoid Exit() so 'defer' can do cleanup.
// Use 'log.Panic...' instead.
func dbMain() int {
db, err := sql.Open("sqlite3_tracing", ":memory:")
if err != nil {
fmt.Printf("Failed to open database: %#+v\n", err)
return 1
}
defer db.Close()
err = db.Ping()
if err != nil {
log.Panic(err)
}
dbSetup(db)
dbDoInsert(db)
dbDoInsertPrepared(db)
dbDoSelect(db)
dbDoSelectPrepared(db)
return 0
}
// 'DDL' stands for "Data Definition Language":
// Note: "INTEGER PRIMARY KEY NOT NULL AUTOINCREMENT" causes the error
// 'near "AUTOINCREMENT": syntax error'; without "NOT NULL" it works.
const tableDDL = `CREATE TABLE t1 (
id INTEGER PRIMARY KEY AUTOINCREMENT,
note VARCHAR NOT NULL
)`
// 'DML' stands for "Data Manipulation Language":
const insertDML = "INSERT INTO t1 (note) VALUES (?)"
const selectDML = "SELECT id, note FROM t1 WHERE note LIKE ?"
const textPrefix = "bla-1234567890-"
const noteTextPattern = "%Prep%"
const nGenRows = 4 // Number of Rows to Generate (for *each* approach tested)
func dbSetup(db *sql.DB) {
var err error
_, err = db.Exec("DROP TABLE IF EXISTS t1")
if err != nil {
log.Panic(err)
}
_, err = db.Exec(tableDDL)
if err != nil {
log.Panic(err)
}
}
func dbDoInsert(db *sql.DB) {
const Descr = "DB-Exec"
for i := 0; i < nGenRows; i++ {
result, err := db.Exec(insertDML, textPrefix+Descr)
if err != nil {
log.Panic(err)
}
resultDoCheck(result, Descr, i)
}
}
func dbDoInsertPrepared(db *sql.DB) {
const Descr = "DB-Prepare"
stmt, err := db.Prepare(insertDML)
if err != nil {
log.Panic(err)
}
defer stmt.Close()
for i := 0; i < nGenRows; i++ {
result, err := stmt.Exec(textPrefix + Descr)
if err != nil {
log.Panic(err)
}
resultDoCheck(result, Descr, i)
}
}
func resultDoCheck(result sql.Result, callerDescr string, callIndex int) {
lastID, err := result.LastInsertId()
if err != nil {
log.Panic(err)
}
nAffected, err := result.RowsAffected()
if err != nil {
log.Panic(err)
}
log.Printf("Exec result for %s (%d): ID = %d, affected = %d\n", callerDescr, callIndex, lastID, nAffected)
}
func dbDoSelect(db *sql.DB) {
const Descr = "DB-Query"
rows, err := db.Query(selectDML, noteTextPattern)
if err != nil {
log.Panic(err)
}
defer rows.Close()
rowsDoFetch(rows, Descr)
}
func dbDoSelectPrepared(db *sql.DB) {
const Descr = "DB-Prepare"
stmt, err := db.Prepare(selectDML)
if err != nil {
log.Panic(err)
}
defer stmt.Close()
rows, err := stmt.Query(noteTextPattern)
if err != nil {
log.Panic(err)
}
defer rows.Close()
rowsDoFetch(rows, Descr)
}
func rowsDoFetch(rows *sql.Rows, callerDescr string) {
var nRows int
var id int64
var note string
for rows.Next() {
err := rows.Scan(&id, &note)
if err != nil {
log.Panic(err)
}
log.Printf("Row for %s (%d): id=%d, note=%q\n",
callerDescr, nRows, id, note)
nRows++
}
if err := rows.Err(); err != nil {
log.Panic(err)
}
log.Printf("Total %d rows for %s.\n", nRows, callerDescr)
}

View File

@@ -0,0 +1,38 @@
package main
import (
"database/sql"
"fmt"
"log"
"github.com/mattn/go-sqlite3"
)
func main() {
sql.Register("sqlite3_with_extensions", &sqlite3.SQLiteDriver{
ConnectHook: func(conn *sqlite3.SQLiteConn) error {
return conn.CreateModule("github", &githubModule{})
},
})
db, err := sql.Open("sqlite3_with_extensions", ":memory:")
if err != nil {
log.Fatal(err)
}
defer db.Close()
_, err = db.Exec("create virtual table repo using github(id, full_name, description, html_url)")
if err != nil {
log.Fatal(err)
}
rows, err := db.Query("select id, full_name, description, html_url from repo")
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
var id, fullName, description, htmlURL string
rows.Scan(&id, &fullName, &description, &htmlURL)
fmt.Printf("%s: %s\n\t%s\n\t%s\n\n", id, fullName, description, htmlURL)
}
}

View File

@@ -0,0 +1,111 @@
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"github.com/mattn/go-sqlite3"
)
type githubRepo struct {
ID int `json:"id"`
FullName string `json:"full_name"`
Description string `json:"description"`
HTMLURL string `json:"html_url"`
}
type githubModule struct {
}
func (m *githubModule) Create(c *sqlite3.SQLiteConn, args []string) (sqlite3.VTab, error) {
err := c.DeclareVTab(fmt.Sprintf(`
CREATE TABLE %s (
id INT,
full_name TEXT,
description TEXT,
html_url TEXT
)`, args[0]))
if err != nil {
return nil, err
}
return &ghRepoTable{}, nil
}
func (m *githubModule) Connect(c *sqlite3.SQLiteConn, args []string) (sqlite3.VTab, error) {
return m.Create(c, args)
}
func (m *githubModule) DestroyModule() {}
type ghRepoTable struct {
repos []githubRepo
}
func (v *ghRepoTable) Open() (sqlite3.VTabCursor, error) {
resp, err := http.Get("https://api.github.com/repositories")
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
var repos []githubRepo
if err := json.Unmarshal(body, &repos); err != nil {
return nil, err
}
return &ghRepoCursor{0, repos}, nil
}
func (v *ghRepoTable) BestIndex(cst []sqlite3.InfoConstraint, ob []sqlite3.InfoOrderBy) (*sqlite3.IndexResult, error) {
return &sqlite3.IndexResult{}, nil
}
func (v *ghRepoTable) Disconnect() error { return nil }
func (v *ghRepoTable) Destroy() error { return nil }
type ghRepoCursor struct {
index int
repos []githubRepo
}
func (vc *ghRepoCursor) Column(c *sqlite3.SQLiteContext, col int) error {
switch col {
case 0:
c.ResultInt(vc.repos[vc.index].ID)
case 1:
c.ResultText(vc.repos[vc.index].FullName)
case 2:
c.ResultText(vc.repos[vc.index].Description)
case 3:
c.ResultText(vc.repos[vc.index].HTMLURL)
}
return nil
}
func (vc *ghRepoCursor) Filter(idxNum int, idxStr string, vals []interface{}) error {
vc.index = 0
return nil
}
func (vc *ghRepoCursor) Next() error {
vc.index++
return nil
}
func (vc *ghRepoCursor) EOF() bool {
return vc.index >= len(vc.repos)
}
func (vc *ghRepoCursor) Rowid() (int64, error) {
return int64(vc.index), nil
}
func (vc *ghRepoCursor) Close() error {
return nil
}

85
vendor/github.com/mattn/go-sqlite3/backup.go generated vendored Normal file
View File

@@ -0,0 +1,85 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package sqlite3
/*
#ifndef USE_LIBSQLITE3
#include <sqlite3-binding.h>
#else
#include <sqlite3.h>
#endif
#include <stdlib.h>
*/
import "C"
import (
"runtime"
"unsafe"
)
// SQLiteBackup is an online backup handle returned by Backup.
type SQLiteBackup struct {
b *C.sqlite3_backup
}
// Backup makes a backup from the src database (on conn) into the dest database (on c).
func (c *SQLiteConn) Backup(dest string, conn *SQLiteConn, src string) (*SQLiteBackup, error) {
destptr := C.CString(dest)
defer C.free(unsafe.Pointer(destptr))
srcptr := C.CString(src)
defer C.free(unsafe.Pointer(srcptr))
if b := C.sqlite3_backup_init(c.db, destptr, conn.db, srcptr); b != nil {
bb := &SQLiteBackup{b: b}
runtime.SetFinalizer(bb, (*SQLiteBackup).Finish)
return bb, nil
}
return nil, c.lastError()
}
// Step copies up to p pages from the source to the destination database by
// calling the underlying `sqlite3_backup_step` function. It returns a boolean
// indicating whether the backup is done and an error signalling any other
// failure. Done is true when the underlying C function returns SQLITE_DONE
// (code 101).
func (b *SQLiteBackup) Step(p int) (bool, error) {
ret := C.sqlite3_backup_step(b.b, C.int(p))
if ret == C.SQLITE_DONE {
return true, nil
} else if ret != 0 && ret != C.SQLITE_LOCKED && ret != C.SQLITE_BUSY {
return false, Error{Code: ErrNo(ret)}
}
return false, nil
}
// Remaining returns the number of pages still to be backed up.
func (b *SQLiteBackup) Remaining() int {
return int(C.sqlite3_backup_remaining(b.b))
}
// PageCount returns the total number of pages in the source database.
func (b *SQLiteBackup) PageCount() int {
return int(C.sqlite3_backup_pagecount(b.b))
}
// Finish closes the backup.
func (b *SQLiteBackup) Finish() error {
return b.Close()
}
// Close closes the backup.
func (b *SQLiteBackup) Close() error {
ret := C.sqlite3_backup_finish(b.b)
// sqlite3_backup_finish() never fails, it just returns the
// error code from previous operations, so clean up before
// checking and returning an error
b.b = nil
runtime.SetFinalizer(b, nil)
if ret != 0 {
return Error{Code: ErrNo(ret)}
}
return nil
}
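
For orientation, here is a condensed, hypothetical sketch of the page-by-page pattern these doc comments describe; it is not part of the vendored file. `copyDatabase`, `srcConn`, and `destConn` are names invented for the example, and the two connections are assumed to have been captured via a ConnectHook, as in the earlier hook example.

```go
package example

import (
	"log"

	sqlite3 "github.com/mattn/go-sqlite3"
)

// copyDatabase copies the "main" database of srcConn into the "main" database
// of destConn one page at a time, logging progress along the way.
func copyDatabase(destConn, srcConn *sqlite3.SQLiteConn) error {
	bk, err := destConn.Backup("main", srcConn, "main")
	if err != nil {
		return err
	}
	defer bk.Finish()

	for {
		// Step(1) copies a single page; Step(-1) would copy all remaining pages.
		done, err := bk.Step(1)
		if err != nil {
			return err
		}
		log.Printf("backup: %d of %d pages remaining", bk.Remaining(), bk.PageCount())
		if done {
			return nil
		}
	}
}
```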

290
vendor/github.com/mattn/go-sqlite3/backup_test.go generated vendored Normal file
View File

@@ -0,0 +1,290 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package sqlite3
import (
"database/sql"
"fmt"
"os"
"testing"
"time"
)
// The number of rows of test data to create in the source database.
// Can be used to control how many pages are available to be backed up.
const testRowCount = 100
// The maximum number of seconds after which the page-by-page backup is considered to have taken too long.
const usePagePerStepsTimeoutSeconds = 30
// Test the backup functionality.
func testBackup(t *testing.T, testRowCount int, usePerPageSteps bool) {
// This function will be called multiple times.
// It uses sql.Register(), which requires the name parameter value to be unique.
// There does not currently appear to be a way to unregister a registered driver, however.
// So generate a database driver name that will likely be unique.
var driverName = fmt.Sprintf("sqlite3_testBackup_%v_%v_%v", testRowCount, usePerPageSteps, time.Now().UnixNano())
// The driver's connection will be needed in order to perform the backup.
driverConns := []*SQLiteConn{}
sql.Register(driverName, &SQLiteDriver{
ConnectHook: func(conn *SQLiteConn) error {
driverConns = append(driverConns, conn)
return nil
},
})
// Connect to the source database.
srcTempFilename := TempFilename(t)
defer os.Remove(srcTempFilename)
srcDb, err := sql.Open(driverName, srcTempFilename)
if err != nil {
t.Fatal("Failed to open the source database:", err)
}
defer srcDb.Close()
err = srcDb.Ping()
if err != nil {
t.Fatal("Failed to connect to the source database:", err)
}
// Connect to the destination database.
destTempFilename := TempFilename(t)
defer os.Remove(destTempFilename)
destDb, err := sql.Open(driverName, destTempFilename)
if err != nil {
t.Fatal("Failed to open the destination database:", err)
}
defer destDb.Close()
err = destDb.Ping()
if err != nil {
t.Fatal("Failed to connect to the destination database:", err)
}
// Check the driver connections.
if len(driverConns) != 2 {
t.Fatalf("Expected 2 driver connections, but found %v.", len(driverConns))
}
srcDbDriverConn := driverConns[0]
if srcDbDriverConn == nil {
t.Fatal("The source database driver connection is nil.")
}
destDbDriverConn := driverConns[1]
if destDbDriverConn == nil {
t.Fatal("The destination database driver connection is nil.")
}
// Generate some test data for the given ID.
var generateTestData = func(id int) string {
return fmt.Sprintf("test-%v", id)
}
// Populate the source database with a test table containing some test data.
tx, err := srcDb.Begin()
if err != nil {
t.Fatal("Failed to begin a transaction when populating the source database:", err)
}
_, err = srcDb.Exec("CREATE TABLE test (id INTEGER PRIMARY KEY, value TEXT)")
if err != nil {
tx.Rollback()
t.Fatal("Failed to create the source database \"test\" table:", err)
}
for id := 0; id < testRowCount; id++ {
_, err = srcDb.Exec("INSERT INTO test (id, value) VALUES (?, ?)", id, generateTestData(id))
if err != nil {
tx.Rollback()
t.Fatal("Failed to insert a row into the source database \"test\" table:", err)
}
}
err = tx.Commit()
if err != nil {
t.Fatal("Failed to populate the source database:", err)
}
// Confirm that the destination database is initially empty.
var destTableCount int
err = destDb.QueryRow("SELECT COUNT(*) FROM sqlite_master WHERE type = 'table'").Scan(&destTableCount)
if err != nil {
t.Fatal("Failed to check the destination table count:", err)
}
if destTableCount != 0 {
t.Fatalf("The destination database is not empty; %v table(s) found.", destTableCount)
}
// Prepare to perform the backup.
backup, err := destDbDriverConn.Backup("main", srcDbDriverConn, "main")
if err != nil {
t.Fatal("Failed to initialize the backup:", err)
}
// Allow the initial page count and remaining values to be retrieved.
// According to <https://www.sqlite.org/c3ref/backup_finish.html>, the page count and remaining values are "... only updated by sqlite3_backup_step()."
isDone, err := backup.Step(0)
if err != nil {
t.Fatal("Unable to perform an initial 0-page backup step:", err)
}
if isDone {
t.Fatal("Backup is unexpectedly done.")
}
// Check that the page count and remaining values are reasonable.
initialPageCount := backup.PageCount()
if initialPageCount <= 0 {
t.Fatalf("Unexpected initial page count value: %v", initialPageCount)
}
initialRemaining := backup.Remaining()
if initialRemaining <= 0 {
t.Fatalf("Unexpected initial remaining value: %v", initialRemaining)
}
if initialRemaining != initialPageCount {
t.Fatalf("Initial remaining value differs from the initial page count value; remaining: %v; page count: %v", initialRemaining, initialPageCount)
}
// Perform the backup.
if usePerPageSteps {
var startTime = time.Now().Unix()
// Test backing-up using a page-by-page approach.
var latestRemaining = initialRemaining
for {
// Perform the backup step.
isDone, err = backup.Step(1)
if err != nil {
t.Fatal("Failed to perform a backup step:", err)
}
// The page count should remain unchanged from its initial value.
currentPageCount := backup.PageCount()
if currentPageCount != initialPageCount {
t.Fatalf("Current page count differs from the initial page count; initial page count: %v; current page count: %v", initialPageCount, currentPageCount)
}
// There should now be one less page remaining.
currentRemaining := backup.Remaining()
expectedRemaining := latestRemaining - 1
if currentRemaining != expectedRemaining {
t.Fatalf("Unexpected remaining value; expected remaining value: %v; actual remaining value: %v", expectedRemaining, currentRemaining)
}
latestRemaining = currentRemaining
if isDone {
break
}
// Limit the runtime of the backup attempt.
if (time.Now().Unix() - startTime) > usePagePerStepsTimeoutSeconds {
t.Fatal("Backup is taking longer than expected.")
}
}
} else {
// Test the copying of all remaining pages.
isDone, err = backup.Step(-1)
if err != nil {
t.Fatal("Failed to perform a backup step:", err)
}
if !isDone {
t.Fatal("Backup is unexpectedly not done.")
}
}
// Check that the page count and remaining values are reasonable.
finalPageCount := backup.PageCount()
if finalPageCount != initialPageCount {
t.Fatalf("Final page count differs from the initial page count; initial page count: %v; final page count: %v", initialPageCount, finalPageCount)
}
finalRemaining := backup.Remaining()
if finalRemaining != 0 {
t.Fatalf("Unexpected remaining value: %v", finalRemaining)
}
// Finish the backup.
err = backup.Finish()
if err != nil {
t.Fatal("Failed to finish backup:", err)
}
// Confirm that the "test" table now exists in the destination database.
var doesTestTableExist bool
err = destDb.QueryRow("SELECT EXISTS (SELECT 1 FROM sqlite_master WHERE type = 'table' AND name = 'test' LIMIT 1) AS test_table_exists").Scan(&doesTestTableExist)
if err != nil {
t.Fatal("Failed to check if the \"test\" table exists in the destination database:", err)
}
if !doesTestTableExist {
t.Fatal("The \"test\" table could not be found in the destination database.")
}
// Confirm that the number of rows in the destination database's "test" table matches that of the source table.
var actualTestTableRowCount int
err = destDb.QueryRow("SELECT COUNT(*) FROM test").Scan(&actualTestTableRowCount)
if err != nil {
t.Fatal("Failed to determine the rowcount of the \"test\" table in the destination database:", err)
}
if testRowCount != actualTestTableRowCount {
t.Fatalf("Unexpected destination \"test\" table row count; expected: %v; found: %v", testRowCount, actualTestTableRowCount)
}
// Check each of the rows in the destination database.
for id := 0; id < testRowCount; id++ {
var checkedValue string
err = destDb.QueryRow("SELECT value FROM test WHERE id = ?", id).Scan(&checkedValue)
if err != nil {
t.Fatal("Failed to query the \"test\" table in the destination database:", err)
}
var expectedValue = generateTestData(id)
if checkedValue != expectedValue {
t.Fatalf("Unexpected value in the \"test\" table in the destination database; expected value: %v; actual value: %v", expectedValue, checkedValue)
}
}
}
func TestBackupStepByStep(t *testing.T) {
testBackup(t, testRowCount, true)
}
func TestBackupAllRemainingPages(t *testing.T) {
testBackup(t, testRowCount, false)
}
// Test the error reporting when preparing to perform a backup.
func TestBackupError(t *testing.T) {
const driverName = "sqlite3_TestBackupError"
// The driver's connection will be needed in order to perform the backup.
var dbDriverConn *SQLiteConn
sql.Register(driverName, &SQLiteDriver{
ConnectHook: func(conn *SQLiteConn) error {
dbDriverConn = conn
return nil
},
})
// Connect to the database.
dbTempFilename := TempFilename(t)
defer os.Remove(dbTempFilename)
db, err := sql.Open(driverName, dbTempFilename)
if err != nil {
t.Fatal("Failed to open the database:", err)
}
defer db.Close()
db.Ping()
// Need the driver connection in order to perform the backup.
if dbDriverConn == nil {
t.Fatal("Failed to get the driver connection.")
}
// Prepare to perform the backup.
// Intentionally using the same connection for both the source and destination databases, to trigger an error result.
backup, err := dbDriverConn.Backup("main", dbDriverConn, "main")
if err == nil {
t.Fatal("Failed to get the expected error result.")
}
const expectedError = "source and destination must be distinct"
if err.Error() != expectedError {
t.Fatalf("Unexpected error message; expected value: \"%v\"; actual value: \"%v\"", expectedError, err.Error())
}
if backup != nil {
t.Fatal("Failed to get the expected nil backup result.")
}
}
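
The tests above exercise the online backup API (`Backup`, `Step`, `Remaining`, `PageCount`, `Finish`). A minimal sketch of a one-shot backup between two databases follows; the driver name, file paths, and the assumption that the first `Ping` fills `conns[0]` are illustrative, not part of the library.

```go
package main

import (
	"database/sql"
	"log"

	sqlite3 "github.com/mattn/go-sqlite3"
)

func main() {
	// database/sql hides the driver connection, so capture it from the ConnectHook.
	var conns []*sqlite3.SQLiteConn
	sql.Register("sqlite3_backup_sketch", &sqlite3.SQLiteDriver{
		ConnectHook: func(conn *sqlite3.SQLiteConn) error {
			conns = append(conns, conn)
			return nil
		},
	})

	src, err := sql.Open("sqlite3_backup_sketch", "./source.db") // hypothetical source file
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()
	if err := src.Ping(); err != nil { // force a connection so the hook fires
		log.Fatal(err)
	}

	dst, err := sql.Open("sqlite3_backup_sketch", "./dest.db") // hypothetical destination file
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()
	if err := dst.Ping(); err != nil {
		log.Fatal(err)
	}

	// Backup is called on the destination connection: (dest schema, source conn, source schema).
	bk, err := conns[1].Backup("main", conns[0], "main")
	if err != nil {
		log.Fatal(err)
	}
	// Step(-1) copies every remaining page in one call; Finish releases the handle.
	if _, err := bk.Step(-1); err != nil {
		log.Fatal(err)
	}
	if err := bk.Finish(); err != nil {
		log.Fatal(err)
	}
}
```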

358
vendor/github.com/mattn/go-sqlite3/callback.go generated vendored Normal file
View File

@@ -0,0 +1,358 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package sqlite3
// You can't export a Go function to C and have definitions in the C
// preamble in the same file, so we have to have callbackTrampoline in
// its own file. Because we need a separate file anyway, the support
// code for SQLite custom functions is in here.
/*
#ifndef USE_LIBSQLITE3
#include <sqlite3-binding.h>
#else
#include <sqlite3.h>
#endif
#include <stdlib.h>
void _sqlite3_result_text(sqlite3_context* ctx, const char* s);
void _sqlite3_result_blob(sqlite3_context* ctx, const void* b, int l);
*/
import "C"
import (
"errors"
"fmt"
"math"
"reflect"
"sync"
"unsafe"
)
//export callbackTrampoline
func callbackTrampoline(ctx *C.sqlite3_context, argc int, argv **C.sqlite3_value) {
args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc]
fi := lookupHandle(uintptr(C.sqlite3_user_data(ctx))).(*functionInfo)
fi.Call(ctx, args)
}
//export stepTrampoline
func stepTrampoline(ctx *C.sqlite3_context, argc C.int, argv **C.sqlite3_value) {
args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:int(argc):int(argc)]
ai := lookupHandle(uintptr(C.sqlite3_user_data(ctx))).(*aggInfo)
ai.Step(ctx, args)
}
//export doneTrampoline
func doneTrampoline(ctx *C.sqlite3_context) {
handle := uintptr(C.sqlite3_user_data(ctx))
ai := lookupHandle(handle).(*aggInfo)
ai.Done(ctx)
}
//export commitHookTrampoline
func commitHookTrampoline(handle uintptr) int {
callback := lookupHandle(handle).(func() int)
return callback()
}
//export rollbackHookTrampoline
func rollbackHookTrampoline(handle uintptr) {
callback := lookupHandle(handle).(func())
callback()
}
//export updateHookTrampoline
func updateHookTrampoline(handle uintptr, op int, db *C.char, table *C.char, rowid int64) {
callback := lookupHandle(handle).(func(int, string, string, int64))
callback(op, C.GoString(db), C.GoString(table), rowid)
}
// Use handles to avoid passing Go pointers to C.
type handleVal struct {
db *SQLiteConn
val interface{}
}
var handleLock sync.Mutex
var handleVals = make(map[uintptr]handleVal)
var handleIndex uintptr = 100
func newHandle(db *SQLiteConn, v interface{}) uintptr {
handleLock.Lock()
defer handleLock.Unlock()
i := handleIndex
handleIndex++
handleVals[i] = handleVal{db, v}
return i
}
func lookupHandle(handle uintptr) interface{} {
handleLock.Lock()
defer handleLock.Unlock()
r, ok := handleVals[handle]
if !ok {
if handle >= 100 && handle < handleIndex {
panic("deleted handle")
} else {
panic("invalid handle")
}
}
return r.val
}
func deleteHandles(db *SQLiteConn) {
handleLock.Lock()
defer handleLock.Unlock()
for handle, val := range handleVals {
if val.db == db {
delete(handleVals, handle)
}
}
}
// This is only here so that tests can refer to it.
type callbackArgRaw C.sqlite3_value
type callbackArgConverter func(*C.sqlite3_value) (reflect.Value, error)
type callbackArgCast struct {
f callbackArgConverter
typ reflect.Type
}
func (c callbackArgCast) Run(v *C.sqlite3_value) (reflect.Value, error) {
val, err := c.f(v)
if err != nil {
return reflect.Value{}, err
}
if !val.Type().ConvertibleTo(c.typ) {
return reflect.Value{}, fmt.Errorf("cannot convert %s to %s", val.Type(), c.typ)
}
return val.Convert(c.typ), nil
}
func callbackArgInt64(v *C.sqlite3_value) (reflect.Value, error) {
if C.sqlite3_value_type(v) != C.SQLITE_INTEGER {
return reflect.Value{}, fmt.Errorf("argument must be an INTEGER")
}
return reflect.ValueOf(int64(C.sqlite3_value_int64(v))), nil
}
func callbackArgBool(v *C.sqlite3_value) (reflect.Value, error) {
if C.sqlite3_value_type(v) != C.SQLITE_INTEGER {
return reflect.Value{}, fmt.Errorf("argument must be an INTEGER")
}
i := int64(C.sqlite3_value_int64(v))
val := false
if i != 0 {
val = true
}
return reflect.ValueOf(val), nil
}
func callbackArgFloat64(v *C.sqlite3_value) (reflect.Value, error) {
if C.sqlite3_value_type(v) != C.SQLITE_FLOAT {
return reflect.Value{}, fmt.Errorf("argument must be a FLOAT")
}
return reflect.ValueOf(float64(C.sqlite3_value_double(v))), nil
}
func callbackArgBytes(v *C.sqlite3_value) (reflect.Value, error) {
switch C.sqlite3_value_type(v) {
case C.SQLITE_BLOB:
l := C.sqlite3_value_bytes(v)
p := C.sqlite3_value_blob(v)
return reflect.ValueOf(C.GoBytes(p, l)), nil
case C.SQLITE_TEXT:
l := C.sqlite3_value_bytes(v)
c := unsafe.Pointer(C.sqlite3_value_text(v))
return reflect.ValueOf(C.GoBytes(c, l)), nil
default:
return reflect.Value{}, fmt.Errorf("argument must be BLOB or TEXT")
}
}
func callbackArgString(v *C.sqlite3_value) (reflect.Value, error) {
switch C.sqlite3_value_type(v) {
case C.SQLITE_BLOB:
l := C.sqlite3_value_bytes(v)
p := (*C.char)(C.sqlite3_value_blob(v))
return reflect.ValueOf(C.GoStringN(p, l)), nil
case C.SQLITE_TEXT:
c := (*C.char)(unsafe.Pointer(C.sqlite3_value_text(v)))
return reflect.ValueOf(C.GoString(c)), nil
default:
return reflect.Value{}, fmt.Errorf("argument must be BLOB or TEXT")
}
}
func callbackArgGeneric(v *C.sqlite3_value) (reflect.Value, error) {
switch C.sqlite3_value_type(v) {
case C.SQLITE_INTEGER:
return callbackArgInt64(v)
case C.SQLITE_FLOAT:
return callbackArgFloat64(v)
case C.SQLITE_TEXT:
return callbackArgString(v)
case C.SQLITE_BLOB:
return callbackArgBytes(v)
case C.SQLITE_NULL:
// Interpret NULL as a nil byte slice.
var ret []byte
return reflect.ValueOf(ret), nil
default:
panic("unreachable")
}
}
func callbackArg(typ reflect.Type) (callbackArgConverter, error) {
switch typ.Kind() {
case reflect.Interface:
if typ.NumMethod() != 0 {
return nil, errors.New("the only supported interface type is interface{}")
}
return callbackArgGeneric, nil
case reflect.Slice:
if typ.Elem().Kind() != reflect.Uint8 {
return nil, errors.New("the only supported slice type is []byte")
}
return callbackArgBytes, nil
case reflect.String:
return callbackArgString, nil
case reflect.Bool:
return callbackArgBool, nil
case reflect.Int64:
return callbackArgInt64, nil
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint:
c := callbackArgCast{callbackArgInt64, typ}
return c.Run, nil
case reflect.Float64:
return callbackArgFloat64, nil
case reflect.Float32:
c := callbackArgCast{callbackArgFloat64, typ}
return c.Run, nil
default:
return nil, fmt.Errorf("don't know how to convert to %s", typ)
}
}
func callbackConvertArgs(argv []*C.sqlite3_value, converters []callbackArgConverter, variadic callbackArgConverter) ([]reflect.Value, error) {
var args []reflect.Value
if len(argv) < len(converters) {
return nil, fmt.Errorf("function requires at least %d arguments", len(converters))
}
for i, arg := range argv[:len(converters)] {
v, err := converters[i](arg)
if err != nil {
return nil, err
}
args = append(args, v)
}
if variadic != nil {
for _, arg := range argv[len(converters):] {
v, err := variadic(arg)
if err != nil {
return nil, err
}
args = append(args, v)
}
}
return args, nil
}
type callbackRetConverter func(*C.sqlite3_context, reflect.Value) error
func callbackRetInteger(ctx *C.sqlite3_context, v reflect.Value) error {
switch v.Type().Kind() {
case reflect.Int64:
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint:
v = v.Convert(reflect.TypeOf(int64(0)))
case reflect.Bool:
b := v.Interface().(bool)
if b {
v = reflect.ValueOf(int64(1))
} else {
v = reflect.ValueOf(int64(0))
}
default:
return fmt.Errorf("cannot convert %s to INTEGER", v.Type())
}
C.sqlite3_result_int64(ctx, C.sqlite3_int64(v.Interface().(int64)))
return nil
}
func callbackRetFloat(ctx *C.sqlite3_context, v reflect.Value) error {
switch v.Type().Kind() {
case reflect.Float64:
case reflect.Float32:
v = v.Convert(reflect.TypeOf(float64(0)))
default:
return fmt.Errorf("cannot convert %s to FLOAT", v.Type())
}
C.sqlite3_result_double(ctx, C.double(v.Interface().(float64)))
return nil
}
func callbackRetBlob(ctx *C.sqlite3_context, v reflect.Value) error {
if v.Type().Kind() != reflect.Slice || v.Type().Elem().Kind() != reflect.Uint8 {
return fmt.Errorf("cannot convert %s to BLOB", v.Type())
}
i := v.Interface()
if i == nil || len(i.([]byte)) == 0 {
C.sqlite3_result_null(ctx)
} else {
bs := i.([]byte)
C._sqlite3_result_blob(ctx, unsafe.Pointer(&bs[0]), C.int(len(bs)))
}
return nil
}
func callbackRetText(ctx *C.sqlite3_context, v reflect.Value) error {
if v.Type().Kind() != reflect.String {
return fmt.Errorf("cannot convert %s to TEXT", v.Type())
}
C._sqlite3_result_text(ctx, C.CString(v.Interface().(string)))
return nil
}
func callbackRet(typ reflect.Type) (callbackRetConverter, error) {
switch typ.Kind() {
case reflect.Slice:
if typ.Elem().Kind() != reflect.Uint8 {
return nil, errors.New("the only supported slice type is []byte")
}
return callbackRetBlob, nil
case reflect.String:
return callbackRetText, nil
case reflect.Bool, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint:
return callbackRetInteger, nil
case reflect.Float32, reflect.Float64:
return callbackRetFloat, nil
default:
return nil, fmt.Errorf("don't know how to convert to %s", typ)
}
}
func callbackError(ctx *C.sqlite3_context, err error) {
cstr := C.CString(err.Error())
defer C.free(unsafe.Pointer(cstr))
C.sqlite3_result_error(ctx, cstr, -1)
}
// Test support code. Tests are not allowed to import "C", so we can't
// declare any functions that use C.sqlite3_value.
func callbackSyntheticForTests(v reflect.Value, err error) callbackArgConverter {
return func(*C.sqlite3_value) (reflect.Value, error) {
return v, err
}
}
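
These converters are the support code behind `RegisterFunc`, which exposes an ordinary Go function to SQL. A minimal sketch, assuming only the argument and return kinds handled above; the driver name and the `pow` helper are made up for illustration.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	sqlite3 "github.com/mattn/go-sqlite3"
)

// pow is a plain Go function; its int64 arguments and result map to SQLite INTEGER.
func pow(base, exp int64) int64 {
	result := int64(1)
	for i := int64(0); i < exp; i++ {
		result *= base
	}
	return result
}

func main() {
	sql.Register("sqlite3_with_pow", &sqlite3.SQLiteDriver{
		ConnectHook: func(conn *sqlite3.SQLiteConn) error {
			// pure=true tells SQLite the function is deterministic.
			return conn.RegisterFunc("pow", pow, true)
		},
	})

	db, err := sql.Open("sqlite3_with_pow", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var out int64
	if err := db.QueryRow("SELECT pow(2, 10)").Scan(&out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // 1024
}
```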

97
vendor/github.com/mattn/go-sqlite3/callback_test.go generated vendored Normal file
View File

@@ -0,0 +1,97 @@
package sqlite3
import (
"errors"
"math"
"reflect"
"testing"
)
func TestCallbackArgCast(t *testing.T) {
intConv := callbackSyntheticForTests(reflect.ValueOf(int64(math.MaxInt64)), nil)
floatConv := callbackSyntheticForTests(reflect.ValueOf(float64(math.MaxFloat64)), nil)
errConv := callbackSyntheticForTests(reflect.Value{}, errors.New("test"))
tests := []struct {
f callbackArgConverter
o reflect.Value
}{
{intConv, reflect.ValueOf(int8(-1))},
{intConv, reflect.ValueOf(int16(-1))},
{intConv, reflect.ValueOf(int32(-1))},
{intConv, reflect.ValueOf(uint8(math.MaxUint8))},
{intConv, reflect.ValueOf(uint16(math.MaxUint16))},
{intConv, reflect.ValueOf(uint32(math.MaxUint32))},
// Special case, int64->uint64 is only 1<<63 - 1, not 1<<64 - 1
{intConv, reflect.ValueOf(uint64(math.MaxInt64))},
{floatConv, reflect.ValueOf(float32(math.Inf(1)))},
}
for _, test := range tests {
conv := callbackArgCast{test.f, test.o.Type()}
val, err := conv.Run(nil)
if err != nil {
t.Errorf("Couldn't convert to %s: %s", test.o.Type(), err)
} else if !reflect.DeepEqual(val.Interface(), test.o.Interface()) {
t.Errorf("Unexpected result from converting to %s: got %v, want %v", test.o.Type(), val.Interface(), test.o.Interface())
}
}
conv := callbackArgCast{errConv, reflect.TypeOf(int8(0))}
_, err := conv.Run(nil)
if err == nil {
t.Errorf("Expected error during callbackArgCast, but got none")
}
}
func TestCallbackConverters(t *testing.T) {
tests := []struct {
v interface{}
err bool
}{
// Unfortunately, we can't tell which converter was returned,
// but we can at least check which types can be converted.
{[]byte{0}, false},
{"text", false},
{true, false},
{int8(0), false},
{int16(0), false},
{int32(0), false},
{int64(0), false},
{uint8(0), false},
{uint16(0), false},
{uint32(0), false},
{uint64(0), false},
{int(0), false},
{uint(0), false},
{float64(0), false},
{float32(0), false},
{func() {}, true},
{complex64(complex(0, 0)), true},
{complex128(complex(0, 0)), true},
{struct{}{}, true},
{map[string]string{}, true},
{[]string{}, true},
{(*int8)(nil), true},
{make(chan int), true},
}
for _, test := range tests {
_, err := callbackArg(reflect.TypeOf(test.v))
if test.err && err == nil {
t.Errorf("Expected an error when converting %s, got no error", reflect.TypeOf(test.v))
} else if !test.err && err != nil {
t.Errorf("Expected converter when converting %s, got error: %s", reflect.TypeOf(test.v), err)
}
}
for _, test := range tests {
_, err := callbackRet(reflect.TypeOf(test.v))
if test.err && err == nil {
t.Errorf("Expected an error when converting %s, got no error", reflect.TypeOf(test.v))
} else if !test.err && err != nil {
t.Errorf("Expected converter when converting %s, got error: %s", reflect.TypeOf(test.v), err)
}
}
}

112
vendor/github.com/mattn/go-sqlite3/doc.go generated vendored Normal file
View File

@@ -0,0 +1,112 @@
/*
Package sqlite3 provides an interface to SQLite3 databases.
This works as a driver for database/sql.
Installation
go get github.com/mattn/go-sqlite3
Supported Types
Currently, go-sqlite3 supports the following data types.
+------------------------------+
|go        | sqlite3           |
|----------|-------------------|
|nil       | null              |
|int       | integer           |
|int64     | integer           |
|float64   | float             |
|bool      | integer           |
|[]byte    | blob              |
|string    | text              |
|time.Time | timestamp/datetime|
+------------------------------+
SQLite3 Extension
You can write your own extension module for sqlite3. For example, below is an
extension for a Regexp matcher operation.
#include <pcre.h>
#include <string.h>
#include <stdio.h>
#include <sqlite3ext.h>
SQLITE_EXTENSION_INIT1
static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) {
if (argc >= 2) {
const char *target = (const char *)sqlite3_value_text(argv[1]);
const char *pattern = (const char *)sqlite3_value_text(argv[0]);
const char* errstr = NULL;
int erroff = 0;
int vec[500];
int n, rc;
pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL);
rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500);
if (rc <= 0) {
sqlite3_result_error(context, errstr, 0);
return;
}
sqlite3_result_int(context, 1);
}
}
#ifdef _WIN32
__declspec(dllexport)
#endif
int sqlite3_extension_init(sqlite3 *db, char **errmsg,
const sqlite3_api_routines *api) {
SQLITE_EXTENSION_INIT2(api);
return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8,
(void*)db, regexp_func, NULL, NULL);
}
It needs to be built as a .so/.dll shared library, and the extension module
needs to be registered as below.
sql.Register("sqlite3_with_extensions",
&sqlite3.SQLiteDriver{
Extensions: []string{
"sqlite3_mod_regexp",
},
})
Then, you can use this extension.
rows, err := db.Query("select text from mytable where name regexp '^golang'")
Connection Hook
You can hook and inject your code when the connection is established. database/sql
doesn't expose the native go-sqlite3 interfaces, so if you need them,
set ConnectHook and capture the SQLiteConn.
sql.Register("sqlite3_with_hook_example",
&sqlite3.SQLiteDriver{
ConnectHook: func(conn *sqlite3.SQLiteConn) error {
sqlite3conn = append(sqlite3conn, conn)
return nil
},
})
Go SQLite3 Extensions
If you want to register Go functions as SQLite extension functions,
call RegisterFunc from ConnectHook.
regex = func(re, s string) (bool, error) {
return regexp.MatchString(re, s)
}
sql.Register("sqlite3_with_go_func",
&sqlite3.SQLiteDriver{
ConnectHook: func(conn *sqlite3.SQLiteConn) error {
return conn.RegisterFunc("regexp", regex, true)
},
})
See the documentation of RegisterFunc for more details.
*/
package sqlite3
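
The fragments in this documentation can be combined into a runnable program; the following sketch wires up the Go regexp function example end to end (driver name, table, and data are illustrative).

```go
package main

import (
	"database/sql"
	"fmt"
	"log"
	"regexp"

	sqlite3 "github.com/mattn/go-sqlite3"
)

func main() {
	regex := func(re, s string) (bool, error) {
		return regexp.MatchString(re, s)
	}
	sql.Register("sqlite3_with_go_func", &sqlite3.SQLiteDriver{
		ConnectHook: func(conn *sqlite3.SQLiteConn) error {
			return conn.RegisterFunc("regexp", regex, true)
		},
	})

	db, err := sql.Open("sqlite3_with_go_func", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	// Keep a single connection so the in-memory database is shared across statements.
	db.SetMaxOpenConns(1)

	if _, err := db.Exec("CREATE TABLE mytable (value TEXT)"); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec("INSERT INTO mytable (value) VALUES (?)", "golang is fun"); err != nil {
		log.Fatal(err)
	}

	// The SQL REGEXP operator dispatches to the registered "regexp" function.
	var value string
	if err := db.QueryRow("SELECT value FROM mytable WHERE value REGEXP '^golang'").Scan(&value); err != nil {
		log.Fatal(err)
	}
	fmt.Println(value)
}
```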

135
vendor/github.com/mattn/go-sqlite3/error.go generated vendored Normal file
View File

@@ -0,0 +1,135 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package sqlite3
import "C"
// ErrNo inherits errno.
type ErrNo int
// ErrNoMask is the mask for the basic error code.
const ErrNoMask C.int = 0xff
// ErrNoExtended is an extended errno.
type ErrNoExtended int
// Error represents an SQLite error, carrying the basic and extended codes and the message.
type Error struct {
Code ErrNo /* The error code returned by SQLite */
ExtendedCode ErrNoExtended /* The extended error code returned by SQLite */
err string /* The error string returned by sqlite3_errmsg(),
this usually contains more specific details. */
}
// result codes from http://www.sqlite.org/c3ref/c_abort.html
var (
ErrError = ErrNo(1) /* SQL error or missing database */
ErrInternal = ErrNo(2) /* Internal logic error in SQLite */
ErrPerm = ErrNo(3) /* Access permission denied */
ErrAbort = ErrNo(4) /* Callback routine requested an abort */
ErrBusy = ErrNo(5) /* The database file is locked */
ErrLocked = ErrNo(6) /* A table in the database is locked */
ErrNomem = ErrNo(7) /* A malloc() failed */
ErrReadonly = ErrNo(8) /* Attempt to write a readonly database */
ErrInterrupt = ErrNo(9) /* Operation terminated by sqlite3_interrupt() */
ErrIoErr = ErrNo(10) /* Some kind of disk I/O error occurred */
ErrCorrupt = ErrNo(11) /* The database disk image is malformed */
ErrNotFound = ErrNo(12) /* Unknown opcode in sqlite3_file_control() */
ErrFull = ErrNo(13) /* Insertion failed because database is full */
ErrCantOpen = ErrNo(14) /* Unable to open the database file */
ErrProtocol = ErrNo(15) /* Database lock protocol error */
ErrEmpty = ErrNo(16) /* Database is empty */
ErrSchema = ErrNo(17) /* The database schema changed */
ErrTooBig = ErrNo(18) /* String or BLOB exceeds size limit */
ErrConstraint = ErrNo(19) /* Abort due to constraint violation */
ErrMismatch = ErrNo(20) /* Data type mismatch */
ErrMisuse = ErrNo(21) /* Library used incorrectly */
ErrNoLFS = ErrNo(22) /* Uses OS features not supported on host */
ErrAuth = ErrNo(23) /* Authorization denied */
ErrFormat = ErrNo(24) /* Auxiliary database format error */
ErrRange = ErrNo(25) /* 2nd parameter to sqlite3_bind out of range */
ErrNotADB = ErrNo(26) /* File opened that is not a database file */
ErrNotice = ErrNo(27) /* Notifications from sqlite3_log() */
ErrWarning = ErrNo(28) /* Warnings from sqlite3_log() */
)
// Error returns the error message for the errno.
func (err ErrNo) Error() string {
return Error{Code: err}.Error()
}
// Extend returns the extended errno.
func (err ErrNo) Extend(by int) ErrNoExtended {
return ErrNoExtended(int(err) | (by << 8))
}
// Error returns the error message for the extended error code.
func (err ErrNoExtended) Error() string {
return Error{Code: ErrNo(C.int(err) & ErrNoMask), ExtendedCode: err}.Error()
}
func (err Error) Error() string {
if err.err != "" {
return err.err
}
return errorString(err)
}
// result codes from http://www.sqlite.org/c3ref/c_abort_rollback.html
var (
ErrIoErrRead = ErrIoErr.Extend(1)
ErrIoErrShortRead = ErrIoErr.Extend(2)
ErrIoErrWrite = ErrIoErr.Extend(3)
ErrIoErrFsync = ErrIoErr.Extend(4)
ErrIoErrDirFsync = ErrIoErr.Extend(5)
ErrIoErrTruncate = ErrIoErr.Extend(6)
ErrIoErrFstat = ErrIoErr.Extend(7)
ErrIoErrUnlock = ErrIoErr.Extend(8)
ErrIoErrRDlock = ErrIoErr.Extend(9)
ErrIoErrDelete = ErrIoErr.Extend(10)
ErrIoErrBlocked = ErrIoErr.Extend(11)
ErrIoErrNoMem = ErrIoErr.Extend(12)
ErrIoErrAccess = ErrIoErr.Extend(13)
ErrIoErrCheckReservedLock = ErrIoErr.Extend(14)
ErrIoErrLock = ErrIoErr.Extend(15)
ErrIoErrClose = ErrIoErr.Extend(16)
ErrIoErrDirClose = ErrIoErr.Extend(17)
ErrIoErrSHMOpen = ErrIoErr.Extend(18)
ErrIoErrSHMSize = ErrIoErr.Extend(19)
ErrIoErrSHMLock = ErrIoErr.Extend(20)
ErrIoErrSHMMap = ErrIoErr.Extend(21)
ErrIoErrSeek = ErrIoErr.Extend(22)
ErrIoErrDeleteNoent = ErrIoErr.Extend(23)
ErrIoErrMMap = ErrIoErr.Extend(24)
ErrIoErrGetTempPath = ErrIoErr.Extend(25)
ErrIoErrConvPath = ErrIoErr.Extend(26)
ErrLockedSharedCache = ErrLocked.Extend(1)
ErrBusyRecovery = ErrBusy.Extend(1)
ErrBusySnapshot = ErrBusy.Extend(2)
ErrCantOpenNoTempDir = ErrCantOpen.Extend(1)
ErrCantOpenIsDir = ErrCantOpen.Extend(2)
ErrCantOpenFullPath = ErrCantOpen.Extend(3)
ErrCantOpenConvPath = ErrCantOpen.Extend(4)
ErrCorruptVTab = ErrCorrupt.Extend(1)
ErrReadonlyRecovery = ErrReadonly.Extend(1)
ErrReadonlyCantLock = ErrReadonly.Extend(2)
ErrReadonlyRollback = ErrReadonly.Extend(3)
ErrReadonlyDbMoved = ErrReadonly.Extend(4)
ErrAbortRollback = ErrAbort.Extend(2)
ErrConstraintCheck = ErrConstraint.Extend(1)
ErrConstraintCommitHook = ErrConstraint.Extend(2)
ErrConstraintForeignKey = ErrConstraint.Extend(3)
ErrConstraintFunction = ErrConstraint.Extend(4)
ErrConstraintNotNull = ErrConstraint.Extend(5)
ErrConstraintPrimaryKey = ErrConstraint.Extend(6)
ErrConstraintTrigger = ErrConstraint.Extend(7)
ErrConstraintUnique = ErrConstraint.Extend(8)
ErrConstraintVTab = ErrConstraint.Extend(9)
ErrConstraintRowID = ErrConstraint.Extend(10)
ErrNoticeRecoverWAL = ErrNotice.Extend(1)
ErrNoticeRecoverRollback = ErrNotice.Extend(2)
ErrWarningAutoIndex = ErrWarning.Extend(1)
)
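
Callers can recover these codes by type-asserting the error that comes back through database/sql, as the error tests that follow also demonstrate. A short sketch; the table and values are made up.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	sqlite3 "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	// Keep a single connection so the in-memory database is shared across statements.
	db.SetMaxOpenConns(1)

	if _, err := db.Exec("CREATE TABLE t (id INTEGER PRIMARY KEY, v TEXT UNIQUE)"); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec("INSERT INTO t (id, v) VALUES (1, 'a')"); err != nil {
		log.Fatal(err)
	}

	// Violates the UNIQUE constraint on v.
	_, err = db.Exec("INSERT INTO t (id, v) VALUES (2, 'a')")
	if sqliteErr, ok := err.(sqlite3.Error); ok {
		// Code carries the basic result code, ExtendedCode the more specific one.
		fmt.Println(sqliteErr.Code == sqlite3.ErrConstraint)               // true
		fmt.Println(sqliteErr.ExtendedCode == sqlite3.ErrConstraintUnique) // true
	}
}
```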

242
vendor/github.com/mattn/go-sqlite3/error_test.go generated vendored Normal file
View File

@@ -0,0 +1,242 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package sqlite3
import (
"database/sql"
"io/ioutil"
"os"
"path"
"testing"
)
func TestSimpleError(t *testing.T) {
e := ErrError.Error()
if e != "SQL logic error or missing database" {
t.Error("wrong error code:" + e)
}
}
func TestCorruptDbErrors(t *testing.T) {
dirName, err := ioutil.TempDir("", "sqlite3")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dirName)
dbFileName := path.Join(dirName, "test.db")
f, err := os.Create(dbFileName)
if err != nil {
t.Error(err)
}
f.Write([]byte{1, 2, 3, 4, 5})
f.Close()
db, err := sql.Open("sqlite3", dbFileName)
if err == nil {
_, err = db.Exec("drop table foo")
}
sqliteErr := err.(Error)
if sqliteErr.Code != ErrNotADB {
t.Error("wrong error code for corrupted DB")
}
if err.Error() == "" {
t.Error("wrong error string for corrupted DB")
}
db.Close()
}
func TestSqlLogicErrors(t *testing.T) {
dirName, err := ioutil.TempDir("", "sqlite3")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dirName)
dbFileName := path.Join(dirName, "test.db")
db, err := sql.Open("sqlite3", dbFileName)
if err != nil {
t.Error(err)
}
defer db.Close()
_, err = db.Exec("CREATE TABLE Foo (id INTEGER PRIMARY KEY)")
if err != nil {
t.Error(err)
}
const expectedErr = "table Foo already exists"
_, err = db.Exec("CREATE TABLE Foo (id INTEGER PRIMARY KEY)")
if err.Error() != expectedErr {
t.Errorf("Unexpected error: %s, expected %s", err.Error(), expectedErr)
}
}
func TestExtendedErrorCodes_ForeignKey(t *testing.T) {
dirName, err := ioutil.TempDir("", "sqlite3-err")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dirName)
dbFileName := path.Join(dirName, "test.db")
db, err := sql.Open("sqlite3", dbFileName)
if err != nil {
t.Error(err)
}
defer db.Close()
_, err = db.Exec("PRAGMA foreign_keys=ON;")
if err != nil {
t.Errorf("PRAGMA foreign_keys=ON: %v", err)
}
_, err = db.Exec(`CREATE TABLE Foo (
id INTEGER PRIMARY KEY AUTOINCREMENT,
value INTEGER NOT NULL,
ref INTEGER NULL REFERENCES Foo (id),
UNIQUE(value)
);`)
if err != nil {
t.Error(err)
}
_, err = db.Exec("INSERT INTO Foo (ref, value) VALUES (100, 100);")
if err == nil {
t.Error("No error!")
} else {
sqliteErr := err.(Error)
if sqliteErr.Code != ErrConstraint {
t.Errorf("Wrong basic error code: %d != %d",
sqliteErr.Code, ErrConstraint)
}
if sqliteErr.ExtendedCode != ErrConstraintForeignKey {
t.Errorf("Wrong extended error code: %d != %d",
sqliteErr.ExtendedCode, ErrConstraintForeignKey)
}
}
}
func TestExtendedErrorCodes_NotNull(t *testing.T) {
dirName, err := ioutil.TempDir("", "sqlite3-err")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dirName)
dbFileName := path.Join(dirName, "test.db")
db, err := sql.Open("sqlite3", dbFileName)
if err != nil {
t.Error(err)
}
defer db.Close()
_, err = db.Exec("PRAGMA foreign_keys=ON;")
if err != nil {
t.Errorf("PRAGMA foreign_keys=ON: %v", err)
}
_, err = db.Exec(`CREATE TABLE Foo (
id INTEGER PRIMARY KEY AUTOINCREMENT,
value INTEGER NOT NULL,
ref INTEGER NULL REFERENCES Foo (id),
UNIQUE(value)
);`)
if err != nil {
t.Error(err)
}
res, err := db.Exec("INSERT INTO Foo (value) VALUES (100);")
if err != nil {
t.Fatalf("Creating first row: %v", err)
}
id, err := res.LastInsertId()
if err != nil {
t.Fatalf("Retrieving last insert id: %v", err)
}
_, err = db.Exec("INSERT INTO Foo (ref) VALUES (?);", id)
if err == nil {
t.Error("No error!")
} else {
sqliteErr := err.(Error)
if sqliteErr.Code != ErrConstraint {
t.Errorf("Wrong basic error code: %d != %d",
sqliteErr.Code, ErrConstraint)
}
if sqliteErr.ExtendedCode != ErrConstraintNotNull {
t.Errorf("Wrong extended error code: %d != %d",
sqliteErr.ExtendedCode, ErrConstraintNotNull)
}
}
}
func TestExtendedErrorCodes_Unique(t *testing.T) {
dirName, err := ioutil.TempDir("", "sqlite3-err")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dirName)
dbFileName := path.Join(dirName, "test.db")
db, err := sql.Open("sqlite3", dbFileName)
if err != nil {
t.Error(err)
}
defer db.Close()
_, err = db.Exec("PRAGMA foreign_keys=ON;")
if err != nil {
t.Errorf("PRAGMA foreign_keys=ON: %v", err)
}
_, err = db.Exec(`CREATE TABLE Foo (
id INTEGER PRIMARY KEY AUTOINCREMENT,
value INTEGER NOT NULL,
ref INTEGER NULL REFERENCES Foo (id),
UNIQUE(value)
);`)
if err != nil {
t.Error(err)
}
res, err := db.Exec("INSERT INTO Foo (value) VALUES (100);")
if err != nil {
t.Fatalf("Creating first row: %v", err)
}
id, err := res.LastInsertId()
if err != nil {
t.Fatalf("Retrieving last insert id: %v", err)
}
_, err = db.Exec("INSERT INTO Foo (ref, value) VALUES (?, 100);", id)
if err == nil {
t.Error("No error!")
} else {
sqliteErr := err.(Error)
if sqliteErr.Code != ErrConstraint {
t.Errorf("Wrong basic error code: %d != %d",
sqliteErr.Code, ErrConstraint)
}
if sqliteErr.ExtendedCode != ErrConstraintUnique {
t.Errorf("Wrong extended error code: %d != %d",
sqliteErr.ExtendedCode, ErrConstraintUnique)
}
extended := sqliteErr.Code.Extend(3).Error()
expected := "constraint failed"
if extended != expected {
t.Errorf("Wrong basic error code: %q != %q",
extended, expected)
}
}
}

201408
vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c generated vendored Normal file

File diff suppressed because it is too large Load Diff

10475
vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h generated vendored Normal file

File diff suppressed because it is too large Load Diff

1083
vendor/github.com/mattn/go-sqlite3/sqlite3.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

103
vendor/github.com/mattn/go-sqlite3/sqlite3_context.go generated vendored Normal file
View File

@@ -0,0 +1,103 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package sqlite3
/*
#ifndef USE_LIBSQLITE3
#include <sqlite3-binding.h>
#else
#include <sqlite3.h>
#endif
#include <stdlib.h>
// These wrappers are necessary because SQLITE_TRANSIENT
// is a pointer constant, and cgo doesn't translate them correctly.
static inline void my_result_text(sqlite3_context *ctx, char *p, int np) {
sqlite3_result_text(ctx, p, np, SQLITE_TRANSIENT);
}
static inline void my_result_blob(sqlite3_context *ctx, void *p, int np) {
sqlite3_result_blob(ctx, p, np, SQLITE_TRANSIENT);
}
*/
import "C"
import (
"math"
"reflect"
"unsafe"
)
const i64 = unsafe.Sizeof(int(0)) > 4
// SQLiteContext corresponds to the C sqlite3_context type.
type SQLiteContext C.sqlite3_context
// ResultBool sets the result of an SQL function.
func (c *SQLiteContext) ResultBool(b bool) {
if b {
c.ResultInt(1)
} else {
c.ResultInt(0)
}
}
// ResultBlob sets the result of an SQL function.
// See: sqlite3_result_blob, http://sqlite.org/c3ref/result_blob.html
func (c *SQLiteContext) ResultBlob(b []byte) {
if i64 && len(b) > math.MaxInt32 {
C.sqlite3_result_error_toobig((*C.sqlite3_context)(c))
return
}
var p *byte
if len(b) > 0 {
p = &b[0]
}
C.my_result_blob((*C.sqlite3_context)(c), unsafe.Pointer(p), C.int(len(b)))
}
// ResultDouble sets the result of an SQL function.
// See: sqlite3_result_double, http://sqlite.org/c3ref/result_blob.html
func (c *SQLiteContext) ResultDouble(d float64) {
C.sqlite3_result_double((*C.sqlite3_context)(c), C.double(d))
}
// ResultInt sets the result of an SQL function.
// See: sqlite3_result_int, http://sqlite.org/c3ref/result_blob.html
func (c *SQLiteContext) ResultInt(i int) {
if i64 && (i > math.MaxInt32 || i < math.MinInt32) {
C.sqlite3_result_int64((*C.sqlite3_context)(c), C.sqlite3_int64(i))
} else {
C.sqlite3_result_int((*C.sqlite3_context)(c), C.int(i))
}
}
// ResultInt64 sets the result of an SQL function.
// See: sqlite3_result_int64, http://sqlite.org/c3ref/result_blob.html
func (c *SQLiteContext) ResultInt64(i int64) {
C.sqlite3_result_int64((*C.sqlite3_context)(c), C.sqlite3_int64(i))
}
// ResultNull sets the result of an SQL function.
// See: sqlite3_result_null, http://sqlite.org/c3ref/result_blob.html
func (c *SQLiteContext) ResultNull() {
C.sqlite3_result_null((*C.sqlite3_context)(c))
}
// ResultText sets the result of an SQL function.
// See: sqlite3_result_text, http://sqlite.org/c3ref/result_blob.html
func (c *SQLiteContext) ResultText(s string) {
h := (*reflect.StringHeader)(unsafe.Pointer(&s))
cs, l := (*C.char)(unsafe.Pointer(h.Data)), C.int(h.Len)
C.my_result_text((*C.sqlite3_context)(c), cs, l)
}
// ResultZeroblob sets the result of an SQL function.
// See: sqlite3_result_zeroblob, http://sqlite.org/c3ref/result_blob.html
func (c *SQLiteContext) ResultZeroblob(n int) {
C.sqlite3_result_zeroblob((*C.sqlite3_context)(c), C.int(n))
}

130
vendor/github.com/mattn/go-sqlite3/sqlite3_fts3_test.go generated vendored Normal file
View File

@@ -0,0 +1,130 @@
// Copyright (C) 2015 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package sqlite3
import (
"database/sql"
"os"
"testing"
)
func TestFTS3(t *testing.T) {
tempFilename := TempFilename(t)
defer os.Remove(tempFilename)
db, err := sql.Open("sqlite3", tempFilename)
if err != nil {
t.Fatal("Failed to open database:", err)
}
defer db.Close()
_, err = db.Exec("DROP TABLE foo")
_, err = db.Exec("CREATE VIRTUAL TABLE foo USING fts3(id INTEGER PRIMARY KEY, value TEXT)")
if err != nil {
t.Fatal("Failed to create table:", err)
}
_, err = db.Exec("INSERT INTO foo(id, value) VALUES(?, ?)", 1, `今日の 晩御飯は 天麩羅よ`)
if err != nil {
t.Fatal("Failed to insert value:", err)
}
_, err = db.Exec("INSERT INTO foo(id, value) VALUES(?, ?)", 2, `今日は いい 天気だ`)
if err != nil {
t.Fatal("Failed to insert value:", err)
}
rows, err := db.Query("SELECT id, value FROM foo WHERE value MATCH '今日* 天*'")
if err != nil {
t.Fatal("Unable to query foo table:", err)
}
defer rows.Close()
for rows.Next() {
var id int
var value string
if err := rows.Scan(&id, &value); err != nil {
t.Error("Unable to scan results:", err)
continue
}
if id == 1 && value != `今日の 晩御飯は 天麩羅よ` {
t.Error("Value for id 1 should be `今日の 晩御飯は 天麩羅よ`, but:", value)
} else if id == 2 && value != `今日は いい 天気だ` {
t.Error("Value for id 2 should be `今日は いい 天気だ`, but:", value)
}
}
rows, err = db.Query("SELECT value FROM foo WHERE value MATCH '今日* 天麩羅*'")
if err != nil {
t.Fatal("Unable to query foo table:", err)
}
defer rows.Close()
var value string
if !rows.Next() {
t.Fatal("Result should be only one")
}
if err := rows.Scan(&value); err != nil {
t.Fatal("Unable to scan results:", err)
}
if value != `今日の 晩御飯は 天麩羅よ` {
t.Fatal("Value should be `今日の 晩御飯は 天麩羅よ`, but:", value)
}
if rows.Next() {
t.Fatal("Result should be only one")
}
}
func TestFTS4(t *testing.T) {
tempFilename := TempFilename(t)
defer os.Remove(tempFilename)
db, err := sql.Open("sqlite3", tempFilename)
if err != nil {
t.Fatal("Failed to open database:", err)
}
defer db.Close()
_, err = db.Exec("DROP TABLE foo")
_, err = db.Exec("CREATE VIRTUAL TABLE foo USING fts4(tokenize=unicode61, id INTEGER PRIMARY KEY, value TEXT)")
switch {
case err != nil && err.Error() == "unknown tokenizer: unicode61":
t.Skip("FTS4 not supported")
case err != nil:
t.Fatal("Failed to create table:", err)
}
_, err = db.Exec("INSERT INTO foo(id, value) VALUES(?, ?)", 1, `février`)
if err != nil {
t.Fatal("Failed to insert value:", err)
}
rows, err := db.Query("SELECT value FROM foo WHERE value MATCH 'fevrier'")
if err != nil {
t.Fatal("Unable to query foo table:", err)
}
defer rows.Close()
var value string
if !rows.Next() {
t.Fatal("Result should be only one")
}
if err := rows.Scan(&value); err != nil {
t.Fatal("Unable to scan results:", err)
}
if value != `février` {
t.Fatal("Value should be `février`, but:", value)
}
if rows.Next() {
t.Fatal("Result should be only one")
}
}
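
Outside the test harness, full-text search is driven through plain database/sql calls. A compact sketch, assuming the bundled SQLite is built with FTS enabled as the tests above require; the table and contents are made up.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	// Keep a single connection so the in-memory database is shared across statements.
	db.SetMaxOpenConns(1)

	// FTS4 virtual table with a single indexed column.
	if _, err := db.Exec("CREATE VIRTUAL TABLE docs USING fts4(body TEXT)"); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec("INSERT INTO docs(body) VALUES (?)", "the quick brown fox"); err != nil {
		log.Fatal(err)
	}

	// MATCH runs the full-text query.
	var body string
	if err := db.QueryRow("SELECT body FROM docs WHERE body MATCH 'quick'").Scan(&body); err != nil {
		log.Fatal(err)
	}
	fmt.Println(body)
}
```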

13
vendor/github.com/mattn/go-sqlite3/sqlite3_fts5.go generated vendored Normal file
View File

@@ -0,0 +1,13 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build fts5
package sqlite3
/*
#cgo CFLAGS: -DSQLITE_ENABLE_FTS5
#cgo LDFLAGS: -lm
*/
import "C"

69
vendor/github.com/mattn/go-sqlite3/sqlite3_go18.go generated vendored Normal file
View File

@@ -0,0 +1,69 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build go1.8
package sqlite3
import (
"database/sql/driver"
"errors"
"context"
)
// Ping implements the driver.Pinger interface.
func (c *SQLiteConn) Ping(ctx context.Context) error {
if c.db == nil {
return errors.New("Connection was closed")
}
return nil
}
// QueryContext implements the driver.QueryerContext interface.
func (c *SQLiteConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
list := make([]namedValue, len(args))
for i, nv := range args {
list[i] = namedValue(nv)
}
return c.query(ctx, query, list)
}
// ExecContext implements the driver.ExecerContext interface.
func (c *SQLiteConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
list := make([]namedValue, len(args))
for i, nv := range args {
list[i] = namedValue(nv)
}
return c.exec(ctx, query, list)
}
// PrepareContext implements the driver.ConnPrepareContext interface.
func (c *SQLiteConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
return c.prepare(ctx, query)
}
// BeginTx implements the driver.ConnBeginTx interface.
func (c *SQLiteConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
return c.begin(ctx)
}
// QueryContext implements the driver.StmtQueryContext interface.
func (s *SQLiteStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
list := make([]namedValue, len(args))
for i, nv := range args {
list[i] = namedValue(nv)
}
return s.query(ctx, list)
}
// ExecContext implements the driver.StmtExecContext interface.
func (s *SQLiteStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
list := make([]namedValue, len(args))
for i, nv := range args {
list[i] = namedValue(nv)
}
return s.exec(ctx, list)
}
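
With these methods the driver participates in database/sql's context plumbing on Go 1.8 and later. A small sketch of a query carrying a timeout:

```go
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"
	"time"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// QueryRowContext flows through SQLiteConn.QueryContext shown above.
	var one int
	if err := db.QueryRowContext(ctx, "SELECT 1").Scan(&one); err != nil {
		log.Fatal(err)
	}
	fmt.Println(one)
}
```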

View File

@@ -0,0 +1,50 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build go1.8
package sqlite3
import (
"database/sql"
"os"
"testing"
)
func TestNamedParams(t *testing.T) {
tempFilename := TempFilename(t)
defer os.Remove(tempFilename)
db, err := sql.Open("sqlite3", tempFilename)
if err != nil {
t.Fatal("Failed to open database:", err)
}
defer db.Close()
_, err = db.Exec(`
create table foo (id integer, name text, extra text);
`)
if err != nil {
t.Error("Failed to call db.Query:", err)
}
_, err = db.Exec(`insert into foo(id, name, extra) values(:id, :name, :name)`, sql.Named("name", "foo"), sql.Named("id", 1))
if err != nil {
t.Error("Failed to call db.Exec:", err)
}
row := db.QueryRow(`select id, extra from foo where id = :id and extra = :extra`, sql.Named("id", 1), sql.Named("extra", "foo"))
if row == nil {
t.Error("Failed to call db.QueryRow")
}
var id int
var extra string
err = row.Scan(&id, &extra)
if err != nil {
t.Error("Failed to db.Scan:", err)
}
if id != 1 || extra != "foo" {
t.Error("Failed to db.QueryRow: not matched results")
}
}

13
vendor/github.com/mattn/go-sqlite3/sqlite3_icu.go generated vendored Normal file
View File

@@ -0,0 +1,13 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build icu
package sqlite3
/*
#cgo LDFLAGS: -licuuc -licui18n
#cgo CFLAGS: -DSQLITE_ENABLE_ICU
*/
import "C"

12
vendor/github.com/mattn/go-sqlite3/sqlite3_json1.go generated vendored Normal file
View File

@@ -0,0 +1,12 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build json1
package sqlite3
/*
#cgo CFLAGS: -DSQLITE_ENABLE_JSON1
*/
import "C"

View File

@@ -0,0 +1,14 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build libsqlite3
package sqlite3
/*
#cgo CFLAGS: -DUSE_LIBSQLITE3
#cgo linux LDFLAGS: -lsqlite3
#cgo darwin LDFLAGS: -L/usr/local/opt/sqlite/lib -lsqlite3
*/
import "C"

View File

@@ -0,0 +1,69 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build !sqlite_omit_load_extension
package sqlite3
/*
#ifndef USE_LIBSQLITE3
#include <sqlite3-binding.h>
#else
#include <sqlite3.h>
#endif
#include <stdlib.h>
*/
import "C"
import (
"errors"
"unsafe"
)
func (c *SQLiteConn) loadExtensions(extensions []string) error {
rv := C.sqlite3_enable_load_extension(c.db, 1)
if rv != C.SQLITE_OK {
return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
}
for _, extension := range extensions {
cext := C.CString(extension)
defer C.free(unsafe.Pointer(cext))
rv = C.sqlite3_load_extension(c.db, cext, nil, nil)
if rv != C.SQLITE_OK {
C.sqlite3_enable_load_extension(c.db, 0)
return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
}
}
rv = C.sqlite3_enable_load_extension(c.db, 0)
if rv != C.SQLITE_OK {
return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
}
return nil
}
// LoadExtension loads the given SQLite3 extension.
func (c *SQLiteConn) LoadExtension(lib string, entry string) error {
rv := C.sqlite3_enable_load_extension(c.db, 1)
if rv != C.SQLITE_OK {
return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
}
clib := C.CString(lib)
defer C.free(unsafe.Pointer(clib))
centry := C.CString(entry)
defer C.free(unsafe.Pointer(centry))
rv = C.sqlite3_load_extension(c.db, clib, centry, nil)
if rv != C.SQLITE_OK {
return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
}
rv = C.sqlite3_enable_load_extension(c.db, 0)
if rv != C.SQLITE_OK {
return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
}
return nil
}
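
An extension can also be loaded per connection from a ConnectHook. In the sketch below the shared-library path and entry point are placeholders, not real files; the call fails unless such an extension actually exists on disk.

```go
package main

import (
	"database/sql"
	"log"

	sqlite3 "github.com/mattn/go-sqlite3"
)

func main() {
	sql.Register("sqlite3_with_extension", &sqlite3.SQLiteDriver{
		ConnectHook: func(conn *sqlite3.SQLiteConn) error {
			// Both arguments are hypothetical: a shared library on disk and its init symbol.
			return conn.LoadExtension("./libsqlite_ext.so", "sqlite3_extension_init")
		},
	})

	db, err := sql.Open("sqlite3_with_extension", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		// Fails here if the extension could not be loaded.
		log.Fatal(err)
	}
}
```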

View File

@@ -0,0 +1,23 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build sqlite_omit_load_extension
package sqlite3
/*
#cgo CFLAGS: -DSQLITE_OMIT_LOAD_EXTENSION
*/
import "C"
import (
"errors"
)
func (c *SQLiteConn) loadExtensions(extensions []string) error {
return errors.New("Extensions have been disabled for static builds")
}
func (c *SQLiteConn) LoadExtension(lib string, entry string) error {
return errors.New("Extensions have been disabled for static builds")
}

13
vendor/github.com/mattn/go-sqlite3/sqlite3_other.go generated vendored Normal file
View File

@@ -0,0 +1,13 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build !windows
package sqlite3
/*
#cgo CFLAGS: -I.
#cgo linux LDFLAGS: -ldl
*/
import "C"

1362
vendor/github.com/mattn/go-sqlite3/sqlite3_test.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,423 @@
package sqlite3_test
import (
"database/sql"
"fmt"
"math/rand"
"regexp"
"strconv"
"sync"
"testing"
"time"
)
// Dialect is a type of dialect of databases.
type Dialect int
// Dialects for databases.
const (
SQLITE Dialect = iota // SQLITE means the SQLite3 dialect
POSTGRESQL // POSTGRESQL means the PostgreSQL dialect
MYSQL // MYSQL means the MySQL dialect
)
// DB provides context for the tests
type DB struct {
*testing.T
*sql.DB
dialect Dialect
once sync.Once
}
var db *DB
// the following tables will be created and dropped during the test
var testTables = []string{"foo", "bar", "t", "bench"}
var tests = []testing.InternalTest{
{Name: "TestBlobs", F: TestBlobs},
{Name: "TestManyQueryRow", F: TestManyQueryRow},
{Name: "TestTxQuery", F: TestTxQuery},
{Name: "TestPreparedStmt", F: TestPreparedStmt},
}
var benchmarks = []testing.InternalBenchmark{
{Name: "BenchmarkExec", F: BenchmarkExec},
{Name: "BenchmarkQuery", F: BenchmarkQuery},
{Name: "BenchmarkParams", F: BenchmarkParams},
{Name: "BenchmarkStmt", F: BenchmarkStmt},
{Name: "BenchmarkRows", F: BenchmarkRows},
{Name: "BenchmarkStmtRows", F: BenchmarkStmtRows},
}
// RunTests runs the SQL test suite
func RunTests(t *testing.T, d *sql.DB, dialect Dialect) {
db = &DB{t, d, dialect, sync.Once{}}
testing.RunTests(func(string, string) (bool, error) { return true, nil }, tests)
if !testing.Short() {
for _, b := range benchmarks {
fmt.Printf("%-20s", b.Name)
r := testing.Benchmark(b.F)
fmt.Printf("%10d %10.0f req/s\n", r.N, float64(r.N)/r.T.Seconds())
}
}
db.tearDown()
}
func (db *DB) mustExec(sql string, args ...interface{}) sql.Result {
res, err := db.Exec(sql, args...)
if err != nil {
db.Fatalf("Error running %q: %v", sql, err)
}
return res
}
func (db *DB) tearDown() {
for _, tbl := range testTables {
switch db.dialect {
case SQLITE:
db.mustExec("drop table if exists " + tbl)
case MYSQL, POSTGRESQL:
db.mustExec("drop table if exists " + tbl)
default:
db.Fatal("unknown dialect")
}
}
}
// q replaces ? parameters if needed
func (db *DB) q(sql string) string {
switch db.dialect {
case POSTGRESQL: // replace ? with $1, $2, ..
qrx := regexp.MustCompile(`\?`)
n := 0
return qrx.ReplaceAllStringFunc(sql, func(string) string {
n++
return "$" + strconv.Itoa(n)
})
}
return sql
}
func (db *DB) blobType(size int) string {
switch db.dialect {
case SQLITE:
return fmt.Sprintf("blob[%d]", size)
case POSTGRESQL:
return "bytea"
case MYSQL:
return fmt.Sprintf("VARBINARY(%d)", size)
}
panic("unknown dialect")
}
func (db *DB) serialPK() string {
switch db.dialect {
case SQLITE:
return "integer primary key autoincrement"
case POSTGRESQL:
return "serial primary key"
case MYSQL:
return "integer primary key auto_increment"
}
panic("unknown dialect")
}
func (db *DB) now() string {
switch db.dialect {
case SQLITE:
return "datetime('now')"
case POSTGRESQL:
return "now()"
case MYSQL:
return "now()"
}
panic("unknown dialect")
}
func makeBench() {
if _, err := db.Exec("create table bench (n varchar(32), i integer, d double, s varchar(32), t datetime)"); err != nil {
panic(err)
}
st, err := db.Prepare("insert into bench values (?, ?, ?, ?, ?)")
if err != nil {
panic(err)
}
defer st.Close()
for i := 0; i < 100; i++ {
if _, err = st.Exec(nil, i, float64(i), fmt.Sprintf("%d", i), time.Now()); err != nil {
panic(err)
}
}
}
// TestResult is a test for sql.Result
func TestResult(t *testing.T) {
db.tearDown()
db.mustExec("create temporary table test (id " + db.serialPK() + ", name varchar(10))")
for i := 1; i < 3; i++ {
r := db.mustExec(db.q("insert into test (name) values (?)"), fmt.Sprintf("row %d", i))
n, err := r.RowsAffected()
if err != nil {
t.Fatal(err)
}
if n != 1 {
t.Errorf("got %v, want %v", n, 1)
}
n, err = r.LastInsertId()
if err != nil {
t.Fatal(err)
}
if n != int64(i) {
t.Errorf("got %v, want %v", n, i)
}
}
if _, err := db.Exec("error!"); err == nil {
t.Fatalf("expected error")
}
}
// TestBlobs is a test for blobs
func TestBlobs(t *testing.T) {
db.tearDown()
var blob = []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
db.mustExec("create table foo (id integer primary key, bar " + db.blobType(16) + ")")
db.mustExec(db.q("insert into foo (id, bar) values(?,?)"), 0, blob)
want := fmt.Sprintf("%x", blob)
b := make([]byte, 16)
err := db.QueryRow(db.q("select bar from foo where id = ?"), 0).Scan(&b)
got := fmt.Sprintf("%x", b)
if err != nil {
t.Errorf("[]byte scan: %v", err)
} else if got != want {
t.Errorf("for []byte, got %q; want %q", got, want)
}
err = db.QueryRow(db.q("select bar from foo where id = ?"), 0).Scan(&got)
want = string(blob)
if err != nil {
t.Errorf("string scan: %v", err)
} else if got != want {
t.Errorf("for string, got %q; want %q", got, want)
}
}
// TestManyQueryRow is a test for repeated QueryRow calls
func TestManyQueryRow(t *testing.T) {
if testing.Short() {
t.Log("skipping in short mode")
return
}
db.tearDown()
db.mustExec("create table foo (id integer primary key, name varchar(50))")
db.mustExec(db.q("insert into foo (id, name) values(?,?)"), 1, "bob")
var name string
for i := 0; i < 10000; i++ {
err := db.QueryRow(db.q("select name from foo where id = ?"), 1).Scan(&name)
if err != nil || name != "bob" {
t.Fatalf("on query %d: err=%v, name=%q", i, err, name)
}
}
}
// TestTxQuery is a test for transactional queries
func TestTxQuery(t *testing.T) {
db.tearDown()
tx, err := db.Begin()
if err != nil {
t.Fatal(err)
}
defer tx.Rollback()
_, err = tx.Exec("create table foo (id integer primary key, name varchar(50))")
if err != nil {
t.Fatal(err)
}
_, err = tx.Exec(db.q("insert into foo (id, name) values(?,?)"), 1, "bob")
if err != nil {
t.Fatal(err)
}
r, err := tx.Query(db.q("select name from foo where id = ?"), 1)
if err != nil {
t.Fatal(err)
}
defer r.Close()
if !r.Next() {
if r.Err() != nil {
t.Fatal(err)
}
t.Fatal("expected one rows")
}
var name string
err = r.Scan(&name)
if err != nil {
t.Fatal(err)
}
}
// TestPreparedStmt is a test for prepared statements
func TestPreparedStmt(t *testing.T) {
db.tearDown()
db.mustExec("CREATE TABLE t (count INT)")
sel, err := db.Prepare("SELECT count FROM t ORDER BY count DESC")
if err != nil {
t.Fatalf("prepare 1: %v", err)
}
ins, err := db.Prepare(db.q("INSERT INTO t (count) VALUES (?)"))
if err != nil {
t.Fatalf("prepare 2: %v", err)
}
for n := 1; n <= 3; n++ {
if _, err := ins.Exec(n); err != nil {
t.Fatalf("insert(%d) = %v", n, err)
}
}
const nRuns = 10
var wg sync.WaitGroup
for i := 0; i < nRuns; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for j := 0; j < 10; j++ {
count := 0
if err := sel.QueryRow().Scan(&count); err != nil && err != sql.ErrNoRows {
t.Errorf("Query: %v", err)
return
}
if _, err := ins.Exec(rand.Intn(100)); err != nil {
t.Errorf("Insert: %v", err)
return
}
}
}()
}
wg.Wait()
}
// Benchmarks need to use panic() since b.Error errors are lost when
// running via testing.Benchmark(). I would like to run these via go
// test -bench, but calling Benchmark() from a benchmark test
// currently hangs go.
// BenchmarkExec is a benchmark for Exec
func BenchmarkExec(b *testing.B) {
for i := 0; i < b.N; i++ {
if _, err := db.Exec("select 1"); err != nil {
panic(err)
}
}
}
// BenchmarkQuery is a benchmark for queries
func BenchmarkQuery(b *testing.B) {
for i := 0; i < b.N; i++ {
var n sql.NullString
var i int
var f float64
var s string
// var t time.Time
if err := db.QueryRow("select null, 1, 1.1, 'foo'").Scan(&n, &i, &f, &s); err != nil {
panic(err)
}
}
}
// BenchmarkParams is a benchmark for queries with parameters
func BenchmarkParams(b *testing.B) {
for i := 0; i < b.N; i++ {
var n sql.NullString
var i int
var f float64
var s string
// var t time.Time
if err := db.QueryRow("select ?, ?, ?, ?", nil, 1, 1.1, "foo").Scan(&n, &i, &f, &s); err != nil {
panic(err)
}
}
}
// BenchmarkStmt is a benchmark for prepared statements
func BenchmarkStmt(b *testing.B) {
st, err := db.Prepare("select ?, ?, ?, ?")
if err != nil {
panic(err)
}
defer st.Close()
for n := 0; n < b.N; n++ {
var n sql.NullString
var i int
var f float64
var s string
// var t time.Time
if err := st.QueryRow(nil, 1, 1.1, "foo").Scan(&n, &i, &f, &s); err != nil {
panic(err)
}
}
}
// BenchmarkRows is a benchmark for iterating over rows
func BenchmarkRows(b *testing.B) {
db.once.Do(makeBench)
for n := 0; n < b.N; n++ {
var n sql.NullString
var i int
var f float64
var s string
var t time.Time
r, err := db.Query("select * from bench")
if err != nil {
panic(err)
}
for r.Next() {
if err = r.Scan(&n, &i, &f, &s, &t); err != nil {
panic(err)
}
}
if err = r.Err(); err != nil {
panic(err)
}
}
}
// BenchmarkStmtRows is a benchmark for iterating over prepared-statement rows
func BenchmarkStmtRows(b *testing.B) {
db.once.Do(makeBench)
st, err := db.Prepare("select * from bench")
if err != nil {
panic(err)
}
defer st.Close()
for n := 0; n < b.N; n++ {
var n sql.NullString
var i int
var f float64
var s string
var t time.Time
r, err := st.Query()
if err != nil {
panic(err)
}
for r.Next() {
if err = r.Scan(&n, &i, &f, &s, &t); err != nil {
panic(err)
}
}
if err = r.Err(); err != nil {
panic(err)
}
}
}
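
The harness is driven by handing it an open *sql.DB and a Dialect. A sketch of invoking it for SQLite from an ordinary test in the same package; the test name and temp-file handling are illustrative.

```go
package sqlite3_test

import (
	"database/sql"
	"io/ioutil"
	"os"
	"path"
	"testing"

	_ "github.com/mattn/go-sqlite3"
)

// TestSuite runs the shared suite above against a throwaway on-disk database.
func TestSuite(t *testing.T) {
	dirName, err := ioutil.TempDir("", "sqlite3-suite")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dirName)

	d, err := sql.Open("sqlite3", path.Join(dirName, "suite.db"))
	if err != nil {
		t.Fatal(err)
	}
	defer d.Close()

	RunTests(t, d, SQLITE)
}
```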

414
vendor/github.com/mattn/go-sqlite3/sqlite3_trace.go generated vendored Normal file
View File

@@ -0,0 +1,414 @@
// Copyright (C) 2016 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build trace
package sqlite3
/*
#ifndef USE_LIBSQLITE3
#include <sqlite3-binding.h>
#else
#include <sqlite3.h>
#endif
#include <stdlib.h>
void stepTrampoline(sqlite3_context*, int, sqlite3_value**);
void doneTrampoline(sqlite3_context*);
int traceCallbackTrampoline(unsigned int traceEventCode, void *ctx, void *p, void *x);
*/
import "C"
import (
"errors"
"fmt"
"reflect"
"strings"
"sync"
"unsafe"
)
// Trace... constants identify the possible events causing callback invocation.
// Values are the same as the corresponding SQLite Trace Event Codes.
const (
TraceStmt = C.SQLITE_TRACE_STMT
TraceProfile = C.SQLITE_TRACE_PROFILE
TraceRow = C.SQLITE_TRACE_ROW
TraceClose = C.SQLITE_TRACE_CLOSE
)
type TraceInfo struct {
// Pack together the shorter fields, to keep the struct smaller.
// On a 64-bit machine there would be padding
// between EventCode and ConnHandle; having AutoCommit here is "free":
EventCode uint32
AutoCommit bool
ConnHandle uintptr
// Usually filled, unless EventCode = TraceClose = SQLITE_TRACE_CLOSE:
// identifier for a prepared statement:
StmtHandle uintptr
// Two strings filled when EventCode = TraceStmt = SQLITE_TRACE_STMT:
// (1) either the unexpanded SQL text of the prepared statement, or
// an SQL comment that indicates the invocation of a trigger;
// (2) expanded SQL, if requested and if (1) is not an SQL comment.
StmtOrTrigger string
ExpandedSQL string // only if requested (TraceConfig.WantExpandedSQL = true)
// filled when EventCode = TraceProfile = SQLITE_TRACE_PROFILE:
// estimated number of nanoseconds that the prepared statement took to run:
RunTimeNanosec int64
DBError Error
}
// TraceUserCallback gives the signature for a trace function
// provided by the user (Go application programmer).
// SQLite 3.14 documentation (as of September 2, 2016)
// for SQL Trace Hook = sqlite3_trace_v2():
// The integer return value from the callback is currently ignored,
// though this may change in future releases. Callback implementations
// should return zero to ensure future compatibility.
type TraceUserCallback func(TraceInfo) int
type TraceConfig struct {
Callback TraceUserCallback
EventMask C.uint
WantExpandedSQL bool
}
func fillDBError(dbErr *Error, db *C.sqlite3) {
// See SQLiteConn.lastError(), in file 'sqlite3.go' at the time of writing (Sept 5, 2016)
dbErr.Code = ErrNo(C.sqlite3_errcode(db))
dbErr.ExtendedCode = ErrNoExtended(C.sqlite3_extended_errcode(db))
dbErr.err = C.GoString(C.sqlite3_errmsg(db))
}
func fillExpandedSQL(info *TraceInfo, db *C.sqlite3, pStmt unsafe.Pointer) {
if pStmt == nil {
panic("No SQLite statement pointer in P arg of trace_v2 callback")
}
expSQLiteCStr := C.sqlite3_expanded_sql((*C.sqlite3_stmt)(pStmt))
if expSQLiteCStr == nil {
fillDBError(&info.DBError, db)
return
}
info.ExpandedSQL = C.GoString(expSQLiteCStr)
}
//export traceCallbackTrampoline
func traceCallbackTrampoline(
traceEventCode C.uint,
// Parameter named 'C' in SQLite docs = Context given at registration:
ctx unsafe.Pointer,
// Parameter named 'P' in SQLite docs (Primary event data?):
p unsafe.Pointer,
// Parameter named 'X' in SQLite docs (eXtra event data?):
xValue unsafe.Pointer) C.int {
if ctx == nil {
panic(fmt.Sprintf("No context (ev 0x%x)", traceEventCode))
}
contextDB := (*C.sqlite3)(ctx)
connHandle := uintptr(ctx)
var traceConf TraceConfig
var found bool
if traceEventCode == TraceClose {
// clean up traceMap: 'pop' means get and delete
traceConf, found = popTraceMapping(connHandle)
} else {
traceConf, found = lookupTraceMapping(connHandle)
}
if !found {
panic(fmt.Sprintf("Mapping not found for handle 0x%x (ev 0x%x)",
connHandle, traceEventCode))
}
var info TraceInfo
info.EventCode = uint32(traceEventCode)
info.AutoCommit = (int(C.sqlite3_get_autocommit(contextDB)) != 0)
info.ConnHandle = connHandle
switch traceEventCode {
case TraceStmt:
info.StmtHandle = uintptr(p)
var xStr string
if xValue != nil {
xStr = C.GoString((*C.char)(xValue))
}
info.StmtOrTrigger = xStr
if !strings.HasPrefix(xStr, "--") {
// Not SQL comment, therefore the current event
// is not related to a trigger.
// The user might want to receive the expanded SQL;
// let's check:
if traceConf.WantExpandedSQL {
fillExpandedSQL(&info, contextDB, p)
}
}
case TraceProfile:
info.StmtHandle = uintptr(p)
if xValue == nil {
panic("NULL pointer in X arg of trace_v2 callback for SQLITE_TRACE_PROFILE event")
}
info.RunTimeNanosec = *(*int64)(xValue)
// sample the error //TODO: is it safe? is it useful?
fillDBError(&info.DBError, contextDB)
case TraceRow:
info.StmtHandle = uintptr(p)
case TraceClose:
handle := uintptr(p)
if handle != info.ConnHandle {
panic(fmt.Sprintf("Different conn handle 0x%x (expected 0x%x) in SQLITE_TRACE_CLOSE event.",
handle, info.ConnHandle))
}
default:
// Pass unsupported events to the user callback (if configured);
// let the user callback decide whether to panic or ignore them.
}
// Do not execute user callback when the event was not requested by user!
// Remember that the Close event is always selected when
// registering this callback trampoline with SQLite --- for cleanup.
// In the future there may be more events forced to "selected" in SQLite
// for the driver's needs.
if traceConf.EventMask&traceEventCode == 0 {
return 0
}
r := 0
if traceConf.Callback != nil {
r = traceConf.Callback(info)
}
return C.int(r)
}
type traceMapEntry struct {
config TraceConfig
}
var traceMapLock sync.Mutex
var traceMap = make(map[uintptr]traceMapEntry)
func addTraceMapping(connHandle uintptr, traceConf TraceConfig) {
traceMapLock.Lock()
defer traceMapLock.Unlock()
oldEntryCopy, found := traceMap[connHandle]
if found {
panic(fmt.Sprintf("Adding trace config %v: handle 0x%x already registered (%v).",
traceConf, connHandle, oldEntryCopy.config))
}
traceMap[connHandle] = traceMapEntry{config: traceConf}
fmt.Printf("Added trace config %v: handle 0x%x.\n", traceConf, connHandle)
}
func lookupTraceMapping(connHandle uintptr) (TraceConfig, bool) {
traceMapLock.Lock()
defer traceMapLock.Unlock()
entryCopy, found := traceMap[connHandle]
return entryCopy.config, found
}
// 'pop' = get and delete from map before returning the value to the caller
func popTraceMapping(connHandle uintptr) (TraceConfig, bool) {
traceMapLock.Lock()
defer traceMapLock.Unlock()
entryCopy, found := traceMap[connHandle]
if found {
delete(traceMap, connHandle)
fmt.Printf("Pop handle 0x%x: deleted trace config %v.\n", connHandle, entryCopy.config)
}
return entryCopy.config, found
}
// RegisterAggregator makes a Go type available as a SQLite aggregation function.
//
// Because aggregation is incremental, it's implemented in Go with a
// type that has 2 methods: func Step(values) accumulates one row of
// data into the accumulator, and func Done() ret finalizes and
// returns the aggregate value. "values" and "ret" may be any type
// supported by RegisterFunc.
//
// RegisterAggregator takes as implementation a constructor function
// that constructs an instance of the aggregator type each time an
// aggregation begins. The constructor must return a pointer to a
// type, or an interface that implements Step() and Done().
//
// The constructor function and the Step/Done methods may optionally
// return an error in addition to their other return values.
//
// See _example/go_custom_funcs for a detailed example.
func (c *SQLiteConn) RegisterAggregator(name string, impl interface{}, pure bool) error {
var ai aggInfo
ai.constructor = reflect.ValueOf(impl)
t := ai.constructor.Type()
if t.Kind() != reflect.Func {
return errors.New("non-function passed to RegisterAggregator")
}
if t.NumOut() != 1 && t.NumOut() != 2 {
return errors.New("SQLite aggregator constructors must return 1 or 2 values")
}
if t.NumOut() == 2 && !t.Out(1).Implements(reflect.TypeOf((*error)(nil)).Elem()) {
return errors.New("Second return value of SQLite function must be error")
}
if t.NumIn() != 0 {
return errors.New("SQLite aggregator constructors must not have arguments")
}
agg := t.Out(0)
switch agg.Kind() {
case reflect.Ptr, reflect.Interface:
default:
return errors.New("SQlite aggregator constructor must return a pointer object")
}
stepFn, found := agg.MethodByName("Step")
if !found {
return errors.New("SQlite aggregator doesn't have a Step() function")
}
step := stepFn.Type
if step.NumOut() != 0 && step.NumOut() != 1 {
return errors.New("SQlite aggregator Step() function must return 0 or 1 values")
}
if step.NumOut() == 1 && !step.Out(0).Implements(reflect.TypeOf((*error)(nil)).Elem()) {
return errors.New("type of SQlite aggregator Step() return value must be error")
}
stepNArgs := step.NumIn()
start := 0
if agg.Kind() == reflect.Ptr {
// Skip over the method receiver
stepNArgs--
start++
}
if step.IsVariadic() {
stepNArgs--
}
for i := start; i < start+stepNArgs; i++ {
conv, err := callbackArg(step.In(i))
if err != nil {
return err
}
ai.stepArgConverters = append(ai.stepArgConverters, conv)
}
if step.IsVariadic() {
conv, err := callbackArg(step.In(start + stepNArgs).Elem()) // variadic element type comes from Step, not the zero-argument constructor
if err != nil {
return err
}
ai.stepVariadicConverter = conv
// Pass -1 to sqlite so that it allows any number of
// arguments. The call helper verifies that the minimum number
// of arguments is present for variadic functions.
stepNArgs = -1
}
doneFn, found := agg.MethodByName("Done")
if !found {
return errors.New("SQlite aggregator doesn't have a Done() function")
}
done := doneFn.Type
doneNArgs := done.NumIn()
if agg.Kind() == reflect.Ptr {
// Skip over the method receiver
doneNArgs--
}
if doneNArgs != 0 {
return errors.New("SQlite aggregator Done() function must have no arguments")
}
if done.NumOut() != 1 && done.NumOut() != 2 {
return errors.New("SQLite aggregator Done() function must return 1 or 2 values")
}
if done.NumOut() == 2 && !done.Out(1).Implements(reflect.TypeOf((*error)(nil)).Elem()) {
return errors.New("second return value of SQLite aggregator Done() function must be error")
}
conv, err := callbackRet(done.Out(0))
if err != nil {
return err
}
ai.doneRetConverter = conv
ai.active = make(map[int64]reflect.Value)
ai.next = 1
// ai must outlast the database connection, or we'll have dangling pointers.
c.aggregators = append(c.aggregators, &ai)
cname := C.CString(name)
defer C.free(unsafe.Pointer(cname))
opts := C.SQLITE_UTF8
if pure {
opts |= C.SQLITE_DETERMINISTIC
}
rv := sqlite3CreateFunction(c.db, cname, C.int(stepNArgs), C.int(opts), newHandle(c, &ai), nil, C.stepTrampoline, C.doneTrampoline)
if rv != C.SQLITE_OK {
return c.lastError()
}
return nil
}
// SetTrace installs or removes the trace callback for the given database connection.
// It's not named 'RegisterTrace' because only one callback can be kept and called.
// Calling SetTrace a second time on same database connection
// overrides (cancels) any prior callback and all its settings:
// event mask, etc.
func (c *SQLiteConn) SetTrace(requested *TraceConfig) error {
connHandle := uintptr(unsafe.Pointer(c.db))
_, _ = popTraceMapping(connHandle)
if requested == nil {
// The traceMap entry was deleted already by popTraceMapping():
// can disable all events now, no need to watch for TraceClose.
err := c.setSQLiteTrace(0)
return err
}
reqCopy := *requested
// Disable potentially expensive operations
// if their result will not be used. We are doing this
// just in case the caller provided nonsensical input.
if reqCopy.EventMask&TraceStmt == 0 {
reqCopy.WantExpandedSQL = false
}
addTraceMapping(connHandle, reqCopy)
// The callback trampoline function does cleanup on Close event,
// regardless of the presence or absence of the user callback.
// Therefore it needs the Close event to be selected:
actualEventMask := uint(reqCopy.EventMask | TraceClose)
err := c.setSQLiteTrace(actualEventMask)
return err
}
func (c *SQLiteConn) setSQLiteTrace(sqliteEventMask uint) error {
rv := C.sqlite3_trace_v2(c.db,
C.uint(sqliteEventMask),
(*[0]byte)(unsafe.Pointer(C.traceCallbackTrampoline)),
unsafe.Pointer(c.db)) // Fourth arg is same as first: we are
// passing the database connection handle as callback context.
if rv != C.SQLITE_OK {
return c.lastError()
}
return nil
}
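
For context, SetTrace is normally installed from a ConnectHook, since tracing is configured per connection. A minimal sketch, assuming the driver is built with the `trace` tag; the registration name `sqlite3_tracing` and the printed format are illustrative only:

```
package main

import (
	"database/sql"
	"fmt"

	sqlite3 "github.com/mattn/go-sqlite3"
)

func main() {
	sql.Register("sqlite3_tracing", &sqlite3.SQLiteDriver{
		ConnectHook: func(conn *sqlite3.SQLiteConn) error {
			return conn.SetTrace(&sqlite3.TraceConfig{
				EventMask:       sqlite3.TraceStmt | sqlite3.TraceProfile,
				WantExpandedSQL: true,
				Callback: func(info sqlite3.TraceInfo) int {
					fmt.Printf("trace event 0x%x: %s\n", info.EventCode, info.StmtOrTrigger)
					return 0
				},
			})
		},
	})

	db, err := sql.Open("sqlite3_tracing", ":memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Every statement on this connection now emits TraceStmt/TraceProfile events.
	if _, err := db.Exec("CREATE TABLE t (x INTEGER)"); err != nil {
		panic(err)
	}
}
```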

View File

@@ -0,0 +1,72 @@
// Copyright (C) 2016 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build trace
package sqlite3
import (
"database/sql"
"testing"
)
type sumAggregator int64
func (s *sumAggregator) Step(x int64) {
*s += sumAggregator(x)
}
func (s *sumAggregator) Done() int64 {
return int64(*s)
}
func TestAggregatorRegistration(t *testing.T) {
customSum := func() *sumAggregator {
var ret sumAggregator
return &ret
}
sql.Register("sqlite3_AggregatorRegistration", &SQLiteDriver{
ConnectHook: func(conn *SQLiteConn) error {
if err := conn.RegisterAggregator("customSum", customSum, true); err != nil {
return err
}
return nil
},
})
db, err := sql.Open("sqlite3_AggregatorRegistration", ":memory:")
if err != nil {
t.Fatal("Failed to open database:", err)
}
defer db.Close()
_, err = db.Exec("create table foo (department integer, profits integer)")
if err != nil {
// trace feature is not implemented
t.Skip("Failed to create table:", err)
}
_, err = db.Exec("insert into foo values (1, 10), (1, 20), (2, 42)")
if err != nil {
t.Fatal("Failed to insert records:", err)
}
tests := []struct {
dept, sum int64
}{
{1, 30},
{2, 42},
}
for _, test := range tests {
var ret int64
err = db.QueryRow("select customSum(profits) from foo where department = $1 group by department", test.dept).Scan(&ret)
if err != nil {
t.Fatal("Query failed:", err)
}
if ret != test.sum {
t.Fatalf("Custom sum returned wrong value, got %d, want %d", ret, test.sum)
}
}
}

57
vendor/github.com/mattn/go-sqlite3/sqlite3_type.go generated vendored Normal file
View File

@@ -0,0 +1,57 @@
package sqlite3
/*
#ifndef USE_LIBSQLITE3
#include <sqlite3-binding.h>
#else
#include <sqlite3.h>
#endif
*/
import "C"
import (
"reflect"
"time"
)
// ColumnTypeDatabaseTypeName implements RowsColumnTypeDatabaseTypeName.
func (rc *SQLiteRows) ColumnTypeDatabaseTypeName(i int) string {
return C.GoString(C.sqlite3_column_decltype(rc.s.s, C.int(i)))
}
/*
func (rc *SQLiteRows) ColumnTypeLength(index int) (length int64, ok bool) {
return 0, false
}
func (rc *SQLiteRows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) {
return 0, 0, false
}
*/
// ColumnTypeNullable implements RowsColumnTypeNullable.
func (rc *SQLiteRows) ColumnTypeNullable(i int) (nullable, ok bool) {
return true, true
}
// ColumnTypeScanType implements RowsColumnTypeScanType.
func (rc *SQLiteRows) ColumnTypeScanType(i int) reflect.Type {
switch C.sqlite3_column_type(rc.s.s, C.int(i)) {
case C.SQLITE_INTEGER:
switch C.GoString(C.sqlite3_column_decltype(rc.s.s, C.int(i))) {
case "timestamp", "datetime", "date":
return reflect.TypeOf(time.Time{})
case "boolean":
return reflect.TypeOf(false)
}
return reflect.TypeOf(int64(0))
case C.SQLITE_FLOAT:
return reflect.TypeOf(float64(0))
case C.SQLITE_BLOB:
return reflect.SliceOf(reflect.TypeOf(byte(0)))
case C.SQLITE_NULL:
return reflect.TypeOf(nil)
case C.SQLITE_TEXT:
return reflect.TypeOf("")
}
return reflect.SliceOf(reflect.TypeOf(byte(0)))
}
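
These ColumnType* hooks feed database/sql's column-metadata API (Go 1.8+). A brief sketch of reading them back; the `users` table and the helper below are assumptions for illustration, and the snippet relies on the standard `database/sql` and `fmt` imports:

```
// inspectColumns prints the driver-reported metadata for each result column.
func inspectColumns(db *sql.DB) error {
	rows, err := db.Query("SELECT id, name, created_at FROM users")
	if err != nil {
		return err
	}
	defer rows.Close()

	cols, err := rows.ColumnTypes()
	if err != nil {
		return err
	}
	for _, c := range cols {
		nullable, reported := c.Nullable()
		// DatabaseTypeName maps to ColumnTypeDatabaseTypeName (the declared type),
		// ScanType to ColumnTypeScanType, Nullable to ColumnTypeNullable.
		fmt.Printf("%s: decltype=%q scan=%v nullable=%v (reported=%v)\n",
			c.Name(), c.DatabaseTypeName(), c.ScanType(), nullable, reported)
	}
	return rows.Err()
}
```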

646
vendor/github.com/mattn/go-sqlite3/sqlite3_vtable.go generated vendored Normal file
View File

@@ -0,0 +1,646 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build vtable
package sqlite3
/*
#cgo CFLAGS: -std=gnu99
#cgo CFLAGS: -DSQLITE_ENABLE_RTREE -DSQLITE_THREADSAFE
#cgo CFLAGS: -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_FTS3_PARENTHESIS -DSQLITE_ENABLE_FTS4_UNICODE61
#cgo CFLAGS: -DSQLITE_TRACE_SIZE_LIMIT=15
#cgo CFLAGS: -DSQLITE_ENABLE_COLUMN_METADATA=1
#cgo CFLAGS: -Wno-deprecated-declarations
#ifndef USE_LIBSQLITE3
#include <sqlite3-binding.h>
#else
#include <sqlite3.h>
#endif
#include <stdlib.h>
#include <stdint.h>
#include <memory.h>
static inline char *_sqlite3_mprintf(char *zFormat, char *arg) {
return sqlite3_mprintf(zFormat, arg);
}
typedef struct goVTab goVTab;
struct goVTab {
sqlite3_vtab base;
void *vTab;
};
uintptr_t goMInit(void *db, void *pAux, int argc, char **argv, char **pzErr, int isCreate);
static int cXInit(sqlite3 *db, void *pAux, int argc, const char *const*argv, sqlite3_vtab **ppVTab, char **pzErr, int isCreate) {
void *vTab = (void *)goMInit(db, pAux, argc, (char**)argv, pzErr, isCreate);
if (!vTab || *pzErr) {
return SQLITE_ERROR;
}
goVTab *pvTab = (goVTab *)sqlite3_malloc(sizeof(goVTab));
if (!pvTab) {
*pzErr = sqlite3_mprintf("%s", "Out of memory");
return SQLITE_NOMEM;
}
memset(pvTab, 0, sizeof(goVTab));
pvTab->vTab = vTab;
*ppVTab = (sqlite3_vtab *)pvTab;
*pzErr = 0;
return SQLITE_OK;
}
static inline int cXCreate(sqlite3 *db, void *pAux, int argc, const char *const*argv, sqlite3_vtab **ppVTab, char **pzErr) {
return cXInit(db, pAux, argc, argv, ppVTab, pzErr, 1);
}
static inline int cXConnect(sqlite3 *db, void *pAux, int argc, const char *const*argv, sqlite3_vtab **ppVTab, char **pzErr) {
return cXInit(db, pAux, argc, argv, ppVTab, pzErr, 0);
}
char* goVBestIndex(void *pVTab, void *icp);
static inline int cXBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *info) {
char *pzErr = goVBestIndex(((goVTab*)pVTab)->vTab, info);
if (pzErr) {
if (pVTab->zErrMsg)
sqlite3_free(pVTab->zErrMsg);
pVTab->zErrMsg = pzErr;
return SQLITE_ERROR;
}
return SQLITE_OK;
}
char* goVRelease(void *pVTab, int isDestroy);
static int cXRelease(sqlite3_vtab *pVTab, int isDestroy) {
char *pzErr = goVRelease(((goVTab*)pVTab)->vTab, isDestroy);
if (pzErr) {
if (pVTab->zErrMsg)
sqlite3_free(pVTab->zErrMsg);
pVTab->zErrMsg = pzErr;
return SQLITE_ERROR;
}
if (pVTab->zErrMsg)
sqlite3_free(pVTab->zErrMsg);
sqlite3_free(pVTab);
return SQLITE_OK;
}
static inline int cXDisconnect(sqlite3_vtab *pVTab) {
return cXRelease(pVTab, 0);
}
static inline int cXDestroy(sqlite3_vtab *pVTab) {
return cXRelease(pVTab, 1);
}
typedef struct goVTabCursor goVTabCursor;
struct goVTabCursor {
sqlite3_vtab_cursor base;
void *vTabCursor;
};
uintptr_t goVOpen(void *pVTab, char **pzErr);
static int cXOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor) {
void *vTabCursor = (void *)goVOpen(((goVTab*)pVTab)->vTab, &(pVTab->zErrMsg));
goVTabCursor *pCursor = (goVTabCursor *)sqlite3_malloc(sizeof(goVTabCursor));
if (!pCursor) {
return SQLITE_NOMEM;
}
memset(pCursor, 0, sizeof(goVTabCursor));
pCursor->vTabCursor = vTabCursor;
*ppCursor = (sqlite3_vtab_cursor *)pCursor;
return SQLITE_OK;
}
static int setErrMsg(sqlite3_vtab_cursor *pCursor, char *pzErr) {
if (pCursor->pVtab->zErrMsg)
sqlite3_free(pCursor->pVtab->zErrMsg);
pCursor->pVtab->zErrMsg = pzErr;
return SQLITE_ERROR;
}
char* goVClose(void *pCursor);
static int cXClose(sqlite3_vtab_cursor *pCursor) {
char *pzErr = goVClose(((goVTabCursor*)pCursor)->vTabCursor);
if (pzErr) {
return setErrMsg(pCursor, pzErr);
}
sqlite3_free(pCursor);
return SQLITE_OK;
}
char* goVFilter(void *pCursor, int idxNum, char* idxName, int argc, sqlite3_value **argv);
static int cXFilter(sqlite3_vtab_cursor *pCursor, int idxNum, const char *idxStr, int argc, sqlite3_value **argv) {
char *pzErr = goVFilter(((goVTabCursor*)pCursor)->vTabCursor, idxNum, (char*)idxStr, argc, argv);
if (pzErr) {
return setErrMsg(pCursor, pzErr);
}
return SQLITE_OK;
}
char* goVNext(void *pCursor);
static int cXNext(sqlite3_vtab_cursor *pCursor) {
char *pzErr = goVNext(((goVTabCursor*)pCursor)->vTabCursor);
if (pzErr) {
return setErrMsg(pCursor, pzErr);
}
return SQLITE_OK;
}
int goVEof(void *pCursor);
static inline int cXEof(sqlite3_vtab_cursor *pCursor) {
return goVEof(((goVTabCursor*)pCursor)->vTabCursor);
}
char* goVColumn(void *pCursor, void *cp, int col);
static int cXColumn(sqlite3_vtab_cursor *pCursor, sqlite3_context *ctx, int i) {
char *pzErr = goVColumn(((goVTabCursor*)pCursor)->vTabCursor, ctx, i);
if (pzErr) {
return setErrMsg(pCursor, pzErr);
}
return SQLITE_OK;
}
char* goVRowid(void *pCursor, sqlite3_int64 *pRowid);
static int cXRowid(sqlite3_vtab_cursor *pCursor, sqlite3_int64 *pRowid) {
char *pzErr = goVRowid(((goVTabCursor*)pCursor)->vTabCursor, pRowid);
if (pzErr) {
return setErrMsg(pCursor, pzErr);
}
return SQLITE_OK;
}
char* goVUpdate(void *pVTab, int argc, sqlite3_value **argv, sqlite3_int64 *pRowid);
static int cXUpdate(sqlite3_vtab *pVTab, int argc, sqlite3_value **argv, sqlite3_int64 *pRowid) {
char *pzErr = goVUpdate(((goVTab*)pVTab)->vTab, argc, argv, pRowid);
if (pzErr) {
if (pVTab->zErrMsg)
sqlite3_free(pVTab->zErrMsg);
pVTab->zErrMsg = pzErr;
return SQLITE_ERROR;
}
return SQLITE_OK;
}
static sqlite3_module goModule = {
0, // iVersion
cXCreate, // xCreate - create a table
cXConnect, // xConnect - connect to an existing table
cXBestIndex, // xBestIndex - Determine search strategy
cXDisconnect, // xDisconnect - Disconnect from a table
cXDestroy, // xDestroy - Drop a table
cXOpen, // xOpen - open a cursor
cXClose, // xClose - close a cursor
cXFilter, // xFilter - configure scan constraints
cXNext, // xNext - advance a cursor
cXEof, // xEof
cXColumn, // xColumn - read data
cXRowid, // xRowid - read data
cXUpdate, // xUpdate - write data
// Not implemented
0, // xBegin - begin transaction
0, // xSync - sync transaction
0, // xCommit - commit transaction
0, // xRollback - rollback transaction
0, // xFindFunction - function overloading
0, // xRename - rename the table
0, // xSavepoint
0, // xRelease
0 // xRollbackTo
};
void goMDestroy(void*);
static int _sqlite3_create_module(sqlite3 *db, const char *zName, uintptr_t pClientData) {
return sqlite3_create_module_v2(db, zName, &goModule, (void*) pClientData, goMDestroy);
}
*/
import "C"
import (
"fmt"
"math"
"reflect"
"unsafe"
)
type sqliteModule struct {
c *SQLiteConn
name string
module Module
}
type sqliteVTab struct {
module *sqliteModule
vTab VTab
}
type sqliteVTabCursor struct {
vTab *sqliteVTab
vTabCursor VTabCursor
}
// Op is the type of an operation.
type Op uint8
// These Op values identify the operations.
const (
OpEQ Op = 2
OpGT = 4
OpLE = 8
OpLT = 16
OpGE = 32
OpMATCH = 64
OpLIKE = 65 /* 3.10.0 and later only */
OpGLOB = 66 /* 3.10.0 and later only */
OpREGEXP = 67 /* 3.10.0 and later only */
OpScanUnique = 1 /* Scan visits at most 1 row */
)
// InfoConstraint gives information about a constraint.
type InfoConstraint struct {
Column int
Op Op
Usable bool
}
// InfoOrderBy gives information about an ORDER BY term.
type InfoOrderBy struct {
Column int
Desc bool
}
func constraints(info *C.sqlite3_index_info) []InfoConstraint {
l := info.nConstraint
slice := (*[1 << 30]C.struct_sqlite3_index_constraint)(unsafe.Pointer(info.aConstraint))[:l:l]
cst := make([]InfoConstraint, 0, l)
for _, c := range slice {
var usable bool
if c.usable > 0 {
usable = true
}
cst = append(cst, InfoConstraint{
Column: int(c.iColumn),
Op: Op(c.op),
Usable: usable,
})
}
return cst
}
func orderBys(info *C.sqlite3_index_info) []InfoOrderBy {
l := info.nOrderBy
slice := (*[1 << 30]C.struct_sqlite3_index_orderby)(unsafe.Pointer(info.aOrderBy))[:l:l]
ob := make([]InfoOrderBy, 0, l)
for _, c := range slice {
var desc bool
if c.desc > 0 {
desc = true
}
ob = append(ob, InfoOrderBy{
Column: int(c.iColumn),
Desc: desc,
})
}
return ob
}
// IndexResult is a Go struct representation of what eventually ends up in the
// output fields for `sqlite3_index_info`.
// See: https://www.sqlite.org/c3ref/index_info.html
type IndexResult struct {
Used []bool // aConstraintUsage
IdxNum int
IdxStr string
AlreadyOrdered bool // orderByConsumed
EstimatedCost float64
EstimatedRows float64
}
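To make the flow from InfoConstraint into IndexResult concrete, here is a sketch of a BestIndex implementation that claims a single usable equality constraint on column 0; `demoTable` is a hypothetical VTab implementation, not something defined in this file:

```
func (t *demoTable) BestIndex(csts []InfoConstraint, ob []InfoOrderBy) (*IndexResult, error) {
	used := make([]bool, len(csts)) // one entry per constraint is required
	idxNum := 0
	for i, c := range csts {
		if c.Usable && c.Column == 0 && c.Op == OpEQ {
			used[i] = true // this constraint's value will arrive as vals[0] in Filter
			idxNum = 1     // an arbitrary plan identifier, echoed back to Filter as idxNum
			break
		}
	}
	return &IndexResult{Used: used, IdxNum: idxNum, EstimatedCost: 10}, nil
}
```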
// mPrintf is a utility wrapper around sqlite3_mprintf
func mPrintf(format, arg string) *C.char {
cf := C.CString(format)
defer C.free(unsafe.Pointer(cf))
ca := C.CString(arg)
defer C.free(unsafe.Pointer(ca))
return C._sqlite3_mprintf(cf, ca)
}
//export goMInit
func goMInit(db, pClientData unsafe.Pointer, argc C.int, argv **C.char, pzErr **C.char, isCreate C.int) C.uintptr_t {
m := lookupHandle(uintptr(pClientData)).(*sqliteModule)
if m.c.db != (*C.sqlite3)(db) {
*pzErr = mPrintf("%s", "Inconsistent db handles")
return 0
}
args := make([]string, argc)
var A []*C.char
slice := reflect.SliceHeader{Data: uintptr(unsafe.Pointer(argv)), Len: int(argc), Cap: int(argc)}
a := reflect.NewAt(reflect.TypeOf(A), unsafe.Pointer(&slice)).Elem().Interface()
for i, s := range a.([]*C.char) {
args[i] = C.GoString(s)
}
var vTab VTab
var err error
if isCreate == 1 {
vTab, err = m.module.Create(m.c, args)
} else {
vTab, err = m.module.Connect(m.c, args)
}
if err != nil {
*pzErr = mPrintf("%s", err.Error())
return 0
}
vt := sqliteVTab{m, vTab}
*pzErr = nil
return C.uintptr_t(newHandle(m.c, &vt))
}
//export goVRelease
func goVRelease(pVTab unsafe.Pointer, isDestroy C.int) *C.char {
vt := lookupHandle(uintptr(pVTab)).(*sqliteVTab)
var err error
if isDestroy == 1 {
err = vt.vTab.Destroy()
} else {
err = vt.vTab.Disconnect()
}
if err != nil {
return mPrintf("%s", err.Error())
}
return nil
}
//export goVOpen
func goVOpen(pVTab unsafe.Pointer, pzErr **C.char) C.uintptr_t {
vt := lookupHandle(uintptr(pVTab)).(*sqliteVTab)
vTabCursor, err := vt.vTab.Open()
if err != nil {
*pzErr = mPrintf("%s", err.Error())
return 0
}
vtc := sqliteVTabCursor{vt, vTabCursor}
*pzErr = nil
return C.uintptr_t(newHandle(vt.module.c, &vtc))
}
//export goVBestIndex
func goVBestIndex(pVTab unsafe.Pointer, icp unsafe.Pointer) *C.char {
vt := lookupHandle(uintptr(pVTab)).(*sqliteVTab)
info := (*C.sqlite3_index_info)(icp)
csts := constraints(info)
res, err := vt.vTab.BestIndex(csts, orderBys(info))
if err != nil {
return mPrintf("%s", err.Error())
}
if len(res.Used) != len(csts) {
return mPrintf("Result.Used != expected value", "")
}
// Get a pointer to constraint_usage struct so we can update in place.
l := info.nConstraint
s := (*[1 << 30]C.struct_sqlite3_index_constraint_usage)(unsafe.Pointer(info.aConstraintUsage))[:l:l]
index := 1
for i := C.int(0); i < info.nConstraint; i++ {
if res.Used[i] {
s[i].argvIndex = C.int(index)
s[i].omit = C.uchar(1)
index++
}
}
info.idxNum = C.int(res.IdxNum)
// SQLite holds on to idxStr until the matching xFilter call, so allocate it
// with sqlite3_mprintf and let SQLite free it (needToFreeIdxStr = 1) rather
// than freeing a C.CString here, which would leave info.idxStr dangling.
info.idxStr = mPrintf("%s", res.IdxStr)
info.needToFreeIdxStr = C.int(1)
if res.AlreadyOrdered {
info.orderByConsumed = C.int(1)
}
info.estimatedCost = C.double(res.EstimatedCost)
info.estimatedRows = C.sqlite3_int64(res.EstimatedRows)
return nil
}
//export goVClose
func goVClose(pCursor unsafe.Pointer) *C.char {
vtc := lookupHandle(uintptr(pCursor)).(*sqliteVTabCursor)
err := vtc.vTabCursor.Close()
if err != nil {
return mPrintf("%s", err.Error())
}
return nil
}
//export goMDestroy
func goMDestroy(pClientData unsafe.Pointer) {
m := lookupHandle(uintptr(pClientData)).(*sqliteModule)
m.module.DestroyModule()
}
//export goVFilter
func goVFilter(pCursor unsafe.Pointer, idxNum C.int, idxName *C.char, argc C.int, argv **C.sqlite3_value) *C.char {
vtc := lookupHandle(uintptr(pCursor)).(*sqliteVTabCursor)
args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc]
vals := make([]interface{}, 0, argc)
for _, v := range args {
conv, err := callbackArgGeneric(v)
if err != nil {
return mPrintf("%s", err.Error())
}
vals = append(vals, conv.Interface())
}
err := vtc.vTabCursor.Filter(int(idxNum), C.GoString(idxName), vals)
if err != nil {
return mPrintf("%s", err.Error())
}
return nil
}
//export goVNext
func goVNext(pCursor unsafe.Pointer) *C.char {
vtc := lookupHandle(uintptr(pCursor)).(*sqliteVTabCursor)
err := vtc.vTabCursor.Next()
if err != nil {
return mPrintf("%s", err.Error())
}
return nil
}
//export goVEof
func goVEof(pCursor unsafe.Pointer) C.int {
vtc := lookupHandle(uintptr(pCursor)).(*sqliteVTabCursor)
err := vtc.vTabCursor.EOF()
if err {
return 1
}
return 0
}
//export goVColumn
func goVColumn(pCursor, cp unsafe.Pointer, col C.int) *C.char {
vtc := lookupHandle(uintptr(pCursor)).(*sqliteVTabCursor)
c := (*SQLiteContext)(cp)
err := vtc.vTabCursor.Column(c, int(col))
if err != nil {
return mPrintf("%s", err.Error())
}
return nil
}
//export goVRowid
func goVRowid(pCursor unsafe.Pointer, pRowid *C.sqlite3_int64) *C.char {
vtc := lookupHandle(uintptr(pCursor)).(*sqliteVTabCursor)
rowid, err := vtc.vTabCursor.Rowid()
if err != nil {
return mPrintf("%s", err.Error())
}
*pRowid = C.sqlite3_int64(rowid)
return nil
}
//export goVUpdate
func goVUpdate(pVTab unsafe.Pointer, argc C.int, argv **C.sqlite3_value, pRowid *C.sqlite3_int64) *C.char {
vt := lookupHandle(uintptr(pVTab)).(*sqliteVTab)
var tname string
if n, ok := vt.vTab.(interface {
TableName() string
}); ok {
tname = n.TableName() + " "
}
err := fmt.Errorf("virtual %s table %sis read-only", vt.module.name, tname)
if v, ok := vt.vTab.(VTabUpdater); ok {
// convert argv
args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc]
vals := make([]interface{}, 0, argc)
for _, v := range args {
conv, err := callbackArgGeneric(v)
if err != nil {
return mPrintf("%s", err.Error())
}
// work around for SQLITE_NULL
x := conv.Interface()
if z, ok := x.([]byte); ok && z == nil {
x = nil
}
vals = append(vals, x)
}
switch {
case argc == 1:
err = v.Delete(vals[0])
case argc > 1 && vals[0] == nil:
var id int64
id, err = v.Insert(vals[1], vals[2:])
if err == nil {
*pRowid = C.sqlite3_int64(id)
}
case argc > 1:
err = v.Update(vals[1], vals[2:])
}
}
if err != nil {
return mPrintf("%s", err.Error())
}
return nil
}
// Module is a "virtual table module", it defines the implementation of a
// virtual tables. See: http://sqlite.org/c3ref/module.html
type Module interface {
// http://sqlite.org/vtab.html#xcreate
Create(c *SQLiteConn, args []string) (VTab, error)
// http://sqlite.org/vtab.html#xconnect
Connect(c *SQLiteConn, args []string) (VTab, error)
// http://sqlite.org/c3ref/create_module.html
DestroyModule()
}
// VTab describes a particular instance of the virtual table.
// See: http://sqlite.org/c3ref/vtab.html
type VTab interface {
// http://sqlite.org/vtab.html#xbestindex
BestIndex([]InfoConstraint, []InfoOrderBy) (*IndexResult, error)
// http://sqlite.org/vtab.html#xdisconnect
Disconnect() error
// http://sqlite.org/vtab.html#sqlite3_module.xDestroy
Destroy() error
// http://sqlite.org/vtab.html#xopen
Open() (VTabCursor, error)
}
// VTabUpdater is implemented by a VTab whose rows can be inserted, updated,
// or deleted.
// See: https://sqlite.org/vtab.html#xupdate
type VTabUpdater interface {
Delete(interface{}) error
Insert(interface{}, []interface{}) (int64, error)
Update(interface{}, []interface{}) error
}
// VTabCursor describes cursors that point into the virtual table and are used
// to loop through the virtual table. See: http://sqlite.org/c3ref/vtab_cursor.html
type VTabCursor interface {
// http://sqlite.org/vtab.html#xclose
Close() error
// http://sqlite.org/vtab.html#xfilter
Filter(idxNum int, idxStr string, vals []interface{}) error
// http://sqlite.org/vtab.html#xnext
Next() error
// http://sqlite.org/vtab.html#xeof
EOF() bool
// http://sqlite.org/vtab.html#xcolumn
Column(c *SQLiteContext, col int) error
// http://sqlite.org/vtab.html#xrowid
Rowid() (int64, error)
}
// DeclareVTab declares the Schema of a virtual table.
// See: http://sqlite.org/c3ref/declare_vtab.html
func (c *SQLiteConn) DeclareVTab(sql string) error {
zSQL := C.CString(sql)
defer C.free(unsafe.Pointer(zSQL))
rv := C.sqlite3_declare_vtab(c.db, zSQL)
if rv != C.SQLITE_OK {
return c.lastError()
}
return nil
}
// CreateModule registers a virtual table implementation.
// See: http://sqlite.org/c3ref/create_module.html
func (c *SQLiteConn) CreateModule(moduleName string, module Module) error {
mname := C.CString(moduleName)
defer C.free(unsafe.Pointer(mname))
udm := sqliteModule{c, moduleName, module}
rv := C._sqlite3_create_module(c.db, mname, C.uintptr_t(newHandle(c, &udm)))
if rv != C.SQLITE_OK {
return c.lastError()
}
return nil
}
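
Putting Module, VTab, and VTabCursor together, a minimal read-only virtual table over a fixed string slice could look like the sketch below. It assumes the driver is built with the `vtable` tag; the driver name `sqlite3_vtab_demo`, the module name `strlist`, and the demo types are illustrative only, and SQLiteContext.ResultText is the result-setting helper defined elsewhere in this package:

```
package main

import (
	"database/sql"
	"fmt"
	"log"

	sqlite3 "github.com/mattn/go-sqlite3"
)

type strListModule struct{ rows []string }

func (m *strListModule) Create(c *sqlite3.SQLiteConn, args []string) (sqlite3.VTab, error) {
	// Declare the schema SQLite should expose for this virtual table.
	if err := c.DeclareVTab("CREATE TABLE x(value TEXT)"); err != nil {
		return nil, err
	}
	return &strListTable{rows: m.rows}, nil
}
func (m *strListModule) Connect(c *sqlite3.SQLiteConn, args []string) (sqlite3.VTab, error) {
	return m.Create(c, args)
}
func (m *strListModule) DestroyModule() {}

type strListTable struct{ rows []string }

func (t *strListTable) BestIndex(csts []sqlite3.InfoConstraint, ob []sqlite3.InfoOrderBy) (*sqlite3.IndexResult, error) {
	// Full scan: consume no constraints.
	return &sqlite3.IndexResult{Used: make([]bool, len(csts))}, nil
}
func (t *strListTable) Disconnect() error { return nil }
func (t *strListTable) Destroy() error    { return nil }
func (t *strListTable) Open() (sqlite3.VTabCursor, error) {
	return &strListCursor{table: t}, nil
}

type strListCursor struct {
	table *strListTable
	index int
}

func (c *strListCursor) Close() error { return nil }
func (c *strListCursor) Filter(idxNum int, idxStr string, vals []interface{}) error {
	c.index = 0 // restart the scan
	return nil
}
func (c *strListCursor) Next() error { c.index++; return nil }
func (c *strListCursor) EOF() bool   { return c.index >= len(c.table.rows) }
func (c *strListCursor) Column(ctx *sqlite3.SQLiteContext, col int) error {
	ctx.ResultText(c.table.rows[c.index])
	return nil
}
func (c *strListCursor) Rowid() (int64, error) { return int64(c.index), nil }

func main() {
	sql.Register("sqlite3_vtab_demo", &sqlite3.SQLiteDriver{
		ConnectHook: func(conn *sqlite3.SQLiteConn) error {
			return conn.CreateModule("strlist", &strListModule{rows: []string{"a", "b", "c"}})
		},
	})

	db, err := sql.Open("sqlite3_vtab_demo", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec("CREATE VIRTUAL TABLE demo USING strlist"); err != nil {
		log.Fatal(err)
	}
	rows, err := db.Query("SELECT rowid, value FROM demo")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var id int64
		var v string
		if err := rows.Scan(&id, &v); err != nil {
			log.Fatal(err)
		}
		fmt.Println(id, v)
	}
}
```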

Some files were not shown because too many files have changed in this diff.