mirror of https://github.com/fnproject/fn.git (synced 2022-10-28 21:29:17 +03:00)
Bye bye openapi (#1081)
* add DateTime sans mgo

* change all uses of strfmt.DateTime to common.DateTime, remove test strfmt usage

* remove api tests, system-test dep on api test

multiple reasons to remove the api tests:

* awkward dependency with fn_go meant generating bindings on a branched fn to vendor those to test new stuff. this is at a minimum not at all intuitive, worth it, nor a fun way to spend the finite amount of time we have to live.

* api tests only tested a subset of functionality that the server/ api tests already test, and we risk having tests where one tests some thing and the other doesn't. let's not. we have too many test suites as it is, and these pretty much only test that we updated the fn_go bindings, which is actually a hassle as noted above and the cli will pretty quickly figure out anyway.

* fn_go relies on openapi, which relies on mgo, which is deprecated and we'd like to remove as a dependency. openapi is a _huge_ dep built in a NIH fashion, that cannot simply remove the mgo dep as users may be using it. we've now stolen their date time and otherwise killed usage of it in fn core, for fn_go it still exists but that's less of a problem.

* update deps

removals:

* easyjson
* mgo
* go-openapi
* mapstructure
* fn_go
* purell
* go-validator

also, had to lock docker. we shouldn't use docker on master anyway, they strongly advise against that. had no luck with latest version rev, so i locked it to what we were using before. until next time. the rest is just playing dep roulette, those end up removing a ton tho

* fix exec test to work

* account for john le cache
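For context, the strfmt.DateTime to common.DateTime swap amounts to owning a small RFC 3339 wrapper around time.Time instead of importing go-openapi's (and, transitively, mgo). A minimal sketch of what such a type might look like — the name DateTime and the exact format handling here are illustrative assumptions, not necessarily what fn's real common package does:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// DateTime is a hypothetical stand-in for fn's common.DateTime: a thin
// wrapper over time.Time that round-trips as an RFC 3339 JSON string,
// which is the behavior strfmt.DateTime provided.
type DateTime time.Time

// MarshalJSON encodes the timestamp as an RFC 3339 JSON string.
func (d DateTime) MarshalJSON() ([]byte, error) {
	return json.Marshal(time.Time(d).Format(time.RFC3339Nano))
}

// UnmarshalJSON decodes an RFC 3339 JSON string into the timestamp.
func (d *DateTime) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	t, err := time.Parse(time.RFC3339Nano, s)
	if err != nil {
		return err
	}
	*d = DateTime(t)
	return nil
}

func main() {
	// Round-trip a timestamp through JSON, as the API model types do.
	now := DateTime(time.Now().UTC())
	b, _ := json.Marshal(now)
	fmt.Println(string(b))

	var parsed DateTime
	if err := json.Unmarshal(b, &parsed); err != nil {
		panic(err)
	}
	fmt.Println(time.Time(parsed).Format(time.RFC3339Nano))
}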
45 vendor/gopkg.in/mgo.v2/.travis.yml generated vendored
@@ -1,45 +0,0 @@
language: go

go_import_path: gopkg.in/mgo.v2

addons:
    apt:
        packages:

env:
    global:
        - BUCKET=https://niemeyer.s3.amazonaws.com
    matrix:
        - GO=1.4.1 MONGODB=x86_64-2.2.7
        - GO=1.4.1 MONGODB=x86_64-2.4.14
        - GO=1.4.1 MONGODB=x86_64-2.6.11
        - GO=1.4.1 MONGODB=x86_64-3.0.9
        - GO=1.4.1 MONGODB=x86_64-3.2.3-nojournal
        - GO=1.5.3 MONGODB=x86_64-3.0.9
        - GO=1.6 MONGODB=x86_64-3.0.9

install:
    - eval "$(gimme $GO)"

    - wget $BUCKET/mongodb-linux-$MONGODB.tgz
    - tar xzvf mongodb-linux-$MONGODB.tgz
    - export PATH=$PWD/mongodb-linux-$MONGODB/bin:$PATH

    - wget $BUCKET/daemontools.tar.gz
    - tar xzvf daemontools.tar.gz
    - export PATH=$PWD/daemontools:$PATH

    - go get gopkg.in/check.v1
    - go get gopkg.in/yaml.v2
    - go get gopkg.in/tomb.v2

before_script:
    - export NOIPV6=1
    - make startdb

script:
    - (cd bson && go test -check.v)
    - go test -check.v -fast
    - (cd txn && go test -check.v)

# vim:sw=4:ts=4:et
25 vendor/gopkg.in/mgo.v2/LICENSE generated vendored
@@ -1,25 +0,0 @@
mgo - MongoDB driver for Go

Copyright (c) 2010-2013 - Gustavo Niemeyer <gustavo@niemeyer.net>

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5 vendor/gopkg.in/mgo.v2/Makefile generated vendored
@@ -1,5 +0,0 @@
startdb:
	@harness/setup.sh start

stopdb:
	@harness/setup.sh stop
4 vendor/gopkg.in/mgo.v2/README.md generated vendored
@@ -1,4 +0,0 @@
The MongoDB driver for Go
-------------------------

Please go to [http://labix.org/mgo](http://labix.org/mgo) for all project details.
467 vendor/gopkg.in/mgo.v2/auth.go generated vendored
@@ -1,467 +0,0 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo

import (
	"crypto/md5"
	"crypto/sha1"
	"encoding/hex"
	"errors"
	"fmt"
	"sync"

	"gopkg.in/mgo.v2/bson"
	"gopkg.in/mgo.v2/internal/scram"
)

type authCmd struct {
	Authenticate int

	Nonce string
	User  string
	Key   string
}

type startSaslCmd struct {
	StartSASL int `bson:"startSasl"`
}

type authResult struct {
	ErrMsg string
	Ok     bool
}

type getNonceCmd struct {
	GetNonce int
}

type getNonceResult struct {
	Nonce string
	Err   string "$err"
	Code  int
}

type logoutCmd struct {
	Logout int
}

type saslCmd struct {
	Start          int    `bson:"saslStart,omitempty"`
	Continue       int    `bson:"saslContinue,omitempty"`
	ConversationId int    `bson:"conversationId,omitempty"`
	Mechanism      string `bson:"mechanism,omitempty"`
	Payload        []byte
}

type saslResult struct {
	Ok    bool `bson:"ok"`
	NotOk bool `bson:"code"` // Server <= 2.3.2 returns ok=1 & code>0 on errors (WTF?)
	Done  bool

	ConversationId int `bson:"conversationId"`
	Payload        []byte
	ErrMsg         string
}

type saslStepper interface {
	Step(serverData []byte) (clientData []byte, done bool, err error)
	Close()
}

func (socket *mongoSocket) getNonce() (nonce string, err error) {
	socket.Lock()
	for socket.cachedNonce == "" && socket.dead == nil {
		debugf("Socket %p to %s: waiting for nonce", socket, socket.addr)
		socket.gotNonce.Wait()
	}
	if socket.cachedNonce == "mongos" {
		socket.Unlock()
		return "", errors.New("Can't authenticate with mongos; see http://j.mp/mongos-auth")
	}
	debugf("Socket %p to %s: got nonce", socket, socket.addr)
	nonce, err = socket.cachedNonce, socket.dead
	socket.cachedNonce = ""
	socket.Unlock()
	if err != nil {
		nonce = ""
	}
	return
}

func (socket *mongoSocket) resetNonce() {
	debugf("Socket %p to %s: requesting a new nonce", socket, socket.addr)
	op := &queryOp{}
	op.query = &getNonceCmd{GetNonce: 1}
	op.collection = "admin.$cmd"
	op.limit = -1
	op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
		if err != nil {
			socket.kill(errors.New("getNonce: "+err.Error()), true)
			return
		}
		result := &getNonceResult{}
		err = bson.Unmarshal(docData, &result)
		if err != nil {
			socket.kill(errors.New("Failed to unmarshal nonce: "+err.Error()), true)
			return
		}
		debugf("Socket %p to %s: nonce unmarshalled: %#v", socket, socket.addr, result)
		if result.Code == 13390 {
			// mongos doesn't yet support auth (see http://j.mp/mongos-auth)
			result.Nonce = "mongos"
		} else if result.Nonce == "" {
			var msg string
			if result.Err != "" {
				msg = fmt.Sprintf("Got an empty nonce: %s (%d)", result.Err, result.Code)
			} else {
				msg = "Got an empty nonce"
			}
			socket.kill(errors.New(msg), true)
			return
		}
		socket.Lock()
		if socket.cachedNonce != "" {
			socket.Unlock()
			panic("resetNonce: nonce already cached")
		}
		socket.cachedNonce = result.Nonce
		socket.gotNonce.Signal()
		socket.Unlock()
	}
	err := socket.Query(op)
	if err != nil {
		socket.kill(errors.New("resetNonce: "+err.Error()), true)
	}
}

func (socket *mongoSocket) Login(cred Credential) error {
	socket.Lock()
	if cred.Mechanism == "" && socket.serverInfo.MaxWireVersion >= 3 {
		cred.Mechanism = "SCRAM-SHA-1"
	}
	for _, sockCred := range socket.creds {
		if sockCred == cred {
			debugf("Socket %p to %s: login: db=%q user=%q (already logged in)", socket, socket.addr, cred.Source, cred.Username)
			socket.Unlock()
			return nil
		}
	}
	if socket.dropLogout(cred) {
		debugf("Socket %p to %s: login: db=%q user=%q (cached)", socket, socket.addr, cred.Source, cred.Username)
		socket.creds = append(socket.creds, cred)
		socket.Unlock()
		return nil
	}
	socket.Unlock()

	debugf("Socket %p to %s: login: db=%q user=%q", socket, socket.addr, cred.Source, cred.Username)

	var err error
	switch cred.Mechanism {
	case "", "MONGODB-CR", "MONGO-CR": // Name changed to MONGODB-CR in SERVER-8501.
		err = socket.loginClassic(cred)
	case "PLAIN":
		err = socket.loginPlain(cred)
	case "MONGODB-X509":
		err = socket.loginX509(cred)
	default:
		// Try SASL for everything else, if it is available.
		err = socket.loginSASL(cred)
	}

	if err != nil {
		debugf("Socket %p to %s: login error: %s", socket, socket.addr, err)
	} else {
		debugf("Socket %p to %s: login successful", socket, socket.addr)
	}
	return err
}

func (socket *mongoSocket) loginClassic(cred Credential) error {
	// Note that this only works properly because this function is
	// synchronous, which means the nonce won't get reset while we're
	// using it and any other login requests will block waiting for a
	// new nonce provided in the defer call below.
	nonce, err := socket.getNonce()
	if err != nil {
		return err
	}
	defer socket.resetNonce()

	psum := md5.New()
	psum.Write([]byte(cred.Username + ":mongo:" + cred.Password))

	ksum := md5.New()
	ksum.Write([]byte(nonce + cred.Username))
	ksum.Write([]byte(hex.EncodeToString(psum.Sum(nil))))

	key := hex.EncodeToString(ksum.Sum(nil))

	cmd := authCmd{Authenticate: 1, User: cred.Username, Nonce: nonce, Key: key}
	res := authResult{}
	return socket.loginRun(cred.Source, &cmd, &res, func() error {
		if !res.Ok {
			return errors.New(res.ErrMsg)
		}
		socket.Lock()
		socket.dropAuth(cred.Source)
		socket.creds = append(socket.creds, cred)
		socket.Unlock()
		return nil
	})
}

type authX509Cmd struct {
	Authenticate int
	User         string
	Mechanism    string
}

func (socket *mongoSocket) loginX509(cred Credential) error {
	cmd := authX509Cmd{Authenticate: 1, User: cred.Username, Mechanism: "MONGODB-X509"}
	res := authResult{}
	return socket.loginRun(cred.Source, &cmd, &res, func() error {
		if !res.Ok {
			return errors.New(res.ErrMsg)
		}
		socket.Lock()
		socket.dropAuth(cred.Source)
		socket.creds = append(socket.creds, cred)
		socket.Unlock()
		return nil
	})
}

func (socket *mongoSocket) loginPlain(cred Credential) error {
	cmd := saslCmd{Start: 1, Mechanism: "PLAIN", Payload: []byte("\x00" + cred.Username + "\x00" + cred.Password)}
	res := authResult{}
	return socket.loginRun(cred.Source, &cmd, &res, func() error {
		if !res.Ok {
			return errors.New(res.ErrMsg)
		}
		socket.Lock()
		socket.dropAuth(cred.Source)
		socket.creds = append(socket.creds, cred)
		socket.Unlock()
		return nil
	})
}

func (socket *mongoSocket) loginSASL(cred Credential) error {
	var sasl saslStepper
	var err error
	if cred.Mechanism == "SCRAM-SHA-1" {
		// SCRAM is handled without external libraries.
		sasl = saslNewScram(cred)
	} else if len(cred.ServiceHost) > 0 {
		sasl, err = saslNew(cred, cred.ServiceHost)
	} else {
		sasl, err = saslNew(cred, socket.Server().Addr)
	}
	if err != nil {
		return err
	}
	defer sasl.Close()

	// The goal of this logic is to carry a locked socket until the
	// local SASL step confirms the auth is valid; the socket needs to be
	// locked so that concurrent action doesn't leave the socket in an
	// auth state that doesn't reflect the operations that took place.
	// As a simple case, imagine inverting login=>logout to logout=>login.
	//
	// The logic below works because the lock func isn't called concurrently.
	locked := false
	lock := func(b bool) {
		if locked != b {
			locked = b
			if b {
				socket.Lock()
			} else {
				socket.Unlock()
			}
		}
	}

	lock(true)
	defer lock(false)

	start := 1
	cmd := saslCmd{}
	res := saslResult{}
	for {
		payload, done, err := sasl.Step(res.Payload)
		if err != nil {
			return err
		}
		if done && res.Done {
			socket.dropAuth(cred.Source)
			socket.creds = append(socket.creds, cred)
			break
		}
		lock(false)

		cmd = saslCmd{
			Start:          start,
			Continue:       1 - start,
			ConversationId: res.ConversationId,
			Mechanism:      cred.Mechanism,
			Payload:        payload,
		}
		start = 0
		err = socket.loginRun(cred.Source, &cmd, &res, func() error {
			// See the comment on lock for why this is necessary.
			lock(true)
			if !res.Ok || res.NotOk {
				return fmt.Errorf("server returned error on SASL authentication step: %s", res.ErrMsg)
			}
			return nil
		})
		if err != nil {
			return err
		}
		if done && res.Done {
			socket.dropAuth(cred.Source)
			socket.creds = append(socket.creds, cred)
			break
		}
	}

	return nil
}

func saslNewScram(cred Credential) *saslScram {
	credsum := md5.New()
	credsum.Write([]byte(cred.Username + ":mongo:" + cred.Password))
	client := scram.NewClient(sha1.New, cred.Username, hex.EncodeToString(credsum.Sum(nil)))
	return &saslScram{cred: cred, client: client}
}

type saslScram struct {
	cred   Credential
	client *scram.Client
}

func (s *saslScram) Close() {}

func (s *saslScram) Step(serverData []byte) (clientData []byte, done bool, err error) {
	more := s.client.Step(serverData)
	return s.client.Out(), !more, s.client.Err()
}

func (socket *mongoSocket) loginRun(db string, query, result interface{}, f func() error) error {
	var mutex sync.Mutex
	var replyErr error
	mutex.Lock()

	op := queryOp{}
	op.query = query
	op.collection = db + ".$cmd"
	op.limit = -1
	op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
		defer mutex.Unlock()

		if err != nil {
			replyErr = err
			return
		}

		err = bson.Unmarshal(docData, result)
		if err != nil {
			replyErr = err
		} else {
			// Must handle this within the read loop for the socket, so
			// that concurrent login requests are properly ordered.
			replyErr = f()
		}
	}

	err := socket.Query(&op)
	if err != nil {
		return err
	}
	mutex.Lock() // Wait.
	return replyErr
}

func (socket *mongoSocket) Logout(db string) {
	socket.Lock()
	cred, found := socket.dropAuth(db)
	if found {
		debugf("Socket %p to %s: logout: db=%q (flagged)", socket, socket.addr, db)
		socket.logout = append(socket.logout, cred)
	}
	socket.Unlock()
}

func (socket *mongoSocket) LogoutAll() {
	socket.Lock()
	if l := len(socket.creds); l > 0 {
		debugf("Socket %p to %s: logout all (flagged %d)", socket, socket.addr, l)
		socket.logout = append(socket.logout, socket.creds...)
		socket.creds = socket.creds[0:0]
	}
	socket.Unlock()
}

func (socket *mongoSocket) flushLogout() (ops []interface{}) {
	socket.Lock()
	if l := len(socket.logout); l > 0 {
		debugf("Socket %p to %s: logout all (flushing %d)", socket, socket.addr, l)
		for i := 0; i != l; i++ {
			op := queryOp{}
			op.query = &logoutCmd{1}
			op.collection = socket.logout[i].Source + ".$cmd"
			op.limit = -1
			ops = append(ops, &op)
		}
		socket.logout = socket.logout[0:0]
	}
	socket.Unlock()
	return
}

func (socket *mongoSocket) dropAuth(db string) (cred Credential, found bool) {
	for i, sockCred := range socket.creds {
		if sockCred.Source == db {
			copy(socket.creds[i:], socket.creds[i+1:])
			socket.creds = socket.creds[:len(socket.creds)-1]
			return sockCred, true
		}
	}
	return cred, false
}

func (socket *mongoSocket) dropLogout(cred Credential) (found bool) {
	for i, sockCred := range socket.logout {
		if sockCred == cred {
			copy(socket.logout[i:], socket.logout[i+1:])
			socket.logout = socket.logout[:len(socket.logout)-1]
			return true
		}
	}
	return false
}
1180 vendor/gopkg.in/mgo.v2/auth_test.go generated vendored
File diff suppressed because it is too large
25 vendor/gopkg.in/mgo.v2/bson/LICENSE generated vendored
@@ -1,25 +0,0 @@
BSON library for Go

Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
738 vendor/gopkg.in/mgo.v2/bson/bson.go generated vendored
@@ -1,738 +0,0 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Package bson is an implementation of the BSON specification for Go:
//
//     http://bsonspec.org
//
// It was created as part of the mgo MongoDB driver for Go, but is standalone
// and may be used on its own without the driver.
package bson

import (
	"bytes"
	"crypto/md5"
	"crypto/rand"
	"encoding/binary"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"reflect"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"time"
)

// --------------------------------------------------------------------------
// The public API.

// A value implementing the bson.Getter interface will have its GetBSON
// method called when the given value has to be marshalled, and the result
// of this method will be marshaled in place of the actual object.
//
// If GetBSON returns a non-nil error, the marshalling procedure
// will stop and error out with the provided value.
type Getter interface {
	GetBSON() (interface{}, error)
}

// A value implementing the bson.Setter interface will receive the BSON
// value via the SetBSON method during unmarshaling, and the object
// itself will not be changed as usual.
//
// If setting the value works, the method should return nil or alternatively
// bson.SetZero to set the respective field to its zero value (nil for
// pointer types). If SetBSON returns a value of type bson.TypeError, the
// BSON value will be omitted from a map or slice being decoded and the
// unmarshalling will continue. If it returns any other non-nil error, the
// unmarshalling procedure will stop and error out with the provided value.
//
// This interface is generally useful in pointer receivers, since the method
// will want to change the receiver. A type field that implements the Setter
// interface doesn't have to be a pointer, though.
//
// Unlike the usual behavior, unmarshalling onto a value that implements a
// Setter interface will NOT reset the value to its zero state. This allows
// the value to decide by itself how to be unmarshalled.
//
// For example:
//
//     type MyString string
//
//     func (s *MyString) SetBSON(raw bson.Raw) error {
//         return raw.Unmarshal(s)
//     }
//
type Setter interface {
	SetBSON(raw Raw) error
}

// SetZero may be returned from a SetBSON method to have the value set to
// its respective zero value. When used in pointer values, this will set the
// field to nil rather than to the pre-allocated value.
var SetZero = errors.New("set to zero")

// M is a convenient alias for a map[string]interface{} map, useful for
// dealing with BSON in a native way. For instance:
//
//     bson.M{"a": 1, "b": true}
//
// There's no special handling for this type in addition to what's done anyway
// for an equivalent map type. Elements in the map will be dumped in an
// undefined order. See also the bson.D type for an ordered alternative.
type M map[string]interface{}

// D represents a BSON document containing ordered elements. For example:
//
//     bson.D{{"a", 1}, {"b", true}}
//
// In some situations, such as when creating indexes for MongoDB, the order in
// which the elements are defined is important. If the order is not important,
// using a map is generally more comfortable. See bson.M and bson.RawD.
type D []DocElem

// DocElem is an element of the bson.D document representation.
type DocElem struct {
	Name  string
	Value interface{}
}

// Map returns a map out of the ordered element name/value pairs in d.
func (d D) Map() (m M) {
	m = make(M, len(d))
	for _, item := range d {
		m[item.Name] = item.Value
	}
	return m
}

// The Raw type represents raw unprocessed BSON documents and elements.
// Kind is the kind of element as defined per the BSON specification, and
// Data is the raw unprocessed data for the respective element.
// Using this type it is possible to unmarshal or marshal values partially.
//
// Relevant documentation:
//
//     http://bsonspec.org/#/specification
//
type Raw struct {
	Kind byte
	Data []byte
}

// RawD represents a BSON document containing raw unprocessed elements.
// This low-level representation may be useful when lazily processing
// documents of uncertain content, or when manipulating the raw content
// documents in general.
type RawD []RawDocElem

// See the RawD type.
type RawDocElem struct {
	Name  string
	Value Raw
}

// ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes
// long. MongoDB objects by default have such a property set in their "_id"
// property.
//
// http://www.mongodb.org/display/DOCS/Object+IDs
type ObjectId string

// ObjectIdHex returns an ObjectId from the provided hex representation.
// Calling this function with an invalid hex representation will
// cause a runtime panic. See the IsObjectIdHex function.
func ObjectIdHex(s string) ObjectId {
	d, err := hex.DecodeString(s)
	if err != nil || len(d) != 12 {
		panic(fmt.Sprintf("invalid input to ObjectIdHex: %q", s))
	}
	return ObjectId(d)
}

// IsObjectIdHex returns whether s is a valid hex representation of
// an ObjectId. See the ObjectIdHex function.
func IsObjectIdHex(s string) bool {
	if len(s) != 24 {
		return false
	}
	_, err := hex.DecodeString(s)
	return err == nil
}

// objectIdCounter is atomically incremented when generating a new ObjectId
// using NewObjectId() function. It's used as a counter part of an id.
var objectIdCounter uint32 = readRandomUint32()

// readRandomUint32 returns a random objectIdCounter.
func readRandomUint32() uint32 {
	var b [4]byte
	_, err := io.ReadFull(rand.Reader, b[:])
	if err != nil {
		panic(fmt.Errorf("cannot read random object id: %v", err))
	}
	return uint32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24))
}

// machineId stores machine id generated once and used in subsequent calls
// to NewObjectId function.
var machineId = readMachineId()
var processId = os.Getpid()

// readMachineId generates and returns a machine id.
// If this function fails to get the hostname it will cause a runtime error.
func readMachineId() []byte {
	var sum [3]byte
	id := sum[:]
	hostname, err1 := os.Hostname()
	if err1 != nil {
		_, err2 := io.ReadFull(rand.Reader, id)
		if err2 != nil {
			panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2))
		}
		return id
	}
	hw := md5.New()
	hw.Write([]byte(hostname))
	copy(id, hw.Sum(nil))
	return id
}

// NewObjectId returns a new unique ObjectId.
func NewObjectId() ObjectId {
	var b [12]byte
	// Timestamp, 4 bytes, big endian
	binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix()))
	// Machine, first 3 bytes of md5(hostname)
	b[4] = machineId[0]
	b[5] = machineId[1]
	b[6] = machineId[2]
	// Pid, 2 bytes, specs don't specify endianness, but we use big endian.
	b[7] = byte(processId >> 8)
	b[8] = byte(processId)
	// Increment, 3 bytes, big endian
	i := atomic.AddUint32(&objectIdCounter, 1)
	b[9] = byte(i >> 16)
	b[10] = byte(i >> 8)
	b[11] = byte(i)
	return ObjectId(b[:])
}

// NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled
// with the provided number of seconds from epoch UTC, and all other parts
// filled with zeroes. It's not safe to insert a document with an id generated
// by this method, it is useful only for queries to find documents with ids
// generated before or after the specified timestamp.
func NewObjectIdWithTime(t time.Time) ObjectId {
	var b [12]byte
	binary.BigEndian.PutUint32(b[:4], uint32(t.Unix()))
	return ObjectId(string(b[:]))
}

// String returns a hex string representation of the id.
// Example: ObjectIdHex("4d88e15b60f486e428412dc9").
func (id ObjectId) String() string {
	return fmt.Sprintf(`ObjectIdHex("%x")`, string(id))
}

// Hex returns a hex representation of the ObjectId.
func (id ObjectId) Hex() string {
	return hex.EncodeToString([]byte(id))
}

// MarshalJSON turns a bson.ObjectId into a json.Marshaller.
func (id ObjectId) MarshalJSON() ([]byte, error) {
	return []byte(fmt.Sprintf(`"%x"`, string(id))), nil
}

var nullBytes = []byte("null")

// UnmarshalJSON turns *bson.ObjectId into a json.Unmarshaller.
func (id *ObjectId) UnmarshalJSON(data []byte) error {
	if len(data) > 0 && (data[0] == '{' || data[0] == 'O') {
		var v struct {
			Id   json.RawMessage `json:"$oid"`
			Func struct {
				Id json.RawMessage
			} `json:"$oidFunc"`
		}
		err := jdec(data, &v)
		if err == nil {
			if len(v.Id) > 0 {
				data = []byte(v.Id)
			} else {
				data = []byte(v.Func.Id)
			}
		}
	}
	if len(data) == 2 && data[0] == '"' && data[1] == '"' || bytes.Equal(data, nullBytes) {
		*id = ""
		return nil
	}
	if len(data) != 26 || data[0] != '"' || data[25] != '"' {
		return errors.New(fmt.Sprintf("invalid ObjectId in JSON: %s", string(data)))
	}
	var buf [12]byte
	_, err := hex.Decode(buf[:], data[1:25])
	if err != nil {
		return errors.New(fmt.Sprintf("invalid ObjectId in JSON: %s (%s)", string(data), err))
	}
	*id = ObjectId(string(buf[:]))
	return nil
}

// MarshalText turns bson.ObjectId into an encoding.TextMarshaler.
func (id ObjectId) MarshalText() ([]byte, error) {
	return []byte(fmt.Sprintf("%x", string(id))), nil
}

// UnmarshalText turns *bson.ObjectId into an encoding.TextUnmarshaler.
func (id *ObjectId) UnmarshalText(data []byte) error {
	if len(data) == 1 && data[0] == ' ' || len(data) == 0 {
		*id = ""
		return nil
	}
	if len(data) != 24 {
		return fmt.Errorf("invalid ObjectId: %s", data)
	}
	var buf [12]byte
	_, err := hex.Decode(buf[:], data[:])
	if err != nil {
		return fmt.Errorf("invalid ObjectId: %s (%s)", data, err)
	}
	*id = ObjectId(string(buf[:]))
	return nil
}

// Valid returns true if id is valid. A valid id must contain exactly 12 bytes.
func (id ObjectId) Valid() bool {
	return len(id) == 12
}

// byteSlice returns byte slice of id from start to end.
// Calling this function with an invalid id will cause a runtime panic.
func (id ObjectId) byteSlice(start, end int) []byte {
	if len(id) != 12 {
		panic(fmt.Sprintf("invalid ObjectId: %q", string(id)))
	}
	return []byte(string(id)[start:end])
}

// Time returns the timestamp part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Time() time.Time {
	// First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch.
	secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4)))
	return time.Unix(secs, 0)
}

// Machine returns the 3-byte machine id part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Machine() []byte {
	return id.byteSlice(4, 7)
}

// Pid returns the process id part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Pid() uint16 {
	return binary.BigEndian.Uint16(id.byteSlice(7, 9))
}

// Counter returns the incrementing value part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Counter() int32 {
	b := id.byteSlice(9, 12)
	// Counter is stored as big-endian 3-byte value
	return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]))
}

// The Symbol type is similar to a string and is used in languages with a
// distinct symbol type.
type Symbol string

// Now returns the current time with millisecond precision. MongoDB stores
// timestamps with the same precision, so a Time returned from this method
// will not change after a roundtrip to the database. That's the only reason
// why this function exists. Using the time.Now function also works fine
// otherwise.
func Now() time.Time {
	return time.Unix(0, time.Now().UnixNano()/1e6*1e6)
}

// MongoTimestamp is a special internal type used by MongoDB that for some
// strange reason has its own datatype defined in BSON.
type MongoTimestamp int64

type orderKey int64

// MaxKey is a special value that compares higher than all other possible BSON
// values in a MongoDB database.
var MaxKey = orderKey(1<<63 - 1)

// MinKey is a special value that compares lower than all other possible BSON
// values in a MongoDB database.
var MinKey = orderKey(-1 << 63)

type undefined struct{}

// Undefined represents the undefined BSON value.
var Undefined undefined

// Binary is a representation for non-standard binary values. Any kind should
// work, but the following are known as of this writing:
//
//     0x00 - Generic. This is decoded as []byte(data), not Binary{0x00, data}.
//     0x01 - Function (!?)
//     0x02 - Obsolete generic.
//     0x03 - UUID
//     0x05 - MD5
//     0x80 - User defined.
//
type Binary struct {
	Kind byte
	Data []byte
}

// RegEx represents a regular expression. The Options field may contain
// individual characters defining the way in which the pattern should be
// applied, and must be sorted. Valid options as of this writing are 'i' for
// case insensitive matching, 'm' for multi-line matching, 'x' for verbose
// mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all
// mode (a '.' matches everything), and 'u' to make \w, \W, and similar match
// unicode. The value of the Options parameter is not verified before being
// marshaled into the BSON format.
type RegEx struct {
	Pattern string
	Options string
}

// JavaScript is a type that holds JavaScript code. If Scope is non-nil, it
// will be marshaled as a mapping from identifiers to values that may be
// used when evaluating the provided Code.
type JavaScript struct {
	Code  string
	Scope interface{}
}

// DBPointer refers to a document id in a namespace.
//
// This type is deprecated in the BSON specification and should not be used
// except for backwards compatibility with ancient applications.
type DBPointer struct {
	Namespace string
	Id        ObjectId
}

const initialBufferSize = 64

func handleErr(err *error) {
	if r := recover(); r != nil {
		if _, ok := r.(runtime.Error); ok {
			panic(r)
		} else if _, ok := r.(externalPanic); ok {
			panic(r)
		} else if s, ok := r.(string); ok {
			*err = errors.New(s)
		} else if e, ok := r.(error); ok {
			*err = e
		} else {
			panic(r)
		}
	}
}

// Marshal serializes the in value, which may be a map or a struct value.
// In the case of struct values, only exported fields will be serialized,
// and the order of serialized fields will match that of the struct itself.
// The lowercased field name is used as the key for each exported field,
// but this behavior may be changed using the respective field tag.
// The tag may also contain flags to tweak the marshalling behavior for
// the field. The tag formats accepted are:
//
//     "[<key>][,<flag1>[,<flag2>]]"
//
//     `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
//     omitempty  Only include the field if it's not set to the zero
//                value for the type or to empty slices or maps.
//
//     minsize    Marshal an int64 value as an int32, if that's feasible
//                while preserving the numeric value.
//
//     inline     Inline the field, which must be a struct or a map,
//                causing all of its fields or keys to be processed as if
//                they were part of the outer struct. For maps, keys must
//                not conflict with the bson keys of other struct fields.
//
// Some examples:
//
//     type T struct {
//         A bool
//         B int    "myb"
//         C string "myc,omitempty"
//         D string `bson:",omitempty" json:"jsonkey"`
//         E int64  ",minsize"
//         F int64  "myf,omitempty,minsize"
//     }
//
func Marshal(in interface{}) (out []byte, err error) {
	defer handleErr(&err)
	e := &encoder{make([]byte, 0, initialBufferSize)}
	e.addDoc(reflect.ValueOf(in))
	return e.out, nil
}

// Unmarshal deserializes data from in into the out value. The out value
// must be a map, a pointer to a struct, or a pointer to a bson.D value.
// In the case of struct values, only exported fields will be deserialized.
// The lowercased field name is used as the key for each exported field,
// but this behavior may be changed using the respective field tag.
// The tag may also contain flags to tweak the marshalling behavior for
// the field. The tag formats accepted are:
//
//     "[<key>][,<flag1>[,<flag2>]]"
//
//     `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported during unmarshal (see the
// Marshal method for other flags):
//
//     inline     Inline the field, which must be a struct or a map.
//                Inlined structs are handled as if its fields were part
//                of the outer struct. An inlined map causes keys that do
//                not match any other struct field to be inserted in the
//                map rather than being discarded as usual.
//
// The target field or element types of out may not necessarily match
// the BSON values of the provided data. The following conversions are
// made automatically:
//
// - Numeric types are converted if at least the integer part of the
//   value would be preserved correctly
// - Bools are converted to numeric types as 1 or 0
// - Numeric types are converted to bools as true if not 0 or false otherwise
// - Binary and string BSON data is converted to a string, array or byte slice
//
// If the value would not fit the type and cannot be converted, it's
// silently skipped.
//
// Pointer values are initialized when necessary.
func Unmarshal(in []byte, out interface{}) (err error) {
	if raw, ok := out.(*Raw); ok {
		raw.Kind = 3
		raw.Data = in
		return nil
	}
	defer handleErr(&err)
	v := reflect.ValueOf(out)
	switch v.Kind() {
	case reflect.Ptr:
		fallthrough
	case reflect.Map:
		d := newDecoder(in)
		d.readDocTo(v)
	case reflect.Struct:
		return errors.New("Unmarshal can't deal with struct values. Use a pointer.")
	default:
		return errors.New("Unmarshal needs a map or a pointer to a struct.")
	}
	return nil
}

// Unmarshal deserializes raw into the out value. If the out value type
// is not compatible with raw, a *bson.TypeError is returned.
//
// See the Unmarshal function documentation for more details on the
// unmarshalling process.
func (raw Raw) Unmarshal(out interface{}) (err error) {
	defer handleErr(&err)
	v := reflect.ValueOf(out)
	switch v.Kind() {
	case reflect.Ptr:
		v = v.Elem()
		fallthrough
	case reflect.Map:
		d := newDecoder(raw.Data)
		good := d.readElemTo(v, raw.Kind)
		if !good {
			return &TypeError{v.Type(), raw.Kind}
		}
	case reflect.Struct:
		return errors.New("Raw Unmarshal can't deal with struct values. Use a pointer.")
	default:
		return errors.New("Raw Unmarshal needs a map or a valid pointer.")
	}
	return nil
}

type TypeError struct {
	Type reflect.Type
	Kind byte
}

func (e *TypeError) Error() string {
	return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String())
}

// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes

type structInfo struct {
	FieldsMap  map[string]fieldInfo
	FieldsList []fieldInfo
	InlineMap  int
	Zero       reflect.Value
}

type fieldInfo struct {
	Key       string
	Num       int
	OmitEmpty bool
	MinSize   bool
	Inline    []int
}

var structMap = make(map[reflect.Type]*structInfo)
var structMapMutex sync.RWMutex

type externalPanic string

func (e externalPanic) String() string {
	return string(e)
}

func getStructInfo(st reflect.Type) (*structInfo, error) {
	structMapMutex.RLock()
	sinfo, found := structMap[st]
	structMapMutex.RUnlock()
	if found {
		return sinfo, nil
	}
	n := st.NumField()
	fieldsMap := make(map[string]fieldInfo)
	fieldsList := make([]fieldInfo, 0, n)
	inlineMap := -1
	for i := 0; i != n; i++ {
		field := st.Field(i)
		if field.PkgPath != "" && !field.Anonymous {
			continue // Private field
		}

		info := fieldInfo{Num: i}

		tag := field.Tag.Get("bson")
		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
			tag = string(field.Tag)
		}
		if tag == "-" {
			continue
		}

		inline := false
		fields := strings.Split(tag, ",")
		if len(fields) > 1 {
			for _, flag := range fields[1:] {
				switch flag {
				case "omitempty":
					info.OmitEmpty = true
				case "minsize":
					info.MinSize = true
				case "inline":
					inline = true
				default:
					msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
					panic(externalPanic(msg))
				}
			}
			tag = fields[0]
		}

		if inline {
			switch field.Type.Kind() {
			case reflect.Map:
				if inlineMap >= 0 {
					return nil, errors.New("Multiple ,inline maps in struct " + st.String())
				}
				if field.Type.Key() != reflect.TypeOf("") {
					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
				}
				inlineMap = info.Num
			case reflect.Struct:
				sinfo, err := getStructInfo(field.Type)
				if err != nil {
					return nil, err
				}
				for _, finfo := range sinfo.FieldsList {
					if _, found := fieldsMap[finfo.Key]; found {
						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
						return nil, errors.New(msg)
					}
					if finfo.Inline == nil {
						finfo.Inline = []int{i, finfo.Num}
					} else {
						finfo.Inline = append([]int{i}, finfo.Inline...)
					}
					fieldsMap[finfo.Key] = finfo
					fieldsList = append(fieldsList, finfo)
				}
			default:
				panic("Option ,inline needs a struct value or map field")
			}
			continue
		}

		if tag != "" {
			info.Key = tag
		} else {
			info.Key = strings.ToLower(field.Name)
		}

		if _, found = fieldsMap[info.Key]; found {
			msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
			return nil, errors.New(msg)
		}

		fieldsList = append(fieldsList, info)
		fieldsMap[info.Key] = info
	}
	sinfo = &structInfo{
		fieldsMap,
		fieldsList,
		inlineMap,
		reflect.New(st).Elem(),
	}
	structMapMutex.Lock()
	structMap[st] = sinfo
	structMapMutex.Unlock()
	return sinfo, nil
}
1832 vendor/gopkg.in/mgo.v2/bson/bson_test.go generated vendored
File diff suppressed because it is too large
310 vendor/gopkg.in/mgo.v2/bson/decimal.go generated vendored
@@ -1,310 +0,0 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package bson

import (
	"fmt"
	"strconv"
	"strings"
)

// Decimal128 holds decimal128 BSON values.
type Decimal128 struct {
	h, l uint64
}

func (d Decimal128) String() string {
	var pos int     // positive sign
	var e int       // exponent
	var h, l uint64 // significand high/low

	if d.h>>63&1 == 0 {
		pos = 1
	}

	switch d.h >> 58 & (1<<5 - 1) {
	case 0x1F:
		return "NaN"
	case 0x1E:
		return "-Inf"[pos:]
	}

	l = d.l
	if d.h>>61&3 == 3 {
		// Bits: 1*sign 2*ignored 14*exponent 111*significand.
		// Implicit 0b100 prefix in significand.
		e = int(d.h>>47&(1<<14-1)) - 6176
		//h = 4<<47 | d.h&(1<<47-1)
		// Spec says all of these values are out of range.
		h, l = 0, 0
	} else {
		// Bits: 1*sign 14*exponent 113*significand
		e = int(d.h>>49&(1<<14-1)) - 6176
		h = d.h & (1<<49 - 1)
	}

	// Would be handled by the logic below, but that's trivial and common.
	if h == 0 && l == 0 && e == 0 {
		return "-0"[pos:]
	}

	var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero.
	var last = len(repr)
	var i = len(repr)
	var dot = len(repr) + e
	var rem uint32
Loop:
	for d9 := 0; d9 < 5; d9++ {
		h, l, rem = divmod(h, l, 1e9)
		for d1 := 0; d1 < 9; d1++ {
			// Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc.
			if i < len(repr) && (dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0)) {
				e += len(repr) - i
				i--
				repr[i] = '.'
				last = i - 1
				dot = len(repr) // Unmark.
			}
			c := '0' + byte(rem%10)
			rem /= 10
			i--
			repr[i] = c
			// Handle "0E+3", "1E+3", etc.
			if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-5 || e > 0) {
				last = i
				break Loop
			}
			if c != '0' {
				last = i
			}
			// Break early. Works without it, but why.
			if dot > i && l == 0 && h == 0 && rem == 0 {
				break Loop
			}
		}
	}
	repr[last-1] = '-'
	last--

	if e > 0 {
		return string(repr[last+pos:]) + "E+" + strconv.Itoa(e)
	}
	if e < 0 {
		return string(repr[last+pos:]) + "E" + strconv.Itoa(e)
	}
	return string(repr[last+pos:])
}

func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) {
	div64 := uint64(div)
	a := h >> 32
	aq := a / div64
	ar := a % div64
	b := ar<<32 + h&(1<<32-1)
	bq := b / div64
	br := b % div64
	c := br<<32 + l>>32
	cq := c / div64
	cr := c % div64
	d := cr<<32 + l&(1<<32-1)
	dq := d / div64
	dr := d % div64
	return (aq<<32 | bq), (cq<<32 | dq), uint32(dr)
}

var dNaN = Decimal128{0x1F << 58, 0}
var dPosInf = Decimal128{0x1E << 58, 0}
var dNegInf = Decimal128{0x3E << 58, 0}

func dErr(s string) (Decimal128, error) {
	return dNaN, fmt.Errorf("cannot parse %q as a decimal128", s)
}

func ParseDecimal128(s string) (Decimal128, error) {
	orig := s
	if s == "" {
		return dErr(orig)
	}
	neg := s[0] == '-'
	if neg || s[0] == '+' {
		s = s[1:]
	}

	if (len(s) == 3 || len(s) == 8) && (s[0] == 'N' || s[0] == 'n' || s[0] == 'I' || s[0] == 'i') {
		if s == "NaN" || s == "nan" || strings.EqualFold(s, "nan") {
			return dNaN, nil
		}
		if s == "Inf" || s == "inf" || strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") {
			if neg {
				return dNegInf, nil
			}
			return dPosInf, nil
		}
		return dErr(orig)
	}

	var h, l uint64
	var e int

	var add, ovr uint32
	var mul uint32 = 1
	var dot = -1
	var digits = 0
	var i = 0
	for i < len(s) {
		c := s[i]
		if mul == 1e9 {
			h, l, ovr = muladd(h, l, mul, add)
			mul, add = 1, 0
			if ovr > 0 || h&((1<<15-1)<<49) > 0 {
				return dErr(orig)
			}
		}
		if c >= '0' && c <= '9' {
			i++
			if c > '0' || digits > 0 {
				digits++
			}
			if digits > 34 {
				if c == '0' {
					// Exact rounding.
					e++
					continue
				}
				return dErr(orig)
			}
			mul *= 10
			add *= 10
			add += uint32(c - '0')
			continue
		}
		if c == '.' {
			i++
			if dot >= 0 || i == 1 && len(s) == 1 {
				return dErr(orig)
			}
			if i == len(s) {
				break
			}
			if s[i] < '0' || s[i] > '9' || e > 0 {
				return dErr(orig)
			}
			dot = i
			continue
		}
		break
	}
	if i == 0 {
		return dErr(orig)
	}
	if mul > 1 {
		h, l, ovr = muladd(h, l, mul, add)
		if ovr > 0 || h&((1<<15-1)<<49) > 0 {
			return dErr(orig)
		}
	}
	if dot >= 0 {
		e += dot - i
	}
	if i+1 < len(s) && (s[i] == 'E' || s[i] == 'e') {
		i++
		eneg := s[i] == '-'
		if eneg || s[i] == '+' {
			i++
			if i == len(s) {
				return dErr(orig)
			}
		}
		n := 0
		for i < len(s) && n < 1e4 {
			c := s[i]
			i++
			if c < '0' || c > '9' {
				return dErr(orig)
			}
			n *= 10
			n += int(c - '0')
		}
		if eneg {
			n = -n
		}
		e += n
		for e < -6176 {
			// Subnormal.
			var div uint32 = 1
			for div < 1e9 && e < -6176 {
				div *= 10
				e++
			}
			var rem uint32
			h, l, rem = divmod(h, l, div)
			if rem > 0 {
				return dErr(orig)
			}
		}
		for e > 6111 {
			// Clamped.
			var mul uint32 = 1
			for mul < 1e9 && e > 6111 {
				mul *= 10
				e--
			}
			h, l, ovr = muladd(h, l, mul, 0)
			if ovr > 0 || h&((1<<15-1)<<49) > 0 {
				return dErr(orig)
			}
		}
		if e < -6176 || e > 6111 {
			return dErr(orig)
		}
	}

	if i < len(s) {
		return dErr(orig)
	}

	h |= uint64(e+6176) & uint64(1<<14-1) << 49
	if neg {
		h |= 1 << 63
	}
	return Decimal128{h, l}, nil
}

func muladd(h, l uint64, mul uint32, add uint32) (resh, resl uint64, overflow uint32) {
	mul64 := uint64(mul)
	a := mul64 * (l & (1<<32 - 1))
	b := a>>32 + mul64*(l>>32)
	c := b>>32 + mul64*(h&(1<<32-1))
	d := c>>32 + mul64*(h>>32)

	a = a&(1<<32-1) + uint64(add)
	b = b&(1<<32-1) + a>>32
	c = c&(1<<32-1) + b>>32
	d = d&(1<<32-1) + c>>32

	return (d<<32 | c&(1<<32-1)), (b<<32 | a&(1<<32-1)), uint32(d >> 32)
}
4109
vendor/gopkg.in/mgo.v2/bson/decimal_test.go
generated
vendored
File diff suppressed because it is too large
849
vendor/gopkg.in/mgo.v2/bson/decode.go
generated
vendored
@@ -1,849 +0,0 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// gobson - BSON library for Go.

package bson

import (
	"fmt"
	"math"
	"net/url"
	"reflect"
	"strconv"
	"sync"
	"time"
)

type decoder struct {
	in      []byte
	i       int
	docType reflect.Type
}

var typeM = reflect.TypeOf(M{})

func newDecoder(in []byte) *decoder {
	return &decoder{in, 0, typeM}
}

// --------------------------------------------------------------------------
// Some helper functions.

func corrupted() {
	panic("Document is corrupted")
}

func settableValueOf(i interface{}) reflect.Value {
	v := reflect.ValueOf(i)
	sv := reflect.New(v.Type()).Elem()
	sv.Set(v)
	return sv
}

// --------------------------------------------------------------------------
// Unmarshaling of documents.

const (
	setterUnknown = iota
	setterNone
	setterType
	setterAddr
)

var setterStyles map[reflect.Type]int
var setterIface reflect.Type
var setterMutex sync.RWMutex

func init() {
	var iface Setter
	setterIface = reflect.TypeOf(&iface).Elem()
	setterStyles = make(map[reflect.Type]int)
}

func setterStyle(outt reflect.Type) int {
	setterMutex.RLock()
	style := setterStyles[outt]
	setterMutex.RUnlock()
	if style == setterUnknown {
		setterMutex.Lock()
		defer setterMutex.Unlock()
		if outt.Implements(setterIface) {
			setterStyles[outt] = setterType
		} else if reflect.PtrTo(outt).Implements(setterIface) {
			setterStyles[outt] = setterAddr
		} else {
			setterStyles[outt] = setterNone
		}
		style = setterStyles[outt]
	}
	return style
}

func getSetter(outt reflect.Type, out reflect.Value) Setter {
	style := setterStyle(outt)
	if style == setterNone {
		return nil
	}
	if style == setterAddr {
		if !out.CanAddr() {
			return nil
		}
		out = out.Addr()
	} else if outt.Kind() == reflect.Ptr && out.IsNil() {
		out.Set(reflect.New(outt.Elem()))
	}
	return out.Interface().(Setter)
}

func clearMap(m reflect.Value) {
	var none reflect.Value
	for _, k := range m.MapKeys() {
		m.SetMapIndex(k, none)
	}
}

func (d *decoder) readDocTo(out reflect.Value) {
	var elemType reflect.Type
	outt := out.Type()
	outk := outt.Kind()

	for {
		if outk == reflect.Ptr && out.IsNil() {
			out.Set(reflect.New(outt.Elem()))
		}
		if setter := getSetter(outt, out); setter != nil {
			var raw Raw
			d.readDocTo(reflect.ValueOf(&raw))
			err := setter.SetBSON(raw)
			if _, ok := err.(*TypeError); err != nil && !ok {
				panic(err)
			}
			return
		}
		if outk == reflect.Ptr {
			out = out.Elem()
			outt = out.Type()
			outk = out.Kind()
			continue
		}
		break
	}

	var fieldsMap map[string]fieldInfo
	var inlineMap reflect.Value
	start := d.i

	origout := out
	if outk == reflect.Interface {
		if d.docType.Kind() == reflect.Map {
			mv := reflect.MakeMap(d.docType)
			out.Set(mv)
			out = mv
		} else {
			dv := reflect.New(d.docType).Elem()
			out.Set(dv)
			out = dv
		}
		outt = out.Type()
		outk = outt.Kind()
	}

	docType := d.docType
	keyType := typeString
	convertKey := false
	switch outk {
	case reflect.Map:
		keyType = outt.Key()
		if keyType.Kind() != reflect.String {
			panic("BSON map must have string keys. Got: " + outt.String())
		}
		if keyType != typeString {
			convertKey = true
		}
		elemType = outt.Elem()
		if elemType == typeIface {
			d.docType = outt
		}
		if out.IsNil() {
			out.Set(reflect.MakeMap(out.Type()))
		} else if out.Len() > 0 {
			clearMap(out)
		}
	case reflect.Struct:
		if outt != typeRaw {
			sinfo, err := getStructInfo(out.Type())
			if err != nil {
				panic(err)
			}
			fieldsMap = sinfo.FieldsMap
			out.Set(sinfo.Zero)
			if sinfo.InlineMap != -1 {
				inlineMap = out.Field(sinfo.InlineMap)
				if !inlineMap.IsNil() && inlineMap.Len() > 0 {
					clearMap(inlineMap)
				}
				elemType = inlineMap.Type().Elem()
				if elemType == typeIface {
					d.docType = inlineMap.Type()
				}
			}
		}
	case reflect.Slice:
		switch outt.Elem() {
		case typeDocElem:
			origout.Set(d.readDocElems(outt))
			return
		case typeRawDocElem:
			origout.Set(d.readRawDocElems(outt))
			return
		}
		fallthrough
	default:
		panic("Unsupported document type for unmarshalling: " + out.Type().String())
	}

	end := int(d.readInt32())
	end += d.i - 4
	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
		corrupted()
	}
	for d.in[d.i] != '\x00' {
		kind := d.readByte()
		name := d.readCStr()
		if d.i >= end {
			corrupted()
		}

		switch outk {
		case reflect.Map:
			e := reflect.New(elemType).Elem()
			if d.readElemTo(e, kind) {
				k := reflect.ValueOf(name)
				if convertKey {
					k = k.Convert(keyType)
				}
				out.SetMapIndex(k, e)
			}
		case reflect.Struct:
			if outt == typeRaw {
				d.dropElem(kind)
			} else {
				if info, ok := fieldsMap[name]; ok {
					if info.Inline == nil {
						d.readElemTo(out.Field(info.Num), kind)
					} else {
						d.readElemTo(out.FieldByIndex(info.Inline), kind)
					}
				} else if inlineMap.IsValid() {
					if inlineMap.IsNil() {
						inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
					}
					e := reflect.New(elemType).Elem()
					if d.readElemTo(e, kind) {
						inlineMap.SetMapIndex(reflect.ValueOf(name), e)
					}
				} else {
					d.dropElem(kind)
				}
			}
		case reflect.Slice:
		}

		if d.i >= end {
			corrupted()
		}
	}
	d.i++ // '\x00'
	if d.i != end {
		corrupted()
	}
	d.docType = docType

	if outt == typeRaw {
		out.Set(reflect.ValueOf(Raw{0x03, d.in[start:d.i]}))
	}
}

func (d *decoder) readArrayDocTo(out reflect.Value) {
	end := int(d.readInt32())
	end += d.i - 4
	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
		corrupted()
	}
	i := 0
	l := out.Len()
	for d.in[d.i] != '\x00' {
		if i >= l {
			panic("Length mismatch on array field")
		}
		kind := d.readByte()
		for d.i < end && d.in[d.i] != '\x00' {
			d.i++
		}
		if d.i >= end {
			corrupted()
		}
		d.i++
		d.readElemTo(out.Index(i), kind)
		if d.i >= end {
			corrupted()
		}
		i++
	}
	if i != l {
		panic("Length mismatch on array field")
	}
	d.i++ // '\x00'
	if d.i != end {
		corrupted()
	}
}

func (d *decoder) readSliceDoc(t reflect.Type) interface{} {
	tmp := make([]reflect.Value, 0, 8)
	elemType := t.Elem()
	if elemType == typeRawDocElem {
		d.dropElem(0x04)
		return reflect.Zero(t).Interface()
	}

	end := int(d.readInt32())
	end += d.i - 4
	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
		corrupted()
	}
	for d.in[d.i] != '\x00' {
		kind := d.readByte()
		for d.i < end && d.in[d.i] != '\x00' {
			d.i++
		}
		if d.i >= end {
			corrupted()
		}
		d.i++
		e := reflect.New(elemType).Elem()
		if d.readElemTo(e, kind) {
			tmp = append(tmp, e)
		}
		if d.i >= end {
			corrupted()
		}
	}
	d.i++ // '\x00'
	if d.i != end {
		corrupted()
	}

	n := len(tmp)
	slice := reflect.MakeSlice(t, n, n)
	for i := 0; i != n; i++ {
		slice.Index(i).Set(tmp[i])
	}
	return slice.Interface()
}

var typeSlice = reflect.TypeOf([]interface{}{})
var typeIface = typeSlice.Elem()

func (d *decoder) readDocElems(typ reflect.Type) reflect.Value {
	docType := d.docType
	d.docType = typ
	slice := make([]DocElem, 0, 8)
	d.readDocWith(func(kind byte, name string) {
		e := DocElem{Name: name}
		v := reflect.ValueOf(&e.Value)
		if d.readElemTo(v.Elem(), kind) {
			slice = append(slice, e)
		}
	})
	slicev := reflect.New(typ).Elem()
	slicev.Set(reflect.ValueOf(slice))
	d.docType = docType
	return slicev
}

func (d *decoder) readRawDocElems(typ reflect.Type) reflect.Value {
	docType := d.docType
	d.docType = typ
	slice := make([]RawDocElem, 0, 8)
	d.readDocWith(func(kind byte, name string) {
		e := RawDocElem{Name: name}
		v := reflect.ValueOf(&e.Value)
		if d.readElemTo(v.Elem(), kind) {
			slice = append(slice, e)
		}
	})
	slicev := reflect.New(typ).Elem()
	slicev.Set(reflect.ValueOf(slice))
	d.docType = docType
	return slicev
}

func (d *decoder) readDocWith(f func(kind byte, name string)) {
	end := int(d.readInt32())
	end += d.i - 4
	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
		corrupted()
	}
	for d.in[d.i] != '\x00' {
		kind := d.readByte()
		name := d.readCStr()
		if d.i >= end {
			corrupted()
		}
		f(kind, name)
		if d.i >= end {
			corrupted()
		}
	}
	d.i++ // '\x00'
	if d.i != end {
		corrupted()
	}
}

// --------------------------------------------------------------------------
// Unmarshaling of individual elements within a document.

var blackHole = settableValueOf(struct{}{})

func (d *decoder) dropElem(kind byte) {
	d.readElemTo(blackHole, kind)
}

// Attempt to decode an element from the document and put it into out.
// If the types are not compatible, the returned ok value will be
// false and out will be unchanged.
func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) {

	start := d.i

	if kind == 0x03 {
		// Delegate unmarshaling of documents.
		outt := out.Type()
		outk := out.Kind()
		switch outk {
		case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map:
			d.readDocTo(out)
			return true
		}
		if setterStyle(outt) != setterNone {
			d.readDocTo(out)
			return true
		}
		if outk == reflect.Slice {
			switch outt.Elem() {
			case typeDocElem:
				out.Set(d.readDocElems(outt))
			case typeRawDocElem:
				out.Set(d.readRawDocElems(outt))
			default:
				d.readDocTo(blackHole)
			}
			return true
		}
		d.readDocTo(blackHole)
		return true
	}

	var in interface{}

	switch kind {
	case 0x01: // Float64
		in = d.readFloat64()
	case 0x02: // UTF-8 string
		in = d.readStr()
	case 0x03: // Document
		panic("Can't happen. Handled above.")
	case 0x04: // Array
		outt := out.Type()
		if setterStyle(outt) != setterNone {
			// Skip the value so its data is handed to the setter below.
			d.dropElem(kind)
			break
		}
		for outt.Kind() == reflect.Ptr {
			outt = outt.Elem()
		}
		switch outt.Kind() {
		case reflect.Array:
			d.readArrayDocTo(out)
			return true
		case reflect.Slice:
			in = d.readSliceDoc(outt)
		default:
			in = d.readSliceDoc(typeSlice)
		}
	case 0x05: // Binary
		b := d.readBinary()
		if b.Kind == 0x00 || b.Kind == 0x02 {
			in = b.Data
		} else {
			in = b
		}
	case 0x06: // Undefined (obsolete, but still seen in the wild)
		in = Undefined
	case 0x07: // ObjectId
		in = ObjectId(d.readBytes(12))
	case 0x08: // Bool
		in = d.readBool()
	case 0x09: // Timestamp
		// MongoDB handles timestamps as milliseconds.
		i := d.readInt64()
		if i == -62135596800000 {
			in = time.Time{} // In UTC for convenience.
		} else {
			in = time.Unix(i/1e3, i%1e3*1e6)
		}
	case 0x0A: // Nil
		in = nil
	case 0x0B: // RegEx
		in = d.readRegEx()
	case 0x0C:
		in = DBPointer{Namespace: d.readStr(), Id: ObjectId(d.readBytes(12))}
	case 0x0D: // JavaScript without scope
		in = JavaScript{Code: d.readStr()}
	case 0x0E: // Symbol
		in = Symbol(d.readStr())
	case 0x0F: // JavaScript with scope
		d.i += 4 // Skip length
		js := JavaScript{d.readStr(), make(M)}
		d.readDocTo(reflect.ValueOf(js.Scope))
		in = js
	case 0x10: // Int32
		in = int(d.readInt32())
	case 0x11: // Mongo-specific timestamp
		in = MongoTimestamp(d.readInt64())
	case 0x12: // Int64
		in = d.readInt64()
	case 0x13: // Decimal128
		in = Decimal128{
			l: uint64(d.readInt64()),
			h: uint64(d.readInt64()),
		}
	case 0x7F: // Max key
		in = MaxKey
	case 0xFF: // Min key
		in = MinKey
	default:
		panic(fmt.Sprintf("Unknown element kind (0x%02X)", kind))
	}

	outt := out.Type()

	if outt == typeRaw {
		out.Set(reflect.ValueOf(Raw{kind, d.in[start:d.i]}))
		return true
	}

	if setter := getSetter(outt, out); setter != nil {
		err := setter.SetBSON(Raw{kind, d.in[start:d.i]})
		if err == SetZero {
			out.Set(reflect.Zero(outt))
			return true
		}
		if err == nil {
			return true
		}
		if _, ok := err.(*TypeError); !ok {
			panic(err)
		}
		return false
	}

	if in == nil {
		out.Set(reflect.Zero(outt))
		return true
	}

	outk := outt.Kind()

	// Dereference and initialize pointer if necessary.
	first := true
	for outk == reflect.Ptr {
		if !out.IsNil() {
			out = out.Elem()
		} else {
			elem := reflect.New(outt.Elem())
			if first {
				// Only set if value is compatible.
				first = false
				defer func(out, elem reflect.Value) {
					if good {
						out.Set(elem)
					}
				}(out, elem)
			} else {
				out.Set(elem)
			}
			out = elem
		}
		outt = out.Type()
		outk = outt.Kind()
	}

	inv := reflect.ValueOf(in)
	if outt == inv.Type() {
		out.Set(inv)
		return true
	}

	switch outk {
	case reflect.Interface:
		out.Set(inv)
		return true
	case reflect.String:
		switch inv.Kind() {
		case reflect.String:
			out.SetString(inv.String())
			return true
		case reflect.Slice:
			if b, ok := in.([]byte); ok {
				out.SetString(string(b))
				return true
			}
		case reflect.Int, reflect.Int64:
			if outt == typeJSONNumber {
				out.SetString(strconv.FormatInt(inv.Int(), 10))
				return true
			}
		case reflect.Float64:
			if outt == typeJSONNumber {
				out.SetString(strconv.FormatFloat(inv.Float(), 'f', -1, 64))
				return true
			}
		}
	case reflect.Slice, reflect.Array:
		// Remember, array (0x04) slices are built with the correct
		// element type. If we are here, must be a cross BSON kind
		// conversion (e.g. 0x05 unmarshalling on string).
		if outt.Elem().Kind() != reflect.Uint8 {
			break
		}
		switch inv.Kind() {
		case reflect.String:
			slice := []byte(inv.String())
			out.Set(reflect.ValueOf(slice))
			return true
		case reflect.Slice:
			switch outt.Kind() {
			case reflect.Array:
				reflect.Copy(out, inv)
			case reflect.Slice:
				out.SetBytes(inv.Bytes())
			}
			return true
		}
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		switch inv.Kind() {
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			out.SetInt(inv.Int())
			return true
		case reflect.Float32, reflect.Float64:
			out.SetInt(int64(inv.Float()))
			return true
		case reflect.Bool:
			if inv.Bool() {
				out.SetInt(1)
			} else {
				out.SetInt(0)
			}
			return true
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
			panic("can't happen: no uint types in BSON (!?)")
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		switch inv.Kind() {
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			out.SetUint(uint64(inv.Int()))
			return true
		case reflect.Float32, reflect.Float64:
			out.SetUint(uint64(inv.Float()))
			return true
		case reflect.Bool:
			if inv.Bool() {
				out.SetUint(1)
			} else {
				out.SetUint(0)
			}
			return true
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
			panic("Can't happen. No uint types in BSON.")
		}
	case reflect.Float32, reflect.Float64:
		switch inv.Kind() {
		case reflect.Float32, reflect.Float64:
			out.SetFloat(inv.Float())
			return true
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			out.SetFloat(float64(inv.Int()))
			return true
		case reflect.Bool:
			if inv.Bool() {
				out.SetFloat(1)
			} else {
				out.SetFloat(0)
			}
			return true
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
			panic("Can't happen. No uint types in BSON?")
		}
	case reflect.Bool:
		switch inv.Kind() {
		case reflect.Bool:
			out.SetBool(inv.Bool())
			return true
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			out.SetBool(inv.Int() != 0)
			return true
		case reflect.Float32, reflect.Float64:
			out.SetBool(inv.Float() != 0)
			return true
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
			panic("Can't happen. No uint types in BSON?")
		}
	case reflect.Struct:
		if outt == typeURL && inv.Kind() == reflect.String {
			u, err := url.Parse(inv.String())
			if err != nil {
				panic(err)
			}
			out.Set(reflect.ValueOf(u).Elem())
			return true
		}
		if outt == typeBinary {
			if b, ok := in.([]byte); ok {
				out.Set(reflect.ValueOf(Binary{Data: b}))
				return true
			}
		}
	}

	return false
}

// --------------------------------------------------------------------------
// Parsers of basic types.

func (d *decoder) readRegEx() RegEx {
	re := RegEx{}
	re.Pattern = d.readCStr()
	re.Options = d.readCStr()
	return re
}

func (d *decoder) readBinary() Binary {
	l := d.readInt32()
	b := Binary{}
	b.Kind = d.readByte()
	b.Data = d.readBytes(l)
	if b.Kind == 0x02 && len(b.Data) >= 4 {
		// Weird obsolete format with redundant length.
		b.Data = b.Data[4:]
	}
	return b
}

func (d *decoder) readStr() string {
	l := d.readInt32()
	b := d.readBytes(l - 1)
	if d.readByte() != '\x00' {
		corrupted()
	}
	return string(b)
}

func (d *decoder) readCStr() string {
	start := d.i
	end := start
	l := len(d.in)
	for ; end != l; end++ {
		if d.in[end] == '\x00' {
			break
		}
	}
	d.i = end + 1
	if d.i > l {
		corrupted()
	}
	return string(d.in[start:end])
}

func (d *decoder) readBool() bool {
	b := d.readByte()
	if b == 0 {
		return false
	}
	if b == 1 {
		return true
	}
	panic(fmt.Sprintf("encoded boolean must be 1 or 0, found %d", b))
}

func (d *decoder) readFloat64() float64 {
	return math.Float64frombits(uint64(d.readInt64()))
}

func (d *decoder) readInt32() int32 {
	b := d.readBytes(4)
	return int32((uint32(b[0]) << 0) |
		(uint32(b[1]) << 8) |
		(uint32(b[2]) << 16) |
		(uint32(b[3]) << 24))
}

func (d *decoder) readInt64() int64 {
	b := d.readBytes(8)
	return int64((uint64(b[0]) << 0) |
		(uint64(b[1]) << 8) |
		(uint64(b[2]) << 16) |
		(uint64(b[3]) << 24) |
		(uint64(b[4]) << 32) |
		(uint64(b[5]) << 40) |
		(uint64(b[6]) << 48) |
		(uint64(b[7]) << 56))
}

func (d *decoder) readByte() byte {
	i := d.i
	d.i++
	if d.i > len(d.in) {
		corrupted()
	}
	return d.in[i]
}

func (d *decoder) readBytes(length int32) []byte {
	if length < 0 {
		corrupted()
	}
	start := d.i
	d.i += int(length)
	if d.i < start || d.i > len(d.in) {
		corrupted()
	}
	return d.in[start : start+int(length)]
}
514
vendor/gopkg.in/mgo.v2/bson/encode.go
generated
vendored
@@ -1,514 +0,0 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// gobson - BSON library for Go.

package bson

import (
	"encoding/json"
	"fmt"
	"math"
	"net/url"
	"reflect"
	"strconv"
	"time"
)

// --------------------------------------------------------------------------
// Some internal infrastructure.

var (
	typeBinary         = reflect.TypeOf(Binary{})
	typeObjectId       = reflect.TypeOf(ObjectId(""))
	typeDBPointer      = reflect.TypeOf(DBPointer{"", ObjectId("")})
	typeSymbol         = reflect.TypeOf(Symbol(""))
	typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0))
	typeOrderKey       = reflect.TypeOf(MinKey)
	typeDocElem        = reflect.TypeOf(DocElem{})
	typeRawDocElem     = reflect.TypeOf(RawDocElem{})
	typeRaw            = reflect.TypeOf(Raw{})
	typeURL            = reflect.TypeOf(url.URL{})
	typeTime           = reflect.TypeOf(time.Time{})
	typeString         = reflect.TypeOf("")
	typeJSONNumber     = reflect.TypeOf(json.Number(""))
)

const itoaCacheSize = 32

var itoaCache []string

func init() {
	itoaCache = make([]string, itoaCacheSize)
	for i := 0; i != itoaCacheSize; i++ {
		itoaCache[i] = strconv.Itoa(i)
	}
}

func itoa(i int) string {
	if i < itoaCacheSize {
		return itoaCache[i]
	}
	return strconv.Itoa(i)
}

// --------------------------------------------------------------------------
// Marshaling of the document value itself.

type encoder struct {
	out []byte
}

func (e *encoder) addDoc(v reflect.Value) {
	for {
		if vi, ok := v.Interface().(Getter); ok {
			getv, err := vi.GetBSON()
			if err != nil {
				panic(err)
			}
			v = reflect.ValueOf(getv)
			continue
		}
		if v.Kind() == reflect.Ptr {
			v = v.Elem()
			continue
		}
		break
	}

	if v.Type() == typeRaw {
		raw := v.Interface().(Raw)
		if raw.Kind != 0x03 && raw.Kind != 0x00 {
			panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document")
		}
		if len(raw.Data) == 0 {
			panic("Attempted to marshal empty Raw document")
		}
		e.addBytes(raw.Data...)
		return
	}

	start := e.reserveInt32()

	switch v.Kind() {
	case reflect.Map:
		e.addMap(v)
	case reflect.Struct:
		e.addStruct(v)
	case reflect.Array, reflect.Slice:
		e.addSlice(v)
	default:
		panic("Can't marshal " + v.Type().String() + " as a BSON document")
	}

	e.addBytes(0)
	e.setInt32(start, int32(len(e.out)-start))
}

func (e *encoder) addMap(v reflect.Value) {
	for _, k := range v.MapKeys() {
		e.addElem(k.String(), v.MapIndex(k), false)
	}
}

func (e *encoder) addStruct(v reflect.Value) {
	sinfo, err := getStructInfo(v.Type())
	if err != nil {
		panic(err)
	}
	var value reflect.Value
	if sinfo.InlineMap >= 0 {
		m := v.Field(sinfo.InlineMap)
		if m.Len() > 0 {
			for _, k := range m.MapKeys() {
				ks := k.String()
				if _, found := sinfo.FieldsMap[ks]; found {
					panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks))
				}
				e.addElem(ks, m.MapIndex(k), false)
			}
		}
	}
	for _, info := range sinfo.FieldsList {
		if info.Inline == nil {
			value = v.Field(info.Num)
		} else {
			value = v.FieldByIndex(info.Inline)
		}
		if info.OmitEmpty && isZero(value) {
			continue
		}
		e.addElem(info.Key, value, info.MinSize)
	}
}

func isZero(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.String:
		return len(v.String()) == 0
	case reflect.Ptr, reflect.Interface:
		return v.IsNil()
	case reflect.Slice:
		return v.Len() == 0
	case reflect.Map:
		return v.Len() == 0
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return v.Float() == 0
	case reflect.Bool:
		return !v.Bool()
	case reflect.Struct:
		vt := v.Type()
		if vt == typeTime {
			return v.Interface().(time.Time).IsZero()
		}
		for i := 0; i < v.NumField(); i++ {
			if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous {
				continue // Private field
			}
			if !isZero(v.Field(i)) {
				return false
			}
		}
		return true
	}
	return false
}

func (e *encoder) addSlice(v reflect.Value) {
	vi := v.Interface()
	if d, ok := vi.(D); ok {
		for _, elem := range d {
			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
		}
		return
	}
	if d, ok := vi.(RawD); ok {
		for _, elem := range d {
			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
		}
		return
	}
	l := v.Len()
	et := v.Type().Elem()
	if et == typeDocElem {
		for i := 0; i < l; i++ {
			elem := v.Index(i).Interface().(DocElem)
			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
		}
		return
	}
	if et == typeRawDocElem {
		for i := 0; i < l; i++ {
			elem := v.Index(i).Interface().(RawDocElem)
			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
		}
		return
	}
	for i := 0; i < l; i++ {
		e.addElem(itoa(i), v.Index(i), false)
	}
}

// --------------------------------------------------------------------------
// Marshaling of elements in a document.

func (e *encoder) addElemName(kind byte, name string) {
	e.addBytes(kind)
	e.addBytes([]byte(name)...)
	e.addBytes(0)
}

func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {

	if !v.IsValid() {
		e.addElemName(0x0A, name)
		return
	}

	if getter, ok := v.Interface().(Getter); ok {
		getv, err := getter.GetBSON()
		if err != nil {
			panic(err)
		}
		e.addElem(name, reflect.ValueOf(getv), minSize)
		return
	}

	switch v.Kind() {

	case reflect.Interface:
		e.addElem(name, v.Elem(), minSize)

	case reflect.Ptr:
		e.addElem(name, v.Elem(), minSize)

	case reflect.String:
		s := v.String()
		switch v.Type() {
		case typeObjectId:
			if len(s) != 12 {
				panic("ObjectIDs must be exactly 12 bytes long (got " +
					strconv.Itoa(len(s)) + ")")
			}
			e.addElemName(0x07, name)
			e.addBytes([]byte(s)...)
		case typeSymbol:
			e.addElemName(0x0E, name)
			e.addStr(s)
		case typeJSONNumber:
			n := v.Interface().(json.Number)
			if i, err := n.Int64(); err == nil {
				e.addElemName(0x12, name)
				e.addInt64(i)
			} else if f, err := n.Float64(); err == nil {
				e.addElemName(0x01, name)
				e.addFloat64(f)
			} else {
				panic("failed to convert json.Number to a number: " + s)
			}
		default:
			e.addElemName(0x02, name)
			e.addStr(s)
		}

	case reflect.Float32, reflect.Float64:
		e.addElemName(0x01, name)
		e.addFloat64(v.Float())

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		u := v.Uint()
		if int64(u) < 0 {
			panic("BSON has no uint64 type, and value is too large to fit correctly in an int64")
		} else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) {
			e.addElemName(0x10, name)
			e.addInt32(int32(u))
		} else {
			e.addElemName(0x12, name)
			e.addInt64(int64(u))
		}

	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		switch v.Type() {
		case typeMongoTimestamp:
			e.addElemName(0x11, name)
			e.addInt64(v.Int())

		case typeOrderKey:
			if v.Int() == int64(MaxKey) {
				e.addElemName(0x7F, name)
			} else {
				e.addElemName(0xFF, name)
			}

		default:
			i := v.Int()
			if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 {
				// It fits into an int32, encode as such.
				e.addElemName(0x10, name)
				e.addInt32(int32(i))
			} else {
				e.addElemName(0x12, name)
				e.addInt64(i)
			}
		}

	case reflect.Bool:
		e.addElemName(0x08, name)
		if v.Bool() {
			e.addBytes(1)
		} else {
			e.addBytes(0)
		}

	case reflect.Map:
		e.addElemName(0x03, name)
		e.addDoc(v)

	case reflect.Slice:
		vt := v.Type()
		et := vt.Elem()
		if et.Kind() == reflect.Uint8 {
			e.addElemName(0x05, name)
			e.addBinary(0x00, v.Bytes())
		} else if et == typeDocElem || et == typeRawDocElem {
			e.addElemName(0x03, name)
			e.addDoc(v)
		} else {
			e.addElemName(0x04, name)
			e.addDoc(v)
		}

	case reflect.Array:
		et := v.Type().Elem()
		if et.Kind() == reflect.Uint8 {
			e.addElemName(0x05, name)
			if v.CanAddr() {
				e.addBinary(0x00, v.Slice(0, v.Len()).Interface().([]byte))
			} else {
				n := v.Len()
				e.addInt32(int32(n))
				e.addBytes(0x00)
				for i := 0; i < n; i++ {
					el := v.Index(i)
					e.addBytes(byte(el.Uint()))
				}
			}
		} else {
			e.addElemName(0x04, name)
			e.addDoc(v)
		}

	case reflect.Struct:
		switch s := v.Interface().(type) {

		case Raw:
			kind := s.Kind
			if kind == 0x00 {
				kind = 0x03
			}
			if len(s.Data) == 0 && kind != 0x06 && kind != 0x0A && kind != 0xFF && kind != 0x7F {
				panic("Attempted to marshal empty Raw document")
			}
			e.addElemName(kind, name)
			e.addBytes(s.Data...)

		case Binary:
			e.addElemName(0x05, name)
			e.addBinary(s.Kind, s.Data)

		case Decimal128:
			e.addElemName(0x13, name)
			e.addInt64(int64(s.l))
			e.addInt64(int64(s.h))

		case DBPointer:
			e.addElemName(0x0C, name)
			e.addStr(s.Namespace)
			if len(s.Id) != 12 {
				panic("ObjectIDs must be exactly 12 bytes long (got " +
					strconv.Itoa(len(s.Id)) + ")")
			}
			e.addBytes([]byte(s.Id)...)

		case RegEx:
			e.addElemName(0x0B, name)
			e.addCStr(s.Pattern)
			e.addCStr(s.Options)

		case JavaScript:
			if s.Scope == nil {
				e.addElemName(0x0D, name)
				e.addStr(s.Code)
			} else {
				e.addElemName(0x0F, name)
				start := e.reserveInt32()
				e.addStr(s.Code)
				e.addDoc(reflect.ValueOf(s.Scope))
				e.setInt32(start, int32(len(e.out)-start))
			}

		case time.Time:
			// MongoDB handles timestamps as milliseconds.
			e.addElemName(0x09, name)
			e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6))

		case url.URL:
			e.addElemName(0x02, name)
			e.addStr(s.String())

		case undefined:
			e.addElemName(0x06, name)

		default:
			e.addElemName(0x03, name)
			e.addDoc(v)
		}

	default:
		panic("Can't marshal " + v.Type().String() + " in a BSON document")
	}
}

// --------------------------------------------------------------------------
// Marshaling of base types.

func (e *encoder) addBinary(subtype byte, v []byte) {
	if subtype == 0x02 {
		// Wonder how that brilliant idea came to life. Obsolete, luckily.
		e.addInt32(int32(len(v) + 4))
		e.addBytes(subtype)
		e.addInt32(int32(len(v)))
	} else {
		e.addInt32(int32(len(v)))
		e.addBytes(subtype)
	}
	e.addBytes(v...)
}

func (e *encoder) addStr(v string) {
	e.addInt32(int32(len(v) + 1))
	e.addCStr(v)
}

func (e *encoder) addCStr(v string) {
	e.addBytes([]byte(v)...)
	e.addBytes(0)
}

func (e *encoder) reserveInt32() (pos int) {
	pos = len(e.out)
	e.addBytes(0, 0, 0, 0)
	return pos
}

func (e *encoder) setInt32(pos int, v int32) {
	e.out[pos+0] = byte(v)
	e.out[pos+1] = byte(v >> 8)
	e.out[pos+2] = byte(v >> 16)
	e.out[pos+3] = byte(v >> 24)
}

func (e *encoder) addInt32(v int32) {
	u := uint32(v)
	e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24))
}

func (e *encoder) addInt64(v int64) {
	u := uint64(v)
	e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24),
		byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56))
}

func (e *encoder) addFloat64(v float64) {
	e.addInt64(int64(math.Float64bits(v)))
}

func (e *encoder) addBytes(v ...byte) {
	e.out = append(e.out, v...)
}
380
vendor/gopkg.in/mgo.v2/bson/json.go
generated
vendored
@@ -1,380 +0,0 @@
package bson

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"gopkg.in/mgo.v2/internal/json"
	"strconv"
	"time"
)

// UnmarshalJSON unmarshals a JSON value that may hold non-standard
// syntax as defined in BSON's extended JSON specification.
func UnmarshalJSON(data []byte, value interface{}) error {
	d := json.NewDecoder(bytes.NewBuffer(data))
	d.Extend(&jsonExt)
	return d.Decode(value)
}

// MarshalJSON marshals a JSON value that may hold non-standard
// syntax as defined in BSON's extended JSON specification.
func MarshalJSON(value interface{}) ([]byte, error) {
	var buf bytes.Buffer
	e := json.NewEncoder(&buf)
	e.Extend(&jsonExt)
	err := e.Encode(value)
	if err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// jdec is used internally by the JSON decoding functions
// so they may unmarshal functions without getting into endless
// recursion due to keyed objects.
func jdec(data []byte, value interface{}) error {
	d := json.NewDecoder(bytes.NewBuffer(data))
	d.Extend(&funcExt)
	return d.Decode(value)
}

var jsonExt json.Extension
var funcExt json.Extension

// TODO
// - Shell regular expressions ("/regexp/opts")

func init() {
	jsonExt.DecodeUnquotedKeys(true)
	jsonExt.DecodeTrailingCommas(true)

	funcExt.DecodeFunc("BinData", "$binaryFunc", "$type", "$binary")
	jsonExt.DecodeKeyed("$binary", jdecBinary)
	jsonExt.DecodeKeyed("$binaryFunc", jdecBinary)
	jsonExt.EncodeType([]byte(nil), jencBinarySlice)
	jsonExt.EncodeType(Binary{}, jencBinaryType)

	funcExt.DecodeFunc("ISODate", "$dateFunc", "S")
	funcExt.DecodeFunc("new Date", "$dateFunc", "S")
	jsonExt.DecodeKeyed("$date", jdecDate)
	jsonExt.DecodeKeyed("$dateFunc", jdecDate)
	jsonExt.EncodeType(time.Time{}, jencDate)

	funcExt.DecodeFunc("Timestamp", "$timestamp", "t", "i")
	jsonExt.DecodeKeyed("$timestamp", jdecTimestamp)
	jsonExt.EncodeType(MongoTimestamp(0), jencTimestamp)

	funcExt.DecodeConst("undefined", Undefined)

	jsonExt.DecodeKeyed("$regex", jdecRegEx)
	jsonExt.EncodeType(RegEx{}, jencRegEx)

	funcExt.DecodeFunc("ObjectId", "$oidFunc", "Id")
	jsonExt.DecodeKeyed("$oid", jdecObjectId)
	jsonExt.DecodeKeyed("$oidFunc", jdecObjectId)
	jsonExt.EncodeType(ObjectId(""), jencObjectId)

	funcExt.DecodeFunc("DBRef", "$dbrefFunc", "$ref", "$id")
	jsonExt.DecodeKeyed("$dbrefFunc", jdecDBRef)

	funcExt.DecodeFunc("NumberLong", "$numberLongFunc", "N")
	jsonExt.DecodeKeyed("$numberLong", jdecNumberLong)
	jsonExt.DecodeKeyed("$numberLongFunc", jdecNumberLong)
	jsonExt.EncodeType(int64(0), jencNumberLong)
	jsonExt.EncodeType(int(0), jencInt)

	funcExt.DecodeConst("MinKey", MinKey)
	funcExt.DecodeConst("MaxKey", MaxKey)
	jsonExt.DecodeKeyed("$minKey", jdecMinKey)
	jsonExt.DecodeKeyed("$maxKey", jdecMaxKey)
	jsonExt.EncodeType(orderKey(0), jencMinMaxKey)

	jsonExt.DecodeKeyed("$undefined", jdecUndefined)
	jsonExt.EncodeType(Undefined, jencUndefined)

	jsonExt.Extend(&funcExt)
}

func fbytes(format string, args ...interface{}) []byte {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, format, args...)
	return buf.Bytes()
}

func jdecBinary(data []byte) (interface{}, error) {
	var v struct {
		Binary []byte `json:"$binary"`
		Type   string `json:"$type"`
		Func   struct {
			Binary []byte `json:"$binary"`
			Type   int64  `json:"$type"`
		} `json:"$binaryFunc"`
	}
	err := jdec(data, &v)
	if err != nil {
		return nil, err
	}

	var binData []byte
	var binKind int64
	if v.Type == "" && v.Binary == nil {
		binData = v.Func.Binary
		binKind = v.Func.Type
	} else if v.Type == "" {
		return v.Binary, nil
	} else {
		binData = v.Binary
		binKind, err = strconv.ParseInt(v.Type, 0, 64)
		if err != nil {
			binKind = -1
		}
	}

	if binKind == 0 {
		return binData, nil
	}
	if binKind < 0 || binKind > 255 {
		return nil, fmt.Errorf("invalid type in binary object: %s", data)
	}

	return Binary{Kind: byte(binKind), Data: binData}, nil
}

func jencBinarySlice(v interface{}) ([]byte, error) {
	in := v.([]byte)
	out := make([]byte, base64.StdEncoding.EncodedLen(len(in)))
	base64.StdEncoding.Encode(out, in)
	return fbytes(`{"$binary":"%s","$type":"0x0"}`, out), nil
}

func jencBinaryType(v interface{}) ([]byte, error) {
	in := v.(Binary)
	out := make([]byte, base64.StdEncoding.EncodedLen(len(in.Data)))
	base64.StdEncoding.Encode(out, in.Data)
	return fbytes(`{"$binary":"%s","$type":"0x%x"}`, out, in.Kind), nil
}

const jdateFormat = "2006-01-02T15:04:05.999Z"

func jdecDate(data []byte) (interface{}, error) {
	var v struct {
		S    string `json:"$date"`
		Func struct {
			S string
		} `json:"$dateFunc"`
	}
	_ = jdec(data, &v)
	if v.S == "" {
		v.S = v.Func.S
	}
	if v.S != "" {
		for _, format := range []string{jdateFormat, "2006-01-02"} {
			t, err := time.Parse(format, v.S)
			if err == nil {
				return t, nil
			}
		}
		return nil, fmt.Errorf("cannot parse date: %q", v.S)
	}

	var vn struct {
		Date struct {
			N int64 `json:"$numberLong,string"`
		} `json:"$date"`
		Func struct {
			S int64
		} `json:"$dateFunc"`
	}
	err := jdec(data, &vn)
	if err != nil {
		return nil, fmt.Errorf("cannot parse date: %q", data)
	}
	n := vn.Date.N
	if n == 0 {
		n = vn.Func.S
	}
	return time.Unix(n/1000, n%1000*1e6).UTC(), nil
}

func jencDate(v interface{}) ([]byte, error) {
	t := v.(time.Time)
	return fbytes(`{"$date":%q}`, t.Format(jdateFormat)), nil
}

func jdecTimestamp(data []byte) (interface{}, error) {
	var v struct {
		Func struct {
			T int32 `json:"t"`
			I int32 `json:"i"`
		} `json:"$timestamp"`
	}
	err := jdec(data, &v)
	if err != nil {
		return nil, err
	}
	return MongoTimestamp(uint64(v.Func.T)<<32 | uint64(uint32(v.Func.I))), nil
}

func jencTimestamp(v interface{}) ([]byte, error) {
	ts := uint64(v.(MongoTimestamp))
	return fbytes(`{"$timestamp":{"t":%d,"i":%d}}`, ts>>32, uint32(ts)), nil
}

func jdecRegEx(data []byte) (interface{}, error) {
	var v struct {
		Regex   string `json:"$regex"`
		Options string `json:"$options"`
	}
	err := jdec(data, &v)
	if err != nil {
		return nil, err
	}
	return RegEx{v.Regex, v.Options}, nil
}

func jencRegEx(v interface{}) ([]byte, error) {
	re := v.(RegEx)
	type regex struct {
		Regex   string `json:"$regex"`
		Options string `json:"$options"`
	}
	return json.Marshal(regex{re.Pattern, re.Options})
}

func jdecObjectId(data []byte) (interface{}, error) {
	var v struct {
		Id   string `json:"$oid"`
		Func struct {
			Id string
		} `json:"$oidFunc"`
	}
	err := jdec(data, &v)
	if err != nil {
		return nil, err
	}
	if v.Id == "" {
		v.Id = v.Func.Id
	}
	return ObjectIdHex(v.Id), nil
}

func jencObjectId(v interface{}) ([]byte, error) {
	return fbytes(`{"$oid":"%s"}`, v.(ObjectId).Hex()), nil
}

func jdecDBRef(data []byte) (interface{}, error) {
	// TODO Support unmarshaling $ref and $id into the input value.
	var v struct {
		Obj map[string]interface{} `json:"$dbrefFunc"`
	}
	// TODO Fix this. Must not be required.
	v.Obj = make(map[string]interface{})
	err := jdec(data, &v)
	if err != nil {
		return nil, err
	}
	return v.Obj, nil
}

func jdecNumberLong(data []byte) (interface{}, error) {
	var v struct {
		N    int64 `json:"$numberLong,string"`
		Func struct {
			N int64 `json:",string"`
		} `json:"$numberLongFunc"`
	}
	var vn struct {
		N    int64 `json:"$numberLong"`
		Func struct {
			N int64
		} `json:"$numberLongFunc"`
	}
	err := jdec(data, &v)
	if err != nil {
		err = jdec(data, &vn)
		v.N = vn.N
		v.Func.N = vn.Func.N
	}
	if err != nil {
		return nil, err
	}
	if v.N != 0 {
		return v.N, nil
	}
	return v.Func.N, nil
}

func jencNumberLong(v interface{}) ([]byte, error) {
	n := v.(int64)
	f := `{"$numberLong":"%d"}`
	if n <= 1<<53 {
		f = `{"$numberLong":%d}`
	}
	return fbytes(f, n), nil
}

func jencInt(v interface{}) ([]byte, error) {
	n := v.(int)
	f := `{"$numberLong":"%d"}`
	if int64(n) <= 1<<53 {
		f = `%d`
	}
	return fbytes(f, n), nil
}

func jdecMinKey(data []byte) (interface{}, error) {
	var v struct {
		N int64 `json:"$minKey"`
	}
	err := jdec(data, &v)
	if err != nil {
		return nil, err
	}
	if v.N != 1 {
		return nil, fmt.Errorf("invalid $minKey object: %s", data)
	}
	return MinKey, nil
}

func jdecMaxKey(data []byte) (interface{}, error) {
	var v struct {
		N int64 `json:"$maxKey"`
	}
	err := jdec(data, &v)
	if err != nil {
		return nil, err
	}
	if v.N != 1 {
		return nil, fmt.Errorf("invalid $maxKey object: %s", data)
	}
	return MaxKey, nil
}

func jencMinMaxKey(v interface{}) ([]byte, error) {
	switch v.(orderKey) {
	case MinKey:
		return []byte(`{"$minKey":1}`), nil
	case MaxKey:
		return []byte(`{"$maxKey":1}`), nil
	}
	panic(fmt.Sprintf("invalid $minKey/$maxKey value: %d", v))
}

func jdecUndefined(data []byte) (interface{}, error) {
	var v struct {
		B bool `json:"$undefined"`
	}
	err := jdec(data, &v)
	if err != nil {
		return nil, err
	}
	if !v.B {
		return nil, fmt.Errorf("invalid $undefined object: %s", data)
	}
	return Undefined, nil
}

func jencUndefined(v interface{}) ([]byte, error) {
	return []byte(`{"$undefined":true}`), nil
}
184
vendor/gopkg.in/mgo.v2/bson/json_test.go
generated
vendored
@@ -1,184 +0,0 @@
|
||||
package bson_test
|
||||
|
||||
import (
|
||||
"gopkg.in/mgo.v2/bson"
|
||||
|
||||
. "gopkg.in/check.v1"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type jsonTest struct {
|
||||
a interface{} // value encoded into JSON (optional)
|
||||
b string // JSON expected as output of <a>, and used as input to <c>
|
||||
c interface{} // Value expected from decoding <b>, defaults to <a>
|
||||
e string // error string, if decoding (b) should fail
|
||||
}
|
||||
|
||||
var jsonTests = []jsonTest{
|
||||
// $binary
|
||||
{
|
||||
a: []byte("foo"),
|
||||
b: `{"$binary":"Zm9v","$type":"0x0"}`,
|
||||
}, {
|
||||
a: bson.Binary{Kind: 2, Data: []byte("foo")},
|
||||
b: `{"$binary":"Zm9v","$type":"0x2"}`,
|
||||
}, {
|
||||
b: `BinData(2,"Zm9v")`,
|
||||
c: bson.Binary{Kind: 2, Data: []byte("foo")},
|
||||
},
|
||||
|
||||
// $date
|
||||
{
|
||||
a: time.Date(2016, 5, 15, 1, 2, 3, 4000000, time.UTC),
|
||||
b: `{"$date":"2016-05-15T01:02:03.004Z"}`,
|
||||
}, {
|
||||
b: `{"$date": {"$numberLong": "1002"}}`,
|
||||
		c: time.Date(1970, 1, 1, 0, 0, 1, 2e6, time.UTC),
	}, {
		b: `ISODate("2016-05-15T01:02:03.004Z")`,
		c: time.Date(2016, 5, 15, 1, 2, 3, 4000000, time.UTC),
	}, {
		b: `new Date(1000)`,
		c: time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC),
	}, {
		b: `new Date("2016-05-15")`,
		c: time.Date(2016, 5, 15, 0, 0, 0, 0, time.UTC),
	},

	// $timestamp
	{
		a: bson.MongoTimestamp(4294967298),
		b: `{"$timestamp":{"t":1,"i":2}}`,
	}, {
		b: `Timestamp(1, 2)`,
		c: bson.MongoTimestamp(4294967298),
	},

	// $regex
	{
		a: bson.RegEx{"pattern", "options"},
		b: `{"$regex":"pattern","$options":"options"}`,
	},

	// $oid
	{
		a: bson.ObjectIdHex("0123456789abcdef01234567"),
		b: `{"$oid":"0123456789abcdef01234567"}`,
	}, {
		b: `ObjectId("0123456789abcdef01234567")`,
		c: bson.ObjectIdHex("0123456789abcdef01234567"),
	},

	// $ref (no special type)
	{
		b: `DBRef("name", "id")`,
		c: map[string]interface{}{"$ref": "name", "$id": "id"},
	},

	// $numberLong
	{
		a: 123,
		b: `123`,
	}, {
		a: int64(9007199254740992),
		b: `{"$numberLong":9007199254740992}`,
	}, {
		a: int64(1<<53 + 1),
		b: `{"$numberLong":"9007199254740993"}`,
	}, {
		a: 1<<53 + 1,
		b: `{"$numberLong":"9007199254740993"}`,
		c: int64(9007199254740993),
	}, {
		b: `NumberLong(9007199254740992)`,
		c: int64(1 << 53),
	}, {
		b: `NumberLong("9007199254740993")`,
		c: int64(1<<53 + 1),
	},

	// $minKey, $maxKey
	{
		a: bson.MinKey,
		b: `{"$minKey":1}`,
	}, {
		a: bson.MaxKey,
		b: `{"$maxKey":1}`,
	}, {
		b: `MinKey`,
		c: bson.MinKey,
	}, {
		b: `MaxKey`,
		c: bson.MaxKey,
	}, {
		b: `{"$minKey":0}`,
		e: `invalid $minKey object: {"$minKey":0}`,
	}, {
		b: `{"$maxKey":0}`,
		e: `invalid $maxKey object: {"$maxKey":0}`,
	},

	{
		a: bson.Undefined,
		b: `{"$undefined":true}`,
	}, {
		b: `undefined`,
		c: bson.Undefined,
	}, {
		b: `{"v": undefined}`,
		c: struct{ V interface{} }{bson.Undefined},
	},

	// Unquoted keys and trailing commas
	{
		b: `{$foo: ["bar",],}`,
		c: map[string]interface{}{"$foo": []interface{}{"bar"}},
	},
}

func (s *S) TestJSON(c *C) {
	for i, item := range jsonTests {
		c.Logf("------------ (#%d)", i)
		c.Logf("A: %#v", item.a)
		c.Logf("B: %#v", item.b)

		if item.c == nil {
			item.c = item.a
		} else {
			c.Logf("C: %#v", item.c)
		}
		if item.e != "" {
			c.Logf("E: %s", item.e)
		}

		if item.a != nil {
			data, err := bson.MarshalJSON(item.a)
			c.Assert(err, IsNil)
			c.Logf("Dumped: %#v", string(data))
			c.Assert(strings.TrimSuffix(string(data), "\n"), Equals, item.b)
		}

		var zero interface{}
		if item.c == nil {
			zero = &struct{}{}
		} else {
			zero = reflect.New(reflect.TypeOf(item.c)).Interface()
		}
		err := bson.UnmarshalJSON([]byte(item.b), zero)
		if item.e != "" {
			c.Assert(err, NotNil)
			c.Assert(err.Error(), Equals, item.e)
			continue
		}
		c.Assert(err, IsNil)
		zerov := reflect.ValueOf(zero)
		value := zerov.Interface()
		if zerov.Kind() == reflect.Ptr {
			value = zerov.Elem().Interface()
		}
		c.Logf("Loaded: %#v", value)
		c.Assert(value, DeepEquals, item.c)
	}
}
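The table and test above exercise bson's extended-JSON support in both directions: MarshalJSON emits the strict form ($oid, $timestamp, $numberLong, ...), while UnmarshalJSON also accepts shell-style literals such as ObjectId(...), NumberLong(...), unquoted keys, and trailing commas. A minimal standalone sketch of that round trip, with made-up document contents:

package main

import (
	"fmt"
	"log"

	"gopkg.in/mgo.v2/bson"
)

func main() {
	// Marshal emits strict extended JSON for special BSON types.
	doc := bson.M{
		"id": bson.ObjectIdHex("0123456789abcdef01234567"),
		// 4294967298 = 1<<32 + 2, i.e. t=1 in the high 32 bits, i=2 in the low.
		"ts": bson.MongoTimestamp(4294967298),
	}
	data, err := bson.MarshalJSON(doc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", data)

	// Unmarshal also accepts shell-style literals and unquoted keys.
	var out bson.M
	err = bson.UnmarshalJSON([]byte(`{id: ObjectId("0123456789abcdef01234567")}`), &out)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%#v\n", out)
}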
27
vendor/gopkg.in/mgo.v2/bson/specdata/update.sh
generated
vendored
@@ -1,27 +0,0 @@
#!/bin/sh

set -e

if [ ! -d specifications ]; then
	git clone -b bson git@github.com:jyemin/specifications
fi

TESTFILE="../specdata_test.go"

cat <<END > $TESTFILE
package bson_test

var specTests = []string{
END

for file in specifications/source/bson/tests/*.yml; do
	(
		echo '`'
		cat $file
		echo -n '`,'
	) >> $TESTFILE
done

echo '}' >> $TESTFILE

gofmt -w $TESTFILE
241
vendor/gopkg.in/mgo.v2/bson/specdata_test.go
generated
vendored
@@ -1,241 +0,0 @@
package bson_test

var specTests = []string{
	`
---
description: "Array type"
documents:
    -
        decoded:
            a : []
        encoded: 0D000000046100050000000000
    -
        decoded:
            a: [10]
        encoded: 140000000461000C0000001030000A0000000000
    -
        # Decode an array that uses an empty string as the key
        decodeOnly : true
        decoded:
            a: [10]
        encoded: 130000000461000B00000010000A0000000000
    -
        # Decode an array that uses a non-numeric string as the key
        decodeOnly : true
        decoded:
            a: [10]
        encoded: 150000000461000D000000106162000A0000000000


`, `
---
description: "Boolean type"
documents:
    -
        encoded: "090000000862000100"
        decoded: { "b" : true }
    -
        encoded: "090000000862000000"
        decoded: { "b" : false }


`, `
---
description: "Corrupted BSON"
documents:
    -
        encoded: "09000000016600"
        error: "truncated double"
    -
        encoded: "09000000026600"
        error: "truncated string"
    -
        encoded: "09000000036600"
        error: "truncated document"
    -
        encoded: "09000000046600"
        error: "truncated array"
    -
        encoded: "09000000056600"
        error: "truncated binary"
    -
        encoded: "09000000076600"
        error: "truncated objectid"
    -
        encoded: "09000000086600"
        error: "truncated boolean"
    -
        encoded: "09000000096600"
        error: "truncated date"
    -
        encoded: "090000000b6600"
        error: "truncated regex"
    -
        encoded: "090000000c6600"
        error: "truncated db pointer"
    -
        encoded: "0C0000000d6600"
        error: "truncated javascript"
    -
        encoded: "0C0000000e6600"
        error: "truncated symbol"
    -
        encoded: "0C0000000f6600"
        error: "truncated javascript with scope"
    -
        encoded: "0C000000106600"
        error: "truncated int32"
    -
        encoded: "0C000000116600"
        error: "truncated timestamp"
    -
        encoded: "0C000000126600"
        error: "truncated int64"
    -
        encoded: "0400000000"
        error: basic
    -
        encoded: "0500000001"
        error: basic
    -
        encoded: "05000000"
        error: basic
    -
        encoded: "0700000002610078563412"
        error: basic
    -
        encoded: "090000001061000500"
        error: basic
    -
        encoded: "00000000000000000000"
        error: basic
    -
        encoded: "1300000002666f6f00040000006261720000"
        error: "basic"
    -
        encoded: "1800000003666f6f000f0000001062617200ffffff7f0000"
        error: basic
    -
        encoded: "1500000003666f6f000c0000000862617200010000"
        error: basic
    -
        encoded: "1c00000003666f6f001200000002626172000500000062617a000000"
        error: basic
    -
        encoded: "1000000002610004000000616263ff00"
        error: string is not null-terminated
    -
        encoded: "0c0000000200000000000000"
        error: bad_string_length
    -
        encoded: "120000000200ffffffff666f6f6261720000"
        error: bad_string_length
    -
        encoded: "0c0000000e00000000000000"
        error: bad_string_length
    -
        encoded: "120000000e00ffffffff666f6f6261720000"
        error: bad_string_length
    -
        encoded: "180000000c00fa5bd841d6585d9900"
        error: ""
    -
        encoded: "1e0000000c00ffffffff666f6f626172005259b56afa5bd841d6585d9900"
        error: bad_string_length
    -
        encoded: "0c0000000d00000000000000"
        error: bad_string_length
    -
        encoded: "0c0000000d00ffffffff0000"
        error: bad_string_length
    -
        encoded: "1c0000000f001500000000000000000c000000020001000000000000"
        error: bad_string_length
    -
        encoded: "1c0000000f0015000000ffffffff000c000000020001000000000000"
        error: bad_string_length
    -
        encoded: "1c0000000f001500000001000000000c000000020000000000000000"
        error: bad_string_length
    -
        encoded: "1c0000000f001500000001000000000c0000000200ffffffff000000"
        error: bad_string_length
    -
        encoded: "0E00000008616263646566676869707172737475"
        error: "Run-on CString"
    -
        encoded: "0100000000"
        error: "An object size that's too small to even include the object size, but is correctly encoded, along with a correct EOO (and no data)"
    -
        encoded: "1a0000000e74657374000c00000068656c6c6f20776f726c6400000500000000"
        error: "One object, but with object size listed smaller than it is in the data"
    -
        encoded: "05000000"
        error: "One object, missing the EOO at the end"
    -
        encoded: "0500000001"
        error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x01"
    -
        encoded: "05000000ff"
        error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0xff"
    -
        encoded: "0500000070"
        error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x70"
    -
        encoded: "07000000000000"
        error: "Invalid BSON type low range"
    -
        encoded: "07000000800000"
        error: "Invalid BSON type high range"
    -
        encoded: "090000000862000200"
        error: "Invalid boolean value of 2"
    -
        encoded: "09000000086200ff00"
        error: "Invalid boolean value of -1"
`, `
---
description: "Int32 type"
documents:
    -
        decoded:
            i: -2147483648
        encoded: 0C0000001069000000008000
    -
        decoded:
            i: 2147483647
        encoded: 0C000000106900FFFFFF7F00
    -
        decoded:
            i: -1
        encoded: 0C000000106900FFFFFFFF00
    -
        decoded:
            i: 0
        encoded: 0C0000001069000000000000
    -
        decoded:
            i: 1
        encoded: 0C0000001069000100000000

`, `
---
description: "String type"
documents:
    -
        decoded:
            s : ""
        encoded: 0D000000027300010000000000
    -
        decoded:
            s: "a"
        encoded: 0E00000002730002000000610000
    -
        decoded:
            s: "This is a string"
        encoded: 1D0000000273001100000054686973206973206120737472696E670000
    -
        decoded:
            s: "κόσμε"
        encoded: 180000000273000C000000CEBAE1BDB9CF83CEBCCEB50000
`}
351
vendor/gopkg.in/mgo.v2/bulk.go
generated
vendored
@@ -1,351 +0,0 @@
package mgo

import (
	"bytes"
	"sort"

	"gopkg.in/mgo.v2/bson"
)

// Bulk represents an operation that can be prepared with several
// orthogonal changes before being delivered to the server.
//
// MongoDB servers older than version 2.6 do not have proper support for bulk
// operations, so the driver attempts to map its API as much as possible into
// the functionality that works. In particular, in those releases updates and
// removals are sent individually, and inserts are sent in bulk but have
// suboptimal error reporting compared to more recent versions of the server.
// See the documentation of BulkErrorCase for details on that.
//
// Relevant documentation:
//
//   http://blog.mongodb.org/post/84922794768/mongodbs-new-bulk-api
//
type Bulk struct {
	c       *Collection
	opcount int
	actions []bulkAction
	ordered bool
}

type bulkOp int

const (
	bulkInsert bulkOp = iota + 1
	bulkUpdate
	bulkUpdateAll
	bulkRemove
)

type bulkAction struct {
	op   bulkOp
	docs []interface{}
	idxs []int
}

type bulkUpdateOp []interface{}
type bulkDeleteOp []interface{}

// BulkResult holds the results for a bulk operation.
type BulkResult struct {
	Matched  int
	Modified int // Available only for MongoDB 2.6+

	// Be conservative while we understand exactly how to report these
	// results in a useful and convenient way, and also how to emulate
	// them with prior servers.
	private bool
}

// BulkError holds an error returned from running a Bulk operation.
// Individual errors may be obtained and inspected via the Cases method.
type BulkError struct {
	ecases []BulkErrorCase
}

func (e *BulkError) Error() string {
	if len(e.ecases) == 0 {
		return "invalid BulkError instance: no errors"
	}
	if len(e.ecases) == 1 {
		return e.ecases[0].Err.Error()
	}
	msgs := make([]string, 0, len(e.ecases))
	seen := make(map[string]bool)
	for _, ecase := range e.ecases {
		msg := ecase.Err.Error()
		if !seen[msg] {
			seen[msg] = true
			msgs = append(msgs, msg)
		}
	}
	if len(msgs) == 1 {
		return msgs[0]
	}
	var buf bytes.Buffer
	buf.WriteString("multiple errors in bulk operation:\n")
	for _, msg := range msgs {
		buf.WriteString(" - ")
		buf.WriteString(msg)
		buf.WriteByte('\n')
	}
	return buf.String()
}

type bulkErrorCases []BulkErrorCase

func (slice bulkErrorCases) Len() int           { return len(slice) }
func (slice bulkErrorCases) Less(i, j int) bool { return slice[i].Index < slice[j].Index }
func (slice bulkErrorCases) Swap(i, j int)      { slice[i], slice[j] = slice[j], slice[i] }

// BulkErrorCase holds an individual error found while attempting a single change
// within a bulk operation, and the position in which it was enqueued.
//
// MongoDB servers older than version 2.6 do not have proper support for bulk
// operations, so the driver attempts to map its API as much as possible into
// the functionality that works. In particular, only the last error is reported
// for bulk inserts and without any positional information, so the Index
// field is set to -1 in these cases.
type BulkErrorCase struct {
	Index int // Position of operation that failed, or -1 if unknown.
	Err   error
}

// Cases returns all individual errors found while attempting the requested changes.
//
// See the documentation of BulkErrorCase for limitations in older MongoDB releases.
func (e *BulkError) Cases() []BulkErrorCase {
	return e.ecases
}

// Bulk returns a value to prepare the execution of a bulk operation.
func (c *Collection) Bulk() *Bulk {
	return &Bulk{c: c, ordered: true}
}

// Unordered puts the bulk operation in unordered mode.
//
// In unordered mode the individual operations may be sent
// out of order, which means later operations may proceed
// even if prior ones have failed.
func (b *Bulk) Unordered() {
	b.ordered = false
}

func (b *Bulk) action(op bulkOp, opcount int) *bulkAction {
	var action *bulkAction
	if len(b.actions) > 0 && b.actions[len(b.actions)-1].op == op {
		action = &b.actions[len(b.actions)-1]
	} else if !b.ordered {
		for i := range b.actions {
			if b.actions[i].op == op {
				action = &b.actions[i]
				break
			}
		}
	}
	if action == nil {
		b.actions = append(b.actions, bulkAction{op: op})
		action = &b.actions[len(b.actions)-1]
	}
	for i := 0; i < opcount; i++ {
		action.idxs = append(action.idxs, b.opcount)
		b.opcount++
	}
	return action
}

// Insert queues up the provided documents for insertion.
func (b *Bulk) Insert(docs ...interface{}) {
	action := b.action(bulkInsert, len(docs))
	action.docs = append(action.docs, docs...)
}

// Remove queues up the provided selectors for removing matching documents.
// Each selector will remove only a single matching document.
func (b *Bulk) Remove(selectors ...interface{}) {
	action := b.action(bulkRemove, len(selectors))
	for _, selector := range selectors {
		if selector == nil {
			selector = bson.D{}
		}
		action.docs = append(action.docs, &deleteOp{
			Collection: b.c.FullName,
			Selector:   selector,
			Flags:      1,
			Limit:      1,
		})
	}
}

// RemoveAll queues up the provided selectors for removing all matching documents.
// Each selector will remove all matching documents.
func (b *Bulk) RemoveAll(selectors ...interface{}) {
	action := b.action(bulkRemove, len(selectors))
	for _, selector := range selectors {
		if selector == nil {
			selector = bson.D{}
		}
		action.docs = append(action.docs, &deleteOp{
			Collection: b.c.FullName,
			Selector:   selector,
			Flags:      0,
			Limit:      0,
		})
	}
}

// Update queues up the provided pairs of updating instructions.
// The first element of each pair selects which documents must be
// updated, and the second element defines how to update it.
// Each pair matches at most one document for updating.
func (b *Bulk) Update(pairs ...interface{}) {
	if len(pairs)%2 != 0 {
		panic("Bulk.Update requires an even number of parameters")
	}
	action := b.action(bulkUpdate, len(pairs)/2)
	for i := 0; i < len(pairs); i += 2 {
		selector := pairs[i]
		if selector == nil {
			selector = bson.D{}
		}
		action.docs = append(action.docs, &updateOp{
			Collection: b.c.FullName,
			Selector:   selector,
			Update:     pairs[i+1],
		})
	}
}

// UpdateAll queues up the provided pairs of updating instructions.
// The first element of each pair selects which documents must be
// updated, and the second element defines how to update it.
// Each pair updates all documents matching the selector.
func (b *Bulk) UpdateAll(pairs ...interface{}) {
	if len(pairs)%2 != 0 {
		panic("Bulk.UpdateAll requires an even number of parameters")
	}
	action := b.action(bulkUpdate, len(pairs)/2)
	for i := 0; i < len(pairs); i += 2 {
		selector := pairs[i]
		if selector == nil {
			selector = bson.D{}
		}
		action.docs = append(action.docs, &updateOp{
			Collection: b.c.FullName,
			Selector:   selector,
			Update:     pairs[i+1],
			Flags:      2,
			Multi:      true,
		})
	}
}

// Upsert queues up the provided pairs of upserting instructions.
// The first element of each pair selects which documents must be
// updated, and the second element defines how to update it.
// Each pair matches at most one document for updating; if no
// document matches, a new one is inserted instead.
func (b *Bulk) Upsert(pairs ...interface{}) {
	if len(pairs)%2 != 0 {
		panic("Bulk.Upsert requires an even number of parameters")
	}
	action := b.action(bulkUpdate, len(pairs)/2)
	for i := 0; i < len(pairs); i += 2 {
		selector := pairs[i]
		if selector == nil {
			selector = bson.D{}
		}
		action.docs = append(action.docs, &updateOp{
			Collection: b.c.FullName,
			Selector:   selector,
			Update:     pairs[i+1],
			Flags:      1,
			Upsert:     true,
		})
	}
}

// Run runs all the operations queued up.
//
// If an error is reported on an unordered bulk operation, the error value may
// be an aggregation of all issues observed. As an exception to that, Insert
// operations running on MongoDB versions prior to 2.6 will report the last
// error only due to a limitation in the wire protocol.
func (b *Bulk) Run() (*BulkResult, error) {
	var result BulkResult
	var berr BulkError
	var failed bool
	for i := range b.actions {
		action := &b.actions[i]
		var ok bool
		switch action.op {
		case bulkInsert:
			ok = b.runInsert(action, &result, &berr)
		case bulkUpdate:
			ok = b.runUpdate(action, &result, &berr)
		case bulkRemove:
			ok = b.runRemove(action, &result, &berr)
		default:
			panic("unknown bulk operation")
		}
		if !ok {
			failed = true
			if b.ordered {
				break
			}
		}
	}
	if failed {
		sort.Sort(bulkErrorCases(berr.ecases))
		return nil, &berr
	}
	return &result, nil
}

func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *BulkError) bool {
	op := &insertOp{b.c.FullName, action.docs, 0}
	if !b.ordered {
		op.flags = 1 // ContinueOnError
	}
	lerr, err := b.c.writeOp(op, b.ordered)
	return b.checkSuccess(action, berr, lerr, err)
}

func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *BulkError) bool {
	lerr, err := b.c.writeOp(bulkUpdateOp(action.docs), b.ordered)
	if lerr != nil {
		result.Matched += lerr.N
		result.Modified += lerr.modified
	}
	return b.checkSuccess(action, berr, lerr, err)
}

func (b *Bulk) runRemove(action *bulkAction, result *BulkResult, berr *BulkError) bool {
	lerr, err := b.c.writeOp(bulkDeleteOp(action.docs), b.ordered)
	if lerr != nil {
		result.Matched += lerr.N
		result.Modified += lerr.modified
	}
	return b.checkSuccess(action, berr, lerr, err)
}

func (b *Bulk) checkSuccess(action *bulkAction, berr *BulkError, lerr *LastError, err error) bool {
	if lerr != nil && len(lerr.ecases) > 0 {
		for i := 0; i < len(lerr.ecases); i++ {
			// Map back from the local error index into the visible one.
			ecase := lerr.ecases[i]
			idx := ecase.Index
			if idx >= 0 {
				idx = action.idxs[idx]
			}
			berr.ecases = append(berr.ecases, BulkErrorCase{idx, ecase.Err})
		}
		return false
	} else if err != nil {
		for i := 0; i < len(action.idxs); i++ {
			berr.ecases = append(berr.ecases, BulkErrorCase{action.idxs[i], err})
		}
		return false
	}
	return true
}
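The bulk_test.go file that follows covers this API in detail, but as a quick orientation, here is a minimal sketch of driving the Bulk type removed above from application code; the server address and the database/collection names are placeholders:

package main

import (
	"fmt"
	"log"

	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)

func main() {
	session, err := mgo.Dial("localhost:27017") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	// Queue several orthogonal changes and deliver them in one Run call.
	bulk := session.DB("mydb").C("mycoll").Bulk()
	bulk.Unordered() // later operations proceed even if earlier ones fail
	bulk.Insert(bson.M{"n": 1}, bson.M{"n": 2})
	bulk.Update(bson.M{"n": 1}, bson.M{"$set": bson.M{"n": 10}})
	bulk.Remove(bson.M{"n": 2})

	result, err := bulk.Run()
	if err != nil {
		// Inspect per-operation failures and their queue positions.
		if berr, ok := err.(*mgo.BulkError); ok {
			for _, ecase := range berr.Cases() {
				fmt.Printf("op %d failed: %v\n", ecase.Index, ecase.Err)
			}
		}
		log.Fatal(err)
	}
	fmt.Printf("matched=%d modified=%d\n", result.Matched, result.Modified)
}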
504
vendor/gopkg.in/mgo.v2/bulk_test.go
generated
vendored
@@ -1,504 +0,0 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2015 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo_test

import (
	. "gopkg.in/check.v1"
	"gopkg.in/mgo.v2"
)

func (s *S) TestBulkInsert(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")
	bulk := coll.Bulk()
	bulk.Insert(M{"n": 1})
	bulk.Insert(M{"n": 2}, M{"n": 3})
	r, err := bulk.Run()
	c.Assert(err, IsNil)
	c.Assert(r, FitsTypeOf, &mgo.BulkResult{})

	type doc struct{ N int }
	var res []doc
	err = coll.Find(nil).Sort("n").All(&res)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}})
}

func (s *S) TestBulkInsertError(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")
	bulk := coll.Bulk()
	bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3})
	_, err = bulk.Run()
	c.Assert(err, ErrorMatches, ".*duplicate key.*")
	c.Assert(mgo.IsDup(err), Equals, true)

	type doc struct {
		N int `_id`
	}
	var res []doc
	err = coll.Find(nil).Sort("_id").All(&res)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []doc{{1}, {2}})
}

func (s *S) TestBulkInsertErrorUnordered(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")
	bulk := coll.Bulk()
	bulk.Unordered()
	bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3})
	_, err = bulk.Run()
	c.Assert(err, ErrorMatches, ".*duplicate key.*")

	type doc struct {
		N int `_id`
	}
	var res []doc
	err = coll.Find(nil).Sort("_id").All(&res)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}})
}

func (s *S) TestBulkInsertErrorUnorderedSplitBatch(c *C) {
	// The server has a batch limit of 1000 documents when using write commands.
	// This artificial limit did not exist with the old wire protocol, so to
	// avoid compatibility issues the implementation internally splits batches
	// into the proper size and delivers them one by one. This test ensures that
	// the behavior of unordered (that is, continue on error) remains correct
	// when errors happen and there are batches left.
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")
	bulk := coll.Bulk()
	bulk.Unordered()

	const total = 4096
	type doc struct {
		Id int `_id`
	}
	docs := make([]interface{}, total)
	for i := 0; i < total; i++ {
		docs[i] = doc{i}
	}
	docs[1] = doc{0}
	bulk.Insert(docs...)
	_, err = bulk.Run()
	c.Assert(err, ErrorMatches, ".*duplicate key.*")

	n, err := coll.Count()
	c.Assert(err, IsNil)
	c.Assert(n, Equals, total-1)

	var res doc
	err = coll.FindId(1500).One(&res)
	c.Assert(err, IsNil)
	c.Assert(res.Id, Equals, 1500)
}

func (s *S) TestBulkErrorString(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	// If it's just the same string multiple times, join it into a single message.
	bulk := coll.Bulk()
	bulk.Unordered()
	bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2})
	_, err = bulk.Run()
	c.Assert(err, ErrorMatches, ".*duplicate key.*")
	c.Assert(err, Not(ErrorMatches), ".*duplicate key.*duplicate key")
	c.Assert(mgo.IsDup(err), Equals, true)

	// With matching errors but different messages, present them all.
	bulk = coll.Bulk()
	bulk.Unordered()
	bulk.Insert(M{"_id": "dupone"}, M{"_id": "dupone"}, M{"_id": "duptwo"}, M{"_id": "duptwo"})
	_, err = bulk.Run()
	if s.versionAtLeast(2, 6) {
		c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n( - .*duplicate.*\n){2}$")
		c.Assert(err, ErrorMatches, "(?s).*dupone.*")
		c.Assert(err, ErrorMatches, "(?s).*duptwo.*")
	} else {
		// Wire protocol query doesn't return all errors.
		c.Assert(err, ErrorMatches, ".*duplicate.*")
	}
	c.Assert(mgo.IsDup(err), Equals, true)

	// With mixed errors, present them all.
	bulk = coll.Bulk()
	bulk.Unordered()
	bulk.Insert(M{"_id": 1}, M{"_id": []int{2}})
	_, err = bulk.Run()
	if s.versionAtLeast(2, 6) {
		c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n - .*duplicate.*\n - .*array.*\n$")
	} else {
		// Wire protocol query doesn't return all errors.
		c.Assert(err, ErrorMatches, ".*array.*")
	}
	c.Assert(mgo.IsDup(err), Equals, false)
}

func (s *S) TestBulkErrorCases_2_6(c *C) {
	if !s.versionAtLeast(2, 6) {
		c.Skip("2.4- has poor bulk reporting")
	}
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	bulk := coll.Bulk()
	bulk.Unordered()

	// There's a limit of 1000 operations per command, so
	// this forces the more complex indexing logic to act.
	for i := 0; i < 1010; i++ {
		switch i {
		case 3, 14:
			bulk.Insert(M{"_id": "dupone"})
		case 5, 106:
			bulk.Update(M{"_id": i - 1}, M{"$set": M{"_id": 4}})
		case 7, 1008:
			bulk.Insert(M{"_id": "duptwo"})
		default:
			bulk.Insert(M{"_id": i})
		}
	}

	_, err = bulk.Run()
	ecases := err.(*mgo.BulkError).Cases()

	c.Check(ecases[0].Err, ErrorMatches, ".*duplicate.*dupone.*")
	c.Check(ecases[0].Index, Equals, 14)
	c.Check(ecases[1].Err, ErrorMatches, ".*update.*_id.*")
	c.Check(ecases[1].Index, Equals, 106)
	c.Check(ecases[2].Err, ErrorMatches, ".*duplicate.*duptwo.*")
	c.Check(ecases[2].Index, Equals, 1008)
}

func (s *S) TestBulkErrorCases_2_4(c *C) {
	if s.versionAtLeast(2, 6) {
		c.Skip("2.6+ has better reporting")
	}
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	bulk := coll.Bulk()
	bulk.Unordered()

	// There's a limit of 1000 operations per command, so
	// this forces the more complex indexing logic to act.
	for i := 0; i < 1010; i++ {
		switch i {
		case 3, 14:
			bulk.Insert(M{"_id": "dupone"})
		case 5:
			bulk.Update(M{"_id": i - 1}, M{"$set": M{"n": 4}})
		case 106:
			bulk.Update(M{"_id": i - 1}, M{"$bogus": M{"n": 4}})
		case 7, 1008:
			bulk.Insert(M{"_id": "duptwo"})
		default:
			bulk.Insert(M{"_id": i})
		}
	}

	_, err = bulk.Run()
	ecases := err.(*mgo.BulkError).Cases()

	c.Check(ecases[0].Err, ErrorMatches, ".*duplicate.*duptwo.*")
	c.Check(ecases[0].Index, Equals, -1)
	c.Check(ecases[1].Err, ErrorMatches, `.*\$bogus.*`)
	c.Check(ecases[1].Index, Equals, 106)
}

func (s *S) TestBulkErrorCasesOrdered(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	bulk := coll.Bulk()

	// There's a limit of 1000 operations per command, so
	// this forces the more complex indexing logic to act.
	for i := 0; i < 20; i++ {
		switch i {
		case 3, 14:
			bulk.Insert(M{"_id": "dupone"})
		case 7, 17:
			bulk.Insert(M{"_id": "duptwo"})
		default:
			bulk.Insert(M{"_id": i})
		}
	}

	_, err = bulk.Run()
	ecases := err.(*mgo.BulkError).Cases()

	c.Check(ecases[0].Err, ErrorMatches, ".*duplicate.*dupone.*")
	if s.versionAtLeast(2, 6) {
		c.Check(ecases[0].Index, Equals, 14)
	} else {
		c.Check(ecases[0].Index, Equals, -1)
	}
	c.Check(ecases, HasLen, 1)
}

func (s *S) TestBulkUpdate(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
	c.Assert(err, IsNil)

	bulk := coll.Bulk()
	bulk.Update(M{"n": 1}, M{"$set": M{"n": 1}})
	bulk.Update(M{"n": 2}, M{"$set": M{"n": 20}})
	bulk.Update(M{"n": 5}, M{"$set": M{"n": 50}}) // Won't match.
	bulk.Update(M{"n": 1}, M{"$set": M{"n": 10}}, M{"n": 3}, M{"$set": M{"n": 30}})
	r, err := bulk.Run()
	c.Assert(err, IsNil)
	c.Assert(r.Matched, Equals, 4)
	if s.versionAtLeast(2, 6) {
		c.Assert(r.Modified, Equals, 3)
	}

	type doc struct{ N int }
	var res []doc
	err = coll.Find(nil).Sort("n").All(&res)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []doc{{10}, {20}, {30}})
}

func (s *S) TestBulkUpdateError(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
	c.Assert(err, IsNil)

	bulk := coll.Bulk()
	bulk.Update(
		M{"n": 1}, M{"$set": M{"n": 10}},
		M{"n": 2}, M{"$set": M{"n": 20, "_id": 20}},
		M{"n": 3}, M{"$set": M{"n": 30}},
	)
	r, err := bulk.Run()
	c.Assert(err, ErrorMatches, ".*_id.*")
	c.Assert(r, FitsTypeOf, &mgo.BulkResult{})

	type doc struct{ N int }
	var res []doc
	err = coll.Find(nil).Sort("n").All(&res)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []doc{{2}, {3}, {10}})
}

func (s *S) TestBulkUpdateErrorUnordered(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
	c.Assert(err, IsNil)

	bulk := coll.Bulk()
	bulk.Unordered()
	bulk.Update(
		M{"n": 1}, M{"$set": M{"n": 10}},
		M{"n": 2}, M{"$set": M{"n": 20, "_id": 20}},
		M{"n": 3}, M{"$set": M{"n": 30}},
	)
	r, err := bulk.Run()
	c.Assert(err, ErrorMatches, ".*_id.*")
	c.Assert(r, FitsTypeOf, &mgo.BulkResult{})

	type doc struct{ N int }
	var res []doc
	err = coll.Find(nil).Sort("n").All(&res)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []doc{{2}, {10}, {30}})
}

func (s *S) TestBulkUpdateAll(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
	c.Assert(err, IsNil)

	bulk := coll.Bulk()
	bulk.UpdateAll(M{"n": 1}, M{"$set": M{"n": 10}})
	bulk.UpdateAll(M{"n": 2}, M{"$set": M{"n": 2}})  // Won't change.
	bulk.UpdateAll(M{"n": 5}, M{"$set": M{"n": 50}}) // Won't match.
	bulk.UpdateAll(M{}, M{"$inc": M{"n": 1}}, M{"n": 11}, M{"$set": M{"n": 5}})
	r, err := bulk.Run()
	c.Assert(err, IsNil)
	c.Assert(r.Matched, Equals, 6)
	if s.versionAtLeast(2, 6) {
		c.Assert(r.Modified, Equals, 5)
	}

	type doc struct{ N int }
	var res []doc
	err = coll.Find(nil).Sort("n").All(&res)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []doc{{3}, {4}, {5}})
}

func (s *S) TestBulkMixedUnordered(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	// Abuse undefined behavior to ensure the desired implementation is in place.
	bulk := coll.Bulk()
	bulk.Unordered()
	bulk.Insert(M{"n": 1})
	bulk.Update(M{"n": 2}, M{"$inc": M{"n": 1}})
	bulk.Insert(M{"n": 2})
	bulk.Update(M{"n": 3}, M{"$inc": M{"n": 1}})
	bulk.Update(M{"n": 1}, M{"$inc": M{"n": 1}})
	bulk.Insert(M{"n": 3})
	r, err := bulk.Run()
	c.Assert(err, IsNil)
	c.Assert(r.Matched, Equals, 3)
	if s.versionAtLeast(2, 6) {
		c.Assert(r.Modified, Equals, 3)
	}

	type doc struct{ N int }
	var res []doc
	err = coll.Find(nil).Sort("n").All(&res)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []doc{{2}, {3}, {4}})
}

func (s *S) TestBulkUpsert(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
	c.Assert(err, IsNil)

	bulk := coll.Bulk()
	bulk.Upsert(M{"n": 2}, M{"$set": M{"n": 20}})
	bulk.Upsert(M{"n": 4}, M{"$set": M{"n": 40}}, M{"n": 3}, M{"$set": M{"n": 30}})
	r, err := bulk.Run()
	c.Assert(err, IsNil)
	c.Assert(r, FitsTypeOf, &mgo.BulkResult{})

	type doc struct{ N int }
	var res []doc
	err = coll.Find(nil).Sort("n").All(&res)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []doc{{1}, {20}, {30}, {40}})
}

func (s *S) TestBulkRemove(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}, M{"n": 4}, M{"n": 4})
	c.Assert(err, IsNil)

	bulk := coll.Bulk()
	bulk.Remove(M{"n": 1})
	bulk.Remove(M{"n": 2}, M{"n": 4})
	r, err := bulk.Run()
	c.Assert(err, IsNil)
	c.Assert(r.Matched, Equals, 3)

	type doc struct{ N int }
	var res []doc
	err = coll.Find(nil).Sort("n").All(&res)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []doc{{3}, {4}})
}

func (s *S) TestBulkRemoveAll(c *C) {
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}, M{"n": 4}, M{"n": 4})
	c.Assert(err, IsNil)

	bulk := coll.Bulk()
	bulk.RemoveAll(M{"n": 1})
	bulk.RemoveAll(M{"n": 2}, M{"n": 4})
	r, err := bulk.Run()
	c.Assert(err, IsNil)
	c.Assert(r.Matched, Equals, 4)

	type doc struct{ N int }
	var res []doc
	err = coll.Find(nil).Sort("n").All(&res)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []doc{{3}})
}
682
vendor/gopkg.in/mgo.v2/cluster.go
generated
vendored
@@ -1,682 +0,0 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo

import (
	"errors"
	"fmt"
	"net"
	"strconv"
	"strings"
	"sync"
	"time"

	"gopkg.in/mgo.v2/bson"
)

// ---------------------------------------------------------------------------
// Mongo cluster encapsulation.
//
// A cluster enables the communication with one or more servers participating
// in a mongo cluster. This works with individual servers, a replica set,
// a replica pair, one or multiple mongos routers, etc.

type mongoCluster struct {
	sync.RWMutex
	serverSynced sync.Cond
	userSeeds    []string
	dynaSeeds    []string
	servers      mongoServers
	masters      mongoServers
	references   int
	syncing      bool
	direct       bool
	failFast     bool
	syncCount    uint
	setName      string
	cachedIndex  map[string]bool
	sync         chan bool
	dial         dialer
}

func newCluster(userSeeds []string, direct, failFast bool, dial dialer, setName string) *mongoCluster {
	cluster := &mongoCluster{
		userSeeds:  userSeeds,
		references: 1,
		direct:     direct,
		failFast:   failFast,
		dial:       dial,
		setName:    setName,
	}
	cluster.serverSynced.L = cluster.RWMutex.RLocker()
	cluster.sync = make(chan bool, 1)
	stats.cluster(+1)
	go cluster.syncServersLoop()
	return cluster
}

// Acquire increases the reference count for the cluster.
func (cluster *mongoCluster) Acquire() {
	cluster.Lock()
	cluster.references++
	debugf("Cluster %p acquired (refs=%d)", cluster, cluster.references)
	cluster.Unlock()
}

// Release decreases the reference count for the cluster. Once
// it reaches zero, all servers will be closed.
func (cluster *mongoCluster) Release() {
	cluster.Lock()
	if cluster.references == 0 {
		panic("cluster.Release() with references == 0")
	}
	cluster.references--
	debugf("Cluster %p released (refs=%d)", cluster, cluster.references)
	if cluster.references == 0 {
		for _, server := range cluster.servers.Slice() {
			server.Close()
		}
		// Wake up the sync loop so it can die.
		cluster.syncServers()
		stats.cluster(-1)
	}
	cluster.Unlock()
}

func (cluster *mongoCluster) LiveServers() (servers []string) {
	cluster.RLock()
	for _, serv := range cluster.servers.Slice() {
		servers = append(servers, serv.Addr)
	}
	cluster.RUnlock()
	return servers
}

func (cluster *mongoCluster) removeServer(server *mongoServer) {
	cluster.Lock()
	cluster.masters.Remove(server)
	other := cluster.servers.Remove(server)
	cluster.Unlock()
	if other != nil {
		other.Close()
		log("Removed server ", server.Addr, " from cluster.")
	}
	server.Close()
}

type isMasterResult struct {
	IsMaster       bool
	Secondary      bool
	Primary        string
	Hosts          []string
	Passives       []string
	Tags           bson.D
	Msg            string
	SetName        string `bson:"setName"`
	MaxWireVersion int    `bson:"maxWireVersion"`
}

func (cluster *mongoCluster) isMaster(socket *mongoSocket, result *isMasterResult) error {
	// Monotonic mode lets it talk to a slave and still hold the socket.
	session := newSession(Monotonic, cluster, 10*time.Second)
	session.setSocket(socket)
	err := session.Run("ismaster", result)
	session.Close()
	return err
}

type possibleTimeout interface {
	Timeout() bool
}

var syncSocketTimeout = 5 * time.Second

func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerInfo, hosts []string, err error) {
	var syncTimeout time.Duration
	if raceDetector {
		// This variable is only ever touched by tests.
		globalMutex.Lock()
		syncTimeout = syncSocketTimeout
		globalMutex.Unlock()
	} else {
		syncTimeout = syncSocketTimeout
	}

	addr := server.Addr
	log("SYNC Processing ", addr, "...")

	// Retry a few times to avoid knocking a server down for a hiccup.
	var result isMasterResult
	var tryerr error
	for retry := 0; ; retry++ {
		if retry == 3 || retry == 1 && cluster.failFast {
			return nil, nil, tryerr
		}
		if retry > 0 {
			// Don't abuse the server needlessly if there's something actually wrong.
			if err, ok := tryerr.(possibleTimeout); ok && err.Timeout() {
				// Give a chance for waiters to timeout as well.
				cluster.serverSynced.Broadcast()
			}
			time.Sleep(syncShortDelay)
		}

		// It's not clear what would be a good timeout here. Is it
		// better to wait longer or to retry?
		socket, _, err := server.AcquireSocket(0, syncTimeout)
		if err != nil {
			tryerr = err
			logf("SYNC Failed to get socket to %s: %v", addr, err)
			continue
		}
		err = cluster.isMaster(socket, &result)
		socket.Release()
		if err != nil {
			tryerr = err
			logf("SYNC Command 'ismaster' to %s failed: %v", addr, err)
			continue
		}
		debugf("SYNC Result of 'ismaster' from %s: %#v", addr, result)
		break
	}

	if cluster.setName != "" && result.SetName != cluster.setName {
		logf("SYNC Server %s is not a member of replica set %q", addr, cluster.setName)
		return nil, nil, fmt.Errorf("server %s is not a member of replica set %q", addr, cluster.setName)
	}

	if result.IsMaster {
		debugf("SYNC %s is a master.", addr)
		if !server.info.Master {
			// Made an incorrect assumption above, so fix stats.
			stats.conn(-1, false)
			stats.conn(+1, true)
		}
	} else if result.Secondary {
		debugf("SYNC %s is a slave.", addr)
	} else if cluster.direct {
		logf("SYNC %s in unknown state. Pretending it's a slave due to direct connection.", addr)
	} else {
		logf("SYNC %s is neither a master nor a slave.", addr)
		// Let stats track it as whatever was known before.
		return nil, nil, errors.New(addr + " is neither a master nor a slave")
	}

	info = &mongoServerInfo{
		Master:         result.IsMaster,
		Mongos:         result.Msg == "isdbgrid",
		Tags:           result.Tags,
		SetName:        result.SetName,
		MaxWireVersion: result.MaxWireVersion,
	}

	hosts = make([]string, 0, 1+len(result.Hosts)+len(result.Passives))
	if result.Primary != "" {
		// First in the list to speed up master discovery.
		hosts = append(hosts, result.Primary)
	}
	hosts = append(hosts, result.Hosts...)
	hosts = append(hosts, result.Passives...)

	debugf("SYNC %s knows about the following peers: %#v", addr, hosts)
	return info, hosts, nil
}

type syncKind bool

const (
	completeSync syncKind = true
	partialSync  syncKind = false
)

func (cluster *mongoCluster) addServer(server *mongoServer, info *mongoServerInfo, syncKind syncKind) {
	cluster.Lock()
	current := cluster.servers.Search(server.ResolvedAddr)
	if current == nil {
		if syncKind == partialSync {
			cluster.Unlock()
			server.Close()
			log("SYNC Discarding unknown server ", server.Addr, " due to partial sync.")
			return
		}
		cluster.servers.Add(server)
		if info.Master {
			cluster.masters.Add(server)
			log("SYNC Adding ", server.Addr, " to cluster as a master.")
		} else {
			log("SYNC Adding ", server.Addr, " to cluster as a slave.")
		}
	} else {
		if server != current {
			panic("addServer attempting to add duplicated server")
		}
		if server.Info().Master != info.Master {
			if info.Master {
				log("SYNC Server ", server.Addr, " is now a master.")
				cluster.masters.Add(server)
			} else {
				log("SYNC Server ", server.Addr, " is now a slave.")
				cluster.masters.Remove(server)
			}
		}
	}
	server.SetInfo(info)
	debugf("SYNC Broadcasting availability of server %s", server.Addr)
	cluster.serverSynced.Broadcast()
	cluster.Unlock()
}

func (cluster *mongoCluster) getKnownAddrs() []string {
	cluster.RLock()
	max := len(cluster.userSeeds) + len(cluster.dynaSeeds) + cluster.servers.Len()
	seen := make(map[string]bool, max)
	known := make([]string, 0, max)

	add := func(addr string) {
		if _, found := seen[addr]; !found {
			seen[addr] = true
			known = append(known, addr)
		}
	}

	for _, addr := range cluster.userSeeds {
		add(addr)
	}
	for _, addr := range cluster.dynaSeeds {
		add(addr)
	}
	for _, serv := range cluster.servers.Slice() {
		add(serv.Addr)
	}
	cluster.RUnlock()

	return known
}

// syncServers injects a value into the cluster.sync channel to force
// an iteration of the syncServersLoop function.
func (cluster *mongoCluster) syncServers() {
	select {
	case cluster.sync <- true:
	default:
	}
}

// How long to wait for a checkup of the cluster topology if nothing
// else kicks a synchronization before that.
const syncServersDelay = 30 * time.Second
const syncShortDelay = 500 * time.Millisecond

// syncServersLoop loops while the cluster is alive to keep its idea of
// the server topology up-to-date. It must be called just once from
// newCluster. The loop iterates once syncServersDelay has passed, or
// if somebody injects a value into the cluster.sync channel to force a
// synchronization. A loop iteration will contact all servers in
// parallel, ask them about known peers and their own role within the
// cluster, and then attempt to do the same with all the peers
// retrieved.
func (cluster *mongoCluster) syncServersLoop() {
	for {
		debugf("SYNC Cluster %p is starting a sync loop iteration.", cluster)

		cluster.Lock()
		if cluster.references == 0 {
			cluster.Unlock()
			break
		}
		cluster.references++ // Keep alive while syncing.
		direct := cluster.direct
		cluster.Unlock()

		cluster.syncServersIteration(direct)

		// We just synchronized, so consume any outstanding requests.
		select {
		case <-cluster.sync:
		default:
		}

		cluster.Release()

		// Hold off before allowing another sync. No point in
		// burning CPU looking for down servers.
		if !cluster.failFast {
			time.Sleep(syncShortDelay)
		}

		cluster.Lock()
		if cluster.references == 0 {
			cluster.Unlock()
			break
		}
		cluster.syncCount++
		// Poke all waiters so they have a chance to timeout or
		// restart syncing if they wish to.
		cluster.serverSynced.Broadcast()
		// Check if we have to restart immediately either way.
		restart := !direct && cluster.masters.Empty() || cluster.servers.Empty()
		cluster.Unlock()

		if restart {
			log("SYNC No masters found. Will synchronize again.")
			time.Sleep(syncShortDelay)
			continue
		}

		debugf("SYNC Cluster %p waiting for next requested or scheduled sync.", cluster)

		// Hold off until somebody explicitly requests a synchronization
		// or it's time to check for a cluster topology change again.
		select {
		case <-cluster.sync:
		case <-time.After(syncServersDelay):
		}
	}
	debugf("SYNC Cluster %p is stopping its sync loop.", cluster)
}

func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoServer {
	cluster.RLock()
	server := cluster.servers.Search(tcpaddr.String())
	cluster.RUnlock()
	if server != nil {
		return server
	}
	return newServer(addr, tcpaddr, cluster.sync, cluster.dial)
}

func resolveAddr(addr string) (*net.TCPAddr, error) {
	// Simple cases that do not need actual resolution. Works with IPv4 and v6.
	if host, port, err := net.SplitHostPort(addr); err == nil {
		if port, _ := strconv.Atoi(port); port > 0 {
			zone := ""
			if i := strings.LastIndex(host, "%"); i >= 0 {
				zone = host[i+1:]
				host = host[:i]
			}
			ip := net.ParseIP(host)
			if ip != nil {
				return &net.TCPAddr{IP: ip, Port: port, Zone: zone}, nil
			}
		}
	}

	// Attempt to resolve IPv4 and v6 concurrently.
	addrChan := make(chan *net.TCPAddr, 2)
	for _, network := range []string{"udp4", "udp6"} {
		network := network
		go func() {
			// The unfortunate UDP dialing hack allows having a timeout on address resolution.
			conn, err := net.DialTimeout(network, addr, 10*time.Second)
			if err != nil {
				addrChan <- nil
			} else {
				addrChan <- (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr))
				conn.Close()
			}
		}()
	}

	// Wait for the result of IPv4 and v6 resolution. Use IPv4 if available.
	tcpaddr := <-addrChan
	if tcpaddr == nil || len(tcpaddr.IP) != 4 {
		var timeout <-chan time.Time
		if tcpaddr != nil {
			// Don't wait too long if an IPv6 address is known.
			timeout = time.After(50 * time.Millisecond)
		}
		select {
		case <-timeout:
		case tcpaddr2 := <-addrChan:
			if tcpaddr == nil || tcpaddr2 != nil {
				// It's an IPv4 address or the only known address. Use it.
				tcpaddr = tcpaddr2
			}
		}
	}

	if tcpaddr == nil {
		log("SYNC Failed to resolve server address: ", addr)
		return nil, errors.New("failed to resolve server address: " + addr)
	}
	if tcpaddr.String() != addr {
		debug("SYNC Address ", addr, " resolved as ", tcpaddr.String())
	}
	return tcpaddr, nil
}

type pendingAdd struct {
	server *mongoServer
	info   *mongoServerInfo
}

func (cluster *mongoCluster) syncServersIteration(direct bool) {
	log("SYNC Starting full topology synchronization...")

	var wg sync.WaitGroup
	var m sync.Mutex
	notYetAdded := make(map[string]pendingAdd)
	addIfFound := make(map[string]bool)
	seen := make(map[string]bool)
	syncKind := partialSync

	var spawnSync func(addr string, byMaster bool)
	spawnSync = func(addr string, byMaster bool) {
		wg.Add(1)
		go func() {
			defer wg.Done()

			tcpaddr, err := resolveAddr(addr)
			if err != nil {
				log("SYNC Failed to start sync of ", addr, ": ", err.Error())
				return
			}
			resolvedAddr := tcpaddr.String()

			m.Lock()
			if byMaster {
				if pending, ok := notYetAdded[resolvedAddr]; ok {
					delete(notYetAdded, resolvedAddr)
					m.Unlock()
					cluster.addServer(pending.server, pending.info, completeSync)
					return
				}
				addIfFound[resolvedAddr] = true
			}
			if seen[resolvedAddr] {
				m.Unlock()
				return
			}
			seen[resolvedAddr] = true
			m.Unlock()

			server := cluster.server(addr, tcpaddr)
			info, hosts, err := cluster.syncServer(server)
			if err != nil {
				cluster.removeServer(server)
				return
			}

			m.Lock()
			add := direct || info.Master || addIfFound[resolvedAddr]
			if add {
				syncKind = completeSync
			} else {
				notYetAdded[resolvedAddr] = pendingAdd{server, info}
			}
			m.Unlock()
			if add {
				cluster.addServer(server, info, completeSync)
			}
			if !direct {
				for _, addr := range hosts {
					spawnSync(addr, info.Master)
				}
			}
		}()
	}

	knownAddrs := cluster.getKnownAddrs()
	for _, addr := range knownAddrs {
		spawnSync(addr, false)
	}
	wg.Wait()

	if syncKind == completeSync {
		logf("SYNC Synchronization was complete (got data from primary).")
		for _, pending := range notYetAdded {
			cluster.removeServer(pending.server)
		}
	} else {
		logf("SYNC Synchronization was partial (cannot talk to primary).")
		for _, pending := range notYetAdded {
			cluster.addServer(pending.server, pending.info, partialSync)
		}
	}

	cluster.Lock()
	mastersLen := cluster.masters.Len()
	logf("SYNC Synchronization completed: %d master(s) and %d slave(s) alive.", mastersLen, cluster.servers.Len()-mastersLen)

	// Update dynamic seeds, but only if we have any good servers. Otherwise,
	// leave them alone for better chances of a successful sync in the future.
	if syncKind == completeSync {
		dynaSeeds := make([]string, cluster.servers.Len())
		for i, server := range cluster.servers.Slice() {
			dynaSeeds[i] = server.Addr
		}
		cluster.dynaSeeds = dynaSeeds
		debugf("SYNC New dynamic seeds: %#v\n", dynaSeeds)
	}
	cluster.Unlock()
}

// AcquireSocket returns a socket to a server in the cluster. If slaveOk is
// true, it will attempt to return a socket to a slave server. If it is
// false, the socket will necessarily be to a master server.
func (cluster *mongoCluster) AcquireSocket(mode Mode, slaveOk bool, syncTimeout time.Duration, socketTimeout time.Duration, serverTags []bson.D, poolLimit int) (s *mongoSocket, err error) {
	var started time.Time
	var syncCount uint
	warnedLimit := false
	for {
		cluster.RLock()
		for {
			mastersLen := cluster.masters.Len()
			slavesLen := cluster.servers.Len() - mastersLen
			debugf("Cluster has %d known masters and %d known slaves.", mastersLen, slavesLen)
			if mastersLen > 0 && !(slaveOk && mode == Secondary) || slavesLen > 0 && slaveOk {
				break
			}
			if mastersLen > 0 && mode == Secondary && cluster.masters.HasMongos() {
				break
			}
			if started.IsZero() {
				// Initialize after fast path above.
				started = time.Now()
				syncCount = cluster.syncCount
			} else if syncTimeout != 0 && started.Before(time.Now().Add(-syncTimeout)) || cluster.failFast && cluster.syncCount != syncCount {
				cluster.RUnlock()
				return nil, errors.New("no reachable servers")
			}
			log("Waiting for servers to synchronize...")
			cluster.syncServers()

			// Remember: this will release and reacquire the lock.
			cluster.serverSynced.Wait()
		}

		var server *mongoServer
		if slaveOk {
			server = cluster.servers.BestFit(mode, serverTags)
		} else {
			server = cluster.masters.BestFit(mode, nil)
		}
		cluster.RUnlock()

		if server == nil {
			// Must have failed the requested tags. Sleep to avoid spinning.
			time.Sleep(100 * time.Millisecond)
			continue
		}

		s, abended, err := server.AcquireSocket(poolLimit, socketTimeout)
		if err == errPoolLimit {
			if !warnedLimit {
				warnedLimit = true
				log("WARNING: Per-server connection limit reached.")
			}
			time.Sleep(100 * time.Millisecond)
			continue
		}
		if err != nil {
			cluster.removeServer(server)
			cluster.syncServers()
			continue
		}
		if abended && !slaveOk {
			var result isMasterResult
			err := cluster.isMaster(s, &result)
			if err != nil || !result.IsMaster {
				logf("Cannot confirm server %s as master (%v)", server.Addr, err)
				s.Release()
				cluster.syncServers()
				time.Sleep(100 * time.Millisecond)
				continue
			}
		}
		return s, nil
	}
	panic("unreached")
}

func (cluster *mongoCluster) CacheIndex(cacheKey string, exists bool) {
	cluster.Lock()
	if cluster.cachedIndex == nil {
		cluster.cachedIndex = make(map[string]bool)
	}
	if exists {
		cluster.cachedIndex[cacheKey] = true
	} else {
		delete(cluster.cachedIndex, cacheKey)
	}
	cluster.Unlock()
}

func (cluster *mongoCluster) HasCachedIndex(cacheKey string) (result bool) {
	cluster.RLock()
	if cluster.cachedIndex != nil {
		result = cluster.cachedIndex[cacheKey]
	}
	cluster.RUnlock()
	return
}

func (cluster *mongoCluster) ResetIndexCache() {
	cluster.Lock()
	cluster.cachedIndex = make(map[string]bool)
	cluster.Unlock()
}
2090 vendor/gopkg.in/mgo.v2/cluster_test.go generated vendored
File diff suppressed because it is too large

196 vendor/gopkg.in/mgo.v2/dbtest/dbserver.go generated vendored
@@ -1,196 +0,0 @@
package dbtest

import (
	"bytes"
	"fmt"
	"net"
	"os"
	"os/exec"
	"strconv"
	"time"

	"gopkg.in/mgo.v2"
	"gopkg.in/tomb.v2"
)

// DBServer controls a MongoDB server process to be used within test suites.
//
// The test server is started when Session is called the first time and should
// remain running for the duration of all tests, with the Wipe method being
// called between tests (before each of them) to clear stored data. After all
// tests are done, the Stop method should be called to stop the test server.
//
// Before the DBServer is used the SetPath method must be called to define
// the location for the database files to be stored.
type DBServer struct {
	session *mgo.Session
	output  bytes.Buffer
	server  *exec.Cmd
	dbpath  string
	host    string
	tomb    tomb.Tomb
}

// SetPath defines the path to the directory where the database files will be
// stored if it is started. The directory path itself is not created or removed
// by the test helper.
func (dbs *DBServer) SetPath(dbpath string) {
	dbs.dbpath = dbpath
}

func (dbs *DBServer) start() {
	if dbs.server != nil {
		panic("DBServer already started")
	}
	if dbs.dbpath == "" {
		panic("DBServer.SetPath must be called before using the server")
	}
	mgo.SetStats(true)
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic("unable to listen on a local address: " + err.Error())
	}
	addr := l.Addr().(*net.TCPAddr)
	l.Close()
	dbs.host = addr.String()

	args := []string{
		"--dbpath", dbs.dbpath,
		"--bind_ip", "127.0.0.1",
		"--port", strconv.Itoa(addr.Port),
		"--nssize", "1",
		"--noprealloc",
		"--smallfiles",
		"--nojournal",
	}
	dbs.tomb = tomb.Tomb{}
	dbs.server = exec.Command("mongod", args...)
	dbs.server.Stdout = &dbs.output
	dbs.server.Stderr = &dbs.output
	err = dbs.server.Start()
	if err != nil {
		panic(err)
	}
	dbs.tomb.Go(dbs.monitor)
	dbs.Wipe()
}

func (dbs *DBServer) monitor() error {
	dbs.server.Process.Wait()
	if dbs.tomb.Alive() {
		// Present some debugging information.
		fmt.Fprintf(os.Stderr, "---- mongod process died unexpectedly:\n")
		fmt.Fprintf(os.Stderr, "%s", dbs.output.Bytes())
		fmt.Fprintf(os.Stderr, "---- mongod processes running right now:\n")
		cmd := exec.Command("/bin/sh", "-c", "ps auxw | grep mongod")
		cmd.Stdout = os.Stderr
		cmd.Stderr = os.Stderr
		cmd.Run()
		fmt.Fprintf(os.Stderr, "----------------------------------------\n")

		panic("mongod process died unexpectedly")
	}
	return nil
}

// Stop stops the test server process, if it is running.
//
// It's okay to call Stop multiple times. After the test server is
// stopped it cannot be restarted.
//
// All database sessions must be closed before or while the Stop method
// is running. Otherwise Stop will panic after a timeout informing that
// there is a session leak.
func (dbs *DBServer) Stop() {
	if dbs.session != nil {
		dbs.checkSessions()
		if dbs.session != nil {
			dbs.session.Close()
			dbs.session = nil
		}
	}
	if dbs.server != nil {
		dbs.tomb.Kill(nil)
		dbs.server.Process.Signal(os.Interrupt)
		select {
		case <-dbs.tomb.Dead():
		case <-time.After(5 * time.Second):
			panic("timeout waiting for mongod process to die")
		}
		dbs.server = nil
	}
}

// Session returns a new session to the server. The returned session
// must be closed after the test is done with it.
//
// The first Session obtained from a DBServer will start it.
func (dbs *DBServer) Session() *mgo.Session {
	if dbs.server == nil {
		dbs.start()
	}
	if dbs.session == nil {
		mgo.ResetStats()
		var err error
		dbs.session, err = mgo.Dial(dbs.host + "/test")
		if err != nil {
			panic(err)
		}
	}
	return dbs.session.Copy()
}

// checkSessions ensures all mgo sessions opened were properly closed.
// For slightly faster tests, it may be disabled by setting the
// environment variable CHECK_SESSIONS to 0.
func (dbs *DBServer) checkSessions() {
	if check := os.Getenv("CHECK_SESSIONS"); check == "0" || dbs.server == nil || dbs.session == nil {
		return
	}
	dbs.session.Close()
	dbs.session = nil
	for i := 0; i < 100; i++ {
		stats := mgo.GetStats()
		if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 {
			return
		}
		time.Sleep(100 * time.Millisecond)
	}
	panic("There are mgo sessions still alive.")
}

// Wipe drops all created databases and their data.
//
// The MongoDB server remains running if it was previously running,
// or stopped if it was previously stopped.
//
// All database sessions must be closed before or while the Wipe method
// is running. Otherwise Wipe will panic after a timeout informing that
// there is a session leak.
func (dbs *DBServer) Wipe() {
	if dbs.server == nil || dbs.session == nil {
		return
	}
	dbs.checkSessions()
	sessionUnset := dbs.session == nil
	session := dbs.Session()
	defer session.Close()
	if sessionUnset {
		dbs.session.Close()
		dbs.session = nil
	}
	names, err := session.DatabaseNames()
	if err != nil {
		panic(err)
	}
	for _, name := range names {
		switch name {
		case "admin", "local", "config":
		default:
			err = session.DB(name).DropDatabase()
			if err != nil {
				panic(err)
			}
		}
	}
}
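A minimal sketch of the lifecycle the doc comments above describe: one DBServer shared by a test binary, wiped between tests, stopped at the end. The package name, test names, and temp-dir handling are assumptions for illustration, not part of the vendored code.

	package mypkg_test

	import (
		"io/ioutil"
		"os"
		"testing"

		"gopkg.in/mgo.v2/dbtest"
	)

	var server dbtest.DBServer

	func TestMain(m *testing.M) {
		// SetPath must be called before the first Session; the helper
		// does not create or remove the directory itself.
		dir, _ := ioutil.TempDir("", "dbtest")
		server.SetPath(dir)
		code := m.Run()
		server.Stop() // all sessions must be closed by this point
		os.RemoveAll(dir)
		os.Exit(code)
	}

	func TestSomething(t *testing.T) {
		server.Wipe()               // clear data left by earlier tests
		session := server.Session() // the first call starts mongod
		defer session.Close()
		// ... exercise code that uses session ...
	}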
108 vendor/gopkg.in/mgo.v2/dbtest/dbserver_test.go generated vendored
@@ -1,108 +0,0 @@
package dbtest_test

import (
	"os"
	"testing"
	"time"

	. "gopkg.in/check.v1"

	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/dbtest"
)

type M map[string]interface{}

func TestAll(t *testing.T) {
	TestingT(t)
}

type S struct {
	oldCheckSessions string
}

var _ = Suite(&S{})

func (s *S) SetUpTest(c *C) {
	s.oldCheckSessions = os.Getenv("CHECK_SESSIONS")
	os.Setenv("CHECK_SESSIONS", "")
}

func (s *S) TearDownTest(c *C) {
	os.Setenv("CHECK_SESSIONS", s.oldCheckSessions)
}

func (s *S) TestWipeData(c *C) {
	var server dbtest.DBServer
	server.SetPath(c.MkDir())
	defer server.Stop()

	session := server.Session()
	err := session.DB("mydb").C("mycoll").Insert(M{"a": 1})
	session.Close()
	c.Assert(err, IsNil)

	server.Wipe()

	session = server.Session()
	names, err := session.DatabaseNames()
	session.Close()
	c.Assert(err, IsNil)
	for _, name := range names {
		if name != "local" && name != "admin" {
			c.Fatalf("Wipe should have removed this database: %s", name)
		}
	}
}

func (s *S) TestStop(c *C) {
	var server dbtest.DBServer
	server.SetPath(c.MkDir())
	defer server.Stop()

	// Server should not be running.
	process := server.ProcessTest()
	c.Assert(process, IsNil)

	session := server.Session()
	addr := session.LiveServers()[0]
	session.Close()

	// Server should be running now.
	process = server.ProcessTest()
	p, err := os.FindProcess(process.Pid)
	c.Assert(err, IsNil)
	p.Release()

	server.Stop()

	// Server should not be running anymore.
	session, err = mgo.DialWithTimeout(addr, 500*time.Millisecond)
	if session != nil {
		session.Close()
		c.Fatalf("Stop did not stop the server")
	}
}

func (s *S) TestCheckSessions(c *C) {
	var server dbtest.DBServer
	server.SetPath(c.MkDir())
	defer server.Stop()

	session := server.Session()
	defer session.Close()
	c.Assert(server.Wipe, PanicMatches, "There are mgo sessions still alive.")
}

func (s *S) TestCheckSessionsDisabled(c *C) {
	var server dbtest.DBServer
	server.SetPath(c.MkDir())
	defer server.Stop()

	os.Setenv("CHECK_SESSIONS", "0")

	// Should not panic, although it looks to Wipe like this session will leak.
	session := server.Session()
	defer session.Close()
	server.Wipe()
}
12 vendor/gopkg.in/mgo.v2/dbtest/export_test.go generated vendored
@@ -1,12 +0,0 @@
package dbtest

import (
	"os"
)

func (dbs *DBServer) ProcessTest() *os.Process {
	if dbs.server == nil {
		return nil
	}
	return dbs.server.Process
}
31 vendor/gopkg.in/mgo.v2/doc.go generated vendored
@@ -1,31 +0,0 @@
// Package mgo offers a rich MongoDB driver for Go.
//
// Details about the mgo project (pronounced as "mango") are found
// in its web page:
//
//     http://labix.org/mgo
//
// Usage of the driver revolves around the concept of sessions. To
// get started, obtain a session using the Dial function:
//
//     session, err := mgo.Dial(url)
//
// This will establish one or more connections with the cluster of
// servers defined by the url parameter. From then on, the cluster
// may be queried with multiple consistency rules (see SetMode) and
// documents retrieved with statements such as:
//
//     c := session.DB(database).C(collection)
//     err := c.Find(query).One(&result)
//
// New sessions are typically created by calling session.Copy on the
// initial session obtained at dial time. These new sessions will share
// the same cluster information and connection pool, and may be easily
// handed into other methods and functions for organizing logic.
// Every session created must have its Close method called at the end
// of its life time, so its resources may be put back in the pool or
// collected, depending on the case.
//
// For more details, see the documentation for the types and methods.
//
package mgo
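The Copy-then-Close pattern the package comment describes, sketched as a hypothetical per-request HTTP handler; the handler wiring, database, and collection names are assumptions for illustration:

	import (
		"net/http"

		"gopkg.in/mgo.v2"
	)

	// handler copies the root session per request so each request gets
	// its own socket from the shared pool, and returns it via Close.
	func handler(root *mgo.Session) http.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request) {
			s := root.Copy() // shares cluster info and connection pool
			defer s.Close()  // required at the end of the session's lifetime
			var result struct{ N int }
			if err := s.DB("mydb").C("mycoll").Find(nil).One(&result); err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
			}
		}
	}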
33 vendor/gopkg.in/mgo.v2/export_test.go generated vendored
@@ -1,33 +0,0 @@
package mgo

import (
	"time"
)

func HackPingDelay(newDelay time.Duration) (restore func()) {
	globalMutex.Lock()
	defer globalMutex.Unlock()

	oldDelay := pingDelay
	restore = func() {
		globalMutex.Lock()
		pingDelay = oldDelay
		globalMutex.Unlock()
	}
	pingDelay = newDelay
	return
}

func HackSyncSocketTimeout(newTimeout time.Duration) (restore func()) {
	globalMutex.Lock()
	defer globalMutex.Unlock()

	oldTimeout := syncSocketTimeout
	restore = func() {
		globalMutex.Lock()
		syncSocketTimeout = oldTimeout
		globalMutex.Unlock()
	}
	syncSocketTimeout = newTimeout
	return
}
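A short sketch of the save-and-restore pattern these hack helpers follow, as a test in the same package might use it; the test name and delay value are arbitrary assumptions:

	func TestWithFastPings(t *testing.T) { // hypothetical test in package mgo
		restore := HackPingDelay(100 * time.Millisecond)
		defer restore() // put the original delay back when the test ends
		// ... exercise code that depends on ping scheduling ...
	}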
761 vendor/gopkg.in/mgo.v2/gridfs.go generated vendored
@@ -1,761 +0,0 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo

import (
	"crypto/md5"
	"encoding/hex"
	"errors"
	"hash"
	"io"
	"os"
	"sync"
	"time"

	"gopkg.in/mgo.v2/bson"
)

type GridFS struct {
	Files  *Collection
	Chunks *Collection
}

type gfsFileMode int

const (
	gfsClosed  gfsFileMode = 0
	gfsReading gfsFileMode = 1
	gfsWriting gfsFileMode = 2
)

type GridFile struct {
	m    sync.Mutex
	c    sync.Cond
	gfs  *GridFS
	mode gfsFileMode
	err  error

	chunk  int
	offset int64

	wpending int
	wbuf     []byte
	wsum     hash.Hash

	rbuf   []byte
	rcache *gfsCachedChunk

	doc gfsFile
}

type gfsFile struct {
	Id          interface{} "_id"
	ChunkSize   int         "chunkSize"
	UploadDate  time.Time   "uploadDate"
	Length      int64       ",minsize"
	MD5         string
	Filename    string    ",omitempty"
	ContentType string    "contentType,omitempty"
	Metadata    *bson.Raw ",omitempty"
}

type gfsChunk struct {
	Id      interface{} "_id"
	FilesId interface{} "files_id"
	N       int
	Data    []byte
}

type gfsCachedChunk struct {
	wait sync.Mutex
	n    int
	data []byte
	err  error
}

func newGridFS(db *Database, prefix string) *GridFS {
	return &GridFS{db.C(prefix + ".files"), db.C(prefix + ".chunks")}
}

func (gfs *GridFS) newFile() *GridFile {
	file := &GridFile{gfs: gfs}
	file.c.L = &file.m
	//runtime.SetFinalizer(file, finalizeFile)
	return file
}

func finalizeFile(file *GridFile) {
	file.Close()
}

// Create creates a new file with the provided name in the GridFS. If the file
// name already exists, a new version will be inserted with an up-to-date
// uploadDate that will cause it to be atomically visible to the Open and
// OpenId methods. If the file name is not important, an empty name may be
// provided and the file Id used instead.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
//
// A simple example inserting a new file:
//
//     func check(err error) {
//         if err != nil {
//             panic(err)
//         }
//     }
//     file, err := db.GridFS("fs").Create("myfile.txt")
//     check(err)
//     n, err := file.Write([]byte("Hello world!"))
//     check(err)
//     err = file.Close()
//     check(err)
//     fmt.Printf("%d bytes written\n", n)
//
// The io.Writer interface is implemented by *GridFile and may be used to
// help on the file creation. For example:
//
//     file, err := db.GridFS("fs").Create("myfile.txt")
//     check(err)
//     messages, err := os.Open("/var/log/messages")
//     check(err)
//     defer messages.Close()
//     _, err = io.Copy(file, messages)
//     check(err)
//     err = file.Close()
//     check(err)
//
func (gfs *GridFS) Create(name string) (file *GridFile, err error) {
	file = gfs.newFile()
	file.mode = gfsWriting
	file.wsum = md5.New()
	file.doc = gfsFile{Id: bson.NewObjectId(), ChunkSize: 255 * 1024, Filename: name}
	return
}

// OpenId returns the file with the provided id, for reading.
// If the file isn't found, err will be set to mgo.ErrNotFound.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
//
// The following example will print the first 8192 bytes from the file:
//
//     func check(err error) {
//         if err != nil {
//             panic(err)
//         }
//     }
//     file, err := db.GridFS("fs").OpenId(objid)
//     check(err)
//     b := make([]byte, 8192)
//     n, err := file.Read(b)
//     check(err)
//     fmt.Println(string(b))
//     err = file.Close()
//     check(err)
//     fmt.Printf("%d bytes read\n", n)
//
// The io.Reader interface is implemented by *GridFile and may be used to
// deal with it. As an example, the following snippet will dump the whole
// file into the standard output:
//
//     file, err := db.GridFS("fs").OpenId(objid)
//     check(err)
//     _, err = io.Copy(os.Stdout, file)
//     check(err)
//     err = file.Close()
//     check(err)
//
func (gfs *GridFS) OpenId(id interface{}) (file *GridFile, err error) {
	var doc gfsFile
	err = gfs.Files.Find(bson.M{"_id": id}).One(&doc)
	if err != nil {
		return
	}
	file = gfs.newFile()
	file.mode = gfsReading
	file.doc = doc
	return
}

// Open returns the most recently uploaded file with the provided
// name, for reading. If the file isn't found, err will be set
// to mgo.ErrNotFound.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
//
// The following example will print the first 8192 bytes from the file:
//
//     file, err := db.GridFS("fs").Open("myfile.txt")
//     check(err)
//     b := make([]byte, 8192)
//     n, err := file.Read(b)
//     check(err)
//     fmt.Println(string(b))
//     err = file.Close()
//     check(err)
//     fmt.Printf("%d bytes read\n", n)
//
// The io.Reader interface is implemented by *GridFile and may be used to
// deal with it. As an example, the following snippet will dump the whole
// file into the standard output:
//
//     file, err := db.GridFS("fs").Open("myfile.txt")
//     check(err)
//     _, err = io.Copy(os.Stdout, file)
//     check(err)
//     err = file.Close()
//     check(err)
//
func (gfs *GridFS) Open(name string) (file *GridFile, err error) {
	var doc gfsFile
	err = gfs.Files.Find(bson.M{"filename": name}).Sort("-uploadDate").One(&doc)
	if err != nil {
		return
	}
	file = gfs.newFile()
	file.mode = gfsReading
	file.doc = doc
	return
}

// OpenNext opens the next file from iter for reading, sets *file to it,
// and returns true on the success case. If no more documents are available
// on iter or an error occurred, *file is set to nil and the result is false.
// Errors will be available via iter.Err().
//
// The iter parameter must be an iterator on the GridFS files collection.
// Using the GridFS.Find method is an easy way to obtain such an iterator,
// but any iterator on the collection will work.
//
// If the provided *file is non-nil, OpenNext will close it before attempting
// to iterate to the next element. This means that in a loop one only
// has to worry about closing files when breaking out of the loop early
// (break, return, or panic).
//
// For example:
//
//     gfs := db.GridFS("fs")
//     query := gfs.Find(nil).Sort("filename")
//     iter := query.Iter()
//     var f *mgo.GridFile
//     for gfs.OpenNext(iter, &f) {
//         fmt.Printf("Filename: %s\n", f.Name())
//     }
//     if iter.Close() != nil {
//         panic(iter.Close())
//     }
//
func (gfs *GridFS) OpenNext(iter *Iter, file **GridFile) bool {
	if *file != nil {
		// Ignoring the error here shouldn't be a big deal
		// as we're reading the file and the loop iteration
		// for this file is finished.
		_ = (*file).Close()
	}
	var doc gfsFile
	if !iter.Next(&doc) {
		*file = nil
		return false
	}
	f := gfs.newFile()
	f.mode = gfsReading
	f.doc = doc
	*file = f
	return true
}

// Find runs query on GridFS's files collection and returns
// the resulting Query.
//
// This logic:
//
//     gfs := db.GridFS("fs")
//     iter := gfs.Find(nil).Iter()
//
// Is equivalent to:
//
//     files := db.C("fs" + ".files")
//     iter := files.Find(nil).Iter()
//
func (gfs *GridFS) Find(query interface{}) *Query {
	return gfs.Files.Find(query)
}

// RemoveId deletes the file with the provided id from the GridFS.
func (gfs *GridFS) RemoveId(id interface{}) error {
	err := gfs.Files.Remove(bson.M{"_id": id})
	if err != nil {
		return err
	}
	_, err = gfs.Chunks.RemoveAll(bson.D{{"files_id", id}})
	return err
}

type gfsDocId struct {
	Id interface{} "_id"
}

// Remove deletes all files with the provided name from the GridFS.
func (gfs *GridFS) Remove(name string) (err error) {
	iter := gfs.Files.Find(bson.M{"filename": name}).Select(bson.M{"_id": 1}).Iter()
	var doc gfsDocId
	for iter.Next(&doc) {
		if e := gfs.RemoveId(doc.Id); e != nil {
			err = e
		}
	}
	if err == nil {
		err = iter.Close()
	}
	return err
}

func (file *GridFile) assertMode(mode gfsFileMode) {
	switch file.mode {
	case mode:
		return
	case gfsWriting:
		panic("GridFile is open for writing")
	case gfsReading:
		panic("GridFile is open for reading")
	case gfsClosed:
		panic("GridFile is closed")
	default:
		panic("internal error: missing GridFile mode")
	}
}

// SetChunkSize sets size of saved chunks. Once the file is written to, it
// will be split in blocks of that size and each block saved into an
// independent chunk document. The default chunk size is 255kb.
//
// It is a runtime error to call this function once the file has started
// being written to.
func (file *GridFile) SetChunkSize(bytes int) {
	file.assertMode(gfsWriting)
	debugf("GridFile %p: setting chunk size to %d", file, bytes)
	file.m.Lock()
	file.doc.ChunkSize = bytes
	file.m.Unlock()
}

// Id returns the current file Id.
func (file *GridFile) Id() interface{} {
	return file.doc.Id
}

// SetId changes the current file Id.
//
// It is a runtime error to call this function once the file has started
// being written to, or when the file is not open for writing.
func (file *GridFile) SetId(id interface{}) {
	file.assertMode(gfsWriting)
	file.m.Lock()
	file.doc.Id = id
	file.m.Unlock()
}

// Name returns the optional file name. An empty string will be returned
// in case it is unset.
func (file *GridFile) Name() string {
	return file.doc.Filename
}

// SetName changes the optional file name. An empty string may be used to
// unset it.
//
// It is a runtime error to call this function when the file is not open
// for writing.
func (file *GridFile) SetName(name string) {
	file.assertMode(gfsWriting)
	file.m.Lock()
	file.doc.Filename = name
	file.m.Unlock()
}

// ContentType returns the optional file content type. An empty string will be
// returned in case it is unset.
func (file *GridFile) ContentType() string {
	return file.doc.ContentType
}

// SetContentType changes the optional file content type. An empty string may
// be used to unset it.
//
// It is a runtime error to call this function when the file is not open
// for writing.
func (file *GridFile) SetContentType(ctype string) {
	file.assertMode(gfsWriting)
	file.m.Lock()
	file.doc.ContentType = ctype
	file.m.Unlock()
}

// GetMeta unmarshals the optional "metadata" field associated with the
// file into the result parameter. The meaning of keys under that field
// is user-defined. For example:
//
//     result := struct{ INode int }{}
//     err = file.GetMeta(&result)
//     if err != nil {
//         panic(err)
//     }
//     fmt.Printf("inode: %d\n", result.INode)
//
func (file *GridFile) GetMeta(result interface{}) (err error) {
	file.m.Lock()
	if file.doc.Metadata != nil {
		err = bson.Unmarshal(file.doc.Metadata.Data, result)
	}
	file.m.Unlock()
	return
}

// SetMeta changes the optional "metadata" field associated with the
// file. The meaning of keys under that field is user-defined.
// For example:
//
//     file.SetMeta(bson.M{"inode": inode})
//
// It is a runtime error to call this function when the file is not open
// for writing.
func (file *GridFile) SetMeta(metadata interface{}) {
	file.assertMode(gfsWriting)
	data, err := bson.Marshal(metadata)
	file.m.Lock()
	if err != nil && file.err == nil {
		file.err = err
	} else {
		file.doc.Metadata = &bson.Raw{Data: data}
	}
	file.m.Unlock()
}

// Size returns the file size in bytes.
func (file *GridFile) Size() (bytes int64) {
	file.m.Lock()
	bytes = file.doc.Length
	file.m.Unlock()
	return
}

// MD5 returns the file MD5 as a hex-encoded string.
func (file *GridFile) MD5() (md5 string) {
	return file.doc.MD5
}

// UploadDate returns the file upload time.
func (file *GridFile) UploadDate() time.Time {
	return file.doc.UploadDate
}

// SetUploadDate changes the file upload time.
//
// It is a runtime error to call this function when the file is not open
// for writing.
func (file *GridFile) SetUploadDate(t time.Time) {
	file.assertMode(gfsWriting)
	file.m.Lock()
	file.doc.UploadDate = t
	file.m.Unlock()
}

// Close flushes any pending changes in case the file is being written
// to, waits for any background operations to finish, and closes the file.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
func (file *GridFile) Close() (err error) {
	file.m.Lock()
	defer file.m.Unlock()
	if file.mode == gfsWriting {
		if len(file.wbuf) > 0 && file.err == nil {
			file.insertChunk(file.wbuf)
			file.wbuf = file.wbuf[0:0]
		}
		file.completeWrite()
	} else if file.mode == gfsReading && file.rcache != nil {
		file.rcache.wait.Lock()
		file.rcache = nil
	}
	file.mode = gfsClosed
	debugf("GridFile %p: closed", file)
	return file.err
}

func (file *GridFile) completeWrite() {
	for file.wpending > 0 {
		debugf("GridFile %p: waiting for %d pending chunks to complete file write", file, file.wpending)
		file.c.Wait()
	}
	if file.err == nil {
		hexsum := hex.EncodeToString(file.wsum.Sum(nil))
		if file.doc.UploadDate.IsZero() {
			file.doc.UploadDate = bson.Now()
		}
		file.doc.MD5 = hexsum
		file.err = file.gfs.Files.Insert(file.doc)
	}
	if file.err != nil {
		file.gfs.Chunks.RemoveAll(bson.D{{"files_id", file.doc.Id}})
	}
	if file.err == nil {
		index := Index{
			Key:    []string{"files_id", "n"},
			Unique: true,
		}
		file.err = file.gfs.Chunks.EnsureIndex(index)
	}
}

// Abort cancels an in-progress write, preventing the file from being
// automatically created and ensuring previously written chunks are
// removed when the file is closed.
//
// It is a runtime error to call Abort when the file was not opened
// for writing.
func (file *GridFile) Abort() {
	if file.mode != gfsWriting {
		panic("file.Abort must be called on file opened for writing")
	}
	file.err = errors.New("write aborted")
}
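A sketch of aborting a failed upload so that Close removes the chunks already written, per the Abort doc comment above; the helper name and signature are assumptions for illustration:

	// upload streams r into a new GridFS file, aborting on copy failure
	// so the partial file document is never created.
	func upload(gfs *mgo.GridFS, name string, r io.Reader) error {
		f, err := gfs.Create(name)
		if err != nil {
			return err
		}
		if _, err = io.Copy(f, r); err != nil {
			f.Abort()
			f.Close() // returns "write aborted"; the copy error matters more
			return err
		}
		return f.Close() // deferred write errors surface here
	}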
// Write writes the provided data to the file and returns the
// number of bytes written and an error in case something
// wrong happened.
//
// The file will internally cache the data so that all but the last
// chunk sent to the database have the size defined by SetChunkSize.
// This also means that errors may be deferred until a future call
// to Write or Close.
//
// The parameters and behavior of this function turn the file
// into an io.Writer.
func (file *GridFile) Write(data []byte) (n int, err error) {
	file.assertMode(gfsWriting)
	file.m.Lock()
	debugf("GridFile %p: writing %d bytes", file, len(data))
	defer file.m.Unlock()

	if file.err != nil {
		return 0, file.err
	}

	n = len(data)
	file.doc.Length += int64(n)
	chunkSize := file.doc.ChunkSize

	if len(file.wbuf)+len(data) < chunkSize {
		file.wbuf = append(file.wbuf, data...)
		return
	}

	// First, flush file.wbuf complementing with data.
	if len(file.wbuf) > 0 {
		missing := chunkSize - len(file.wbuf)
		if missing > len(data) {
			missing = len(data)
		}
		file.wbuf = append(file.wbuf, data[:missing]...)
		data = data[missing:]
		file.insertChunk(file.wbuf)
		file.wbuf = file.wbuf[0:0]
	}

	// Then, flush all chunks from data without copying.
	for len(data) > chunkSize {
		size := chunkSize
		if size > len(data) {
			size = len(data)
		}
		file.insertChunk(data[:size])
		data = data[size:]
	}

	// And append the rest for a future call.
	file.wbuf = append(file.wbuf, data...)

	return n, file.err
}

func (file *GridFile) insertChunk(data []byte) {
	n := file.chunk
	file.chunk++
	debugf("GridFile %p: adding to checksum: %q", file, string(data))
	file.wsum.Write(data)

	for file.doc.ChunkSize*file.wpending >= 1024*1024 {
		// Hold on.. we got a MB pending.
		file.c.Wait()
		if file.err != nil {
			return
		}
	}

	file.wpending++

	debugf("GridFile %p: inserting chunk %d with %d bytes", file, n, len(data))

	// We may not own the memory of data, so rather than
	// simply copying it, we'll marshal the document ahead of time.
	data, err := bson.Marshal(gfsChunk{bson.NewObjectId(), file.doc.Id, n, data})
	if err != nil {
		file.err = err
		return
	}

	go func() {
		err := file.gfs.Chunks.Insert(bson.Raw{Data: data})
		file.m.Lock()
		file.wpending--
		if err != nil && file.err == nil {
			file.err = err
		}
		file.c.Broadcast()
		file.m.Unlock()
	}()
}

// Seek sets the offset for the next Read or Write on file to
// offset, interpreted according to whence: 0 means relative to
// the origin of the file, 1 means relative to the current offset,
// and 2 means relative to the end. It returns the new offset and
// an error, if any.
func (file *GridFile) Seek(offset int64, whence int) (pos int64, err error) {
	file.m.Lock()
	debugf("GridFile %p: seeking to %d (whence=%d)", file, offset, whence)
	defer file.m.Unlock()
	switch whence {
	case os.SEEK_SET:
	case os.SEEK_CUR:
		offset += file.offset
	case os.SEEK_END:
		offset += file.doc.Length
	default:
		panic("unsupported whence value")
	}
	if offset > file.doc.Length {
		return file.offset, errors.New("seek past end of file")
	}
	if offset == file.doc.Length {
		// If we're seeking to the end of the file,
		// no need to read anything. This enables
		// a client to find the size of the file using only the
		// io.ReadSeeker interface with low overhead.
		file.offset = offset
		return file.offset, nil
	}
	chunk := int(offset / int64(file.doc.ChunkSize))
	if chunk+1 == file.chunk && offset >= file.offset {
		file.rbuf = file.rbuf[int(offset-file.offset):]
		file.offset = offset
		return file.offset, nil
	}
	file.offset = offset
	file.chunk = chunk
	file.rbuf = nil
	file.rbuf, err = file.getChunk()
	if err == nil {
		file.rbuf = file.rbuf[int(file.offset-int64(chunk)*int64(file.doc.ChunkSize)):]
	}
	return file.offset, err
}
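A tiny sketch of the low-overhead size trick the comment inside Seek mentions: seeking to the end reports the length without fetching any chunks. The helper name is an assumption; note it moves the read offset, so seek back before further reads.

	// fileSize reports a GridFS file's length via the io.ReadSeeker
	// fast path; *GridFile satisfies io.ReadSeeker while open for reading.
	func fileSize(f io.ReadSeeker) (int64, error) {
		return f.Seek(0, os.SEEK_END)
	}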
// Read reads into b the next available data from the file and
// returns the number of bytes written and an error in case
// something wrong happened. At the end of the file, n will
// be zero and err will be set to io.EOF.
//
// The parameters and behavior of this function turn the file
// into an io.Reader.
func (file *GridFile) Read(b []byte) (n int, err error) {
	file.assertMode(gfsReading)
	file.m.Lock()
	debugf("GridFile %p: reading at offset %d into buffer of length %d", file, file.offset, len(b))
	defer file.m.Unlock()
	if file.offset == file.doc.Length {
		return 0, io.EOF
	}
	for err == nil {
		i := copy(b, file.rbuf)
		n += i
		file.offset += int64(i)
		file.rbuf = file.rbuf[i:]
		if i == len(b) || file.offset == file.doc.Length {
			break
		}
		b = b[i:]
		file.rbuf, err = file.getChunk()
	}
	return n, err
}

func (file *GridFile) getChunk() (data []byte, err error) {
	cache := file.rcache
	file.rcache = nil
	if cache != nil && cache.n == file.chunk {
		debugf("GridFile %p: Getting chunk %d from cache", file, file.chunk)
		cache.wait.Lock()
		data, err = cache.data, cache.err
	} else {
		debugf("GridFile %p: Fetching chunk %d", file, file.chunk)
		var doc gfsChunk
		err = file.gfs.Chunks.Find(bson.D{{"files_id", file.doc.Id}, {"n", file.chunk}}).One(&doc)
		data = doc.Data
	}
	file.chunk++
	if int64(file.chunk)*int64(file.doc.ChunkSize) < file.doc.Length {
		// Read the next one in background.
		cache = &gfsCachedChunk{n: file.chunk}
		cache.wait.Lock()
		debugf("GridFile %p: Scheduling chunk %d for background caching", file, file.chunk)
		// Clone the session to avoid having it closed in between.
		chunks := file.gfs.Chunks
		session := chunks.Database.Session.Clone()
		go func(id interface{}, n int) {
			defer session.Close()
			chunks = chunks.With(session)
			var doc gfsChunk
			cache.err = chunks.Find(bson.D{{"files_id", id}, {"n", n}}).One(&doc)
			cache.data = doc.Data
			cache.wait.Unlock()
		}(file.doc.Id, file.chunk)
		file.rcache = cache
	}
	debugf("Returning err: %#v", err)
	return
}
708 vendor/gopkg.in/mgo.v2/gridfs_test.go generated vendored
@@ -1,708 +0,0 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package mgo_test

import (
	"io"
	"os"
	"time"

	. "gopkg.in/check.v1"
	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)

func (s *S) TestGridFSCreate(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	before := bson.Now()

	gfs := db.GridFS("fs")
	file, err := gfs.Create("")
	c.Assert(err, IsNil)

	n, err := file.Write([]byte("some data"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 9)

	err = file.Close()
	c.Assert(err, IsNil)

	after := bson.Now()

	// Check the file information.
	result := M{}
	err = db.C("fs.files").Find(nil).One(result)
	c.Assert(err, IsNil)

	fileId, ok := result["_id"].(bson.ObjectId)
	c.Assert(ok, Equals, true)
	c.Assert(fileId.Valid(), Equals, true)
	result["_id"] = "<id>"

	ud, ok := result["uploadDate"].(time.Time)
	c.Assert(ok, Equals, true)
	c.Assert(ud.After(before) && ud.Before(after), Equals, true)
	result["uploadDate"] = "<timestamp>"

	expected := M{
		"_id":        "<id>",
		"length":     9,
		"chunkSize":  255 * 1024,
		"uploadDate": "<timestamp>",
		"md5":        "1e50210a0202497fb79bc38b6ade6c34",
	}
	c.Assert(result, DeepEquals, expected)

	// Check the chunk.
	result = M{}
	err = db.C("fs.chunks").Find(nil).One(result)
	c.Assert(err, IsNil)

	chunkId, ok := result["_id"].(bson.ObjectId)
	c.Assert(ok, Equals, true)
	c.Assert(chunkId.Valid(), Equals, true)
	result["_id"] = "<id>"

	expected = M{
		"_id":      "<id>",
		"files_id": fileId,
		"n":        0,
		"data":     []byte("some data"),
	}
	c.Assert(result, DeepEquals, expected)

	// Check that an index was created.
	indexes, err := db.C("fs.chunks").Indexes()
	c.Assert(err, IsNil)
	c.Assert(len(indexes), Equals, 2)
	c.Assert(indexes[1].Key, DeepEquals, []string{"files_id", "n"})
}

func (s *S) TestGridFSFileDetails(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")

	file, err := gfs.Create("myfile1.txt")
	c.Assert(err, IsNil)

	n, err := file.Write([]byte("some"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 4)

	c.Assert(file.Size(), Equals, int64(4))

	n, err = file.Write([]byte(" data"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 5)

	c.Assert(file.Size(), Equals, int64(9))

	id, _ := file.Id().(bson.ObjectId)
	c.Assert(id.Valid(), Equals, true)
	c.Assert(file.Name(), Equals, "myfile1.txt")
	c.Assert(file.ContentType(), Equals, "")

	var info interface{}
	err = file.GetMeta(&info)
	c.Assert(err, IsNil)
	c.Assert(info, IsNil)

	file.SetId("myid")
	file.SetName("myfile2.txt")
	file.SetContentType("text/plain")
	file.SetMeta(M{"any": "thing"})

	c.Assert(file.Id(), Equals, "myid")
	c.Assert(file.Name(), Equals, "myfile2.txt")
	c.Assert(file.ContentType(), Equals, "text/plain")

	err = file.GetMeta(&info)
	c.Assert(err, IsNil)
	c.Assert(info, DeepEquals, bson.M{"any": "thing"})

	err = file.Close()
	c.Assert(err, IsNil)

	c.Assert(file.MD5(), Equals, "1e50210a0202497fb79bc38b6ade6c34")

	ud := file.UploadDate()
	now := time.Now()
	c.Assert(ud.Before(now), Equals, true)
	c.Assert(ud.After(now.Add(-3*time.Second)), Equals, true)

	result := M{}
	err = db.C("fs.files").Find(nil).One(result)
	c.Assert(err, IsNil)

	result["uploadDate"] = "<timestamp>"

	expected := M{
		"_id":         "myid",
		"length":      9,
		"chunkSize":   255 * 1024,
		"uploadDate":  "<timestamp>",
		"md5":         "1e50210a0202497fb79bc38b6ade6c34",
		"filename":    "myfile2.txt",
		"contentType": "text/plain",
		"metadata":    M{"any": "thing"},
	}
	c.Assert(result, DeepEquals, expected)
}

func (s *S) TestGridFSSetUploadDate(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")
	file, err := gfs.Create("")
	c.Assert(err, IsNil)

	t := time.Date(2014, 1, 1, 1, 1, 1, 0, time.Local)
	file.SetUploadDate(t)

	err = file.Close()
	c.Assert(err, IsNil)

	// Check the file information.
	result := M{}
	err = db.C("fs.files").Find(nil).One(result)
	c.Assert(err, IsNil)

	ud := result["uploadDate"].(time.Time)
	if !ud.Equal(t) {
		c.Fatalf("want upload date %s, got %s", t, ud)
	}
}

func (s *S) TestGridFSCreateWithChunking(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")

	file, err := gfs.Create("")
	c.Assert(err, IsNil)

	file.SetChunkSize(5)

	// Smaller than the chunk size.
	n, err := file.Write([]byte("abc"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 3)

	// Boundary in the middle.
	n, err = file.Write([]byte("defg"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 4)

	// Boundary at the end.
	n, err = file.Write([]byte("hij"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 3)

	// Larger than the chunk size, with 3 chunks.
	n, err = file.Write([]byte("klmnopqrstuv"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 12)

	err = file.Close()
	c.Assert(err, IsNil)

	// Check the file information.
	result := M{}
	err = db.C("fs.files").Find(nil).One(result)
	c.Assert(err, IsNil)

	fileId, _ := result["_id"].(bson.ObjectId)
	c.Assert(fileId.Valid(), Equals, true)
	result["_id"] = "<id>"
	result["uploadDate"] = "<timestamp>"

	expected := M{
		"_id":        "<id>",
		"length":     22,
		"chunkSize":  5,
		"uploadDate": "<timestamp>",
		"md5":        "44a66044834cbe55040089cabfc102d5",
	}
	c.Assert(result, DeepEquals, expected)

	// Check the chunks.
	iter := db.C("fs.chunks").Find(nil).Sort("n").Iter()
	dataChunks := []string{"abcde", "fghij", "klmno", "pqrst", "uv"}
	for i := 0; ; i++ {
		result = M{}
		if !iter.Next(result) {
			if i != 5 {
				c.Fatalf("Expected 5 chunks, got %d", i)
			}
			break
		}
		c.Assert(iter.Close(), IsNil)

		result["_id"] = "<id>"

		expected = M{
			"_id":      "<id>",
			"files_id": fileId,
			"n":        i,
			"data":     []byte(dataChunks[i]),
		}
		c.Assert(result, DeepEquals, expected)
	}
}

func (s *S) TestGridFSAbort(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")
	file, err := gfs.Create("")
	c.Assert(err, IsNil)

	file.SetChunkSize(5)

	n, err := file.Write([]byte("some data"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 9)

	var count int
	for i := 0; i < 10; i++ {
		count, err = db.C("fs.chunks").Count()
		if count > 0 || err != nil {
			break
		}
	}
	c.Assert(err, IsNil)
	c.Assert(count, Equals, 1)

	file.Abort()

	err = file.Close()
	c.Assert(err, ErrorMatches, "write aborted")

	count, err = db.C("fs.chunks").Count()
	c.Assert(err, IsNil)
	c.Assert(count, Equals, 0)
}

func (s *S) TestGridFSCloseConflict(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	db.C("fs.files").EnsureIndex(mgo.Index{Key: []string{"filename"}, Unique: true})

	// For a closing-time conflict
	err = db.C("fs.files").Insert(M{"filename": "foo.txt"})
	c.Assert(err, IsNil)

	gfs := db.GridFS("fs")
	file, err := gfs.Create("foo.txt")
	c.Assert(err, IsNil)

	_, err = file.Write([]byte("some data"))
	c.Assert(err, IsNil)

	err = file.Close()
	c.Assert(mgo.IsDup(err), Equals, true)

	count, err := db.C("fs.chunks").Count()
	c.Assert(err, IsNil)
	c.Assert(count, Equals, 0)
}

func (s *S) TestGridFSOpenNotFound(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")
	file, err := gfs.OpenId("non-existent")
	c.Assert(err == mgo.ErrNotFound, Equals, true)
	c.Assert(file, IsNil)

	file, err = gfs.Open("non-existent")
	c.Assert(err == mgo.ErrNotFound, Equals, true)
	c.Assert(file, IsNil)
}

func (s *S) TestGridFSReadAll(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")
	file, err := gfs.Create("")
	c.Assert(err, IsNil)
	id := file.Id()

	file.SetChunkSize(5)

	n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 22)

	err = file.Close()
	c.Assert(err, IsNil)

	file, err = gfs.OpenId(id)
	c.Assert(err, IsNil)

	b := make([]byte, 30)
	n, err = file.Read(b)
	c.Assert(n, Equals, 22)
	c.Assert(err, IsNil)

	n, err = file.Read(b)
	c.Assert(n, Equals, 0)
	c.Assert(err == io.EOF, Equals, true)

	err = file.Close()
	c.Assert(err, IsNil)
}

func (s *S) TestGridFSReadChunking(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")

	file, err := gfs.Create("")
	c.Assert(err, IsNil)

	id := file.Id()

	file.SetChunkSize(5)

	n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 22)

	err = file.Close()
	c.Assert(err, IsNil)

	file, err = gfs.OpenId(id)
	c.Assert(err, IsNil)

	b := make([]byte, 30)

	// Smaller than the chunk size.
	n, err = file.Read(b[:3])
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 3)
	c.Assert(b[:3], DeepEquals, []byte("abc"))

	// Boundary in the middle.
	n, err = file.Read(b[:4])
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 4)
	c.Assert(b[:4], DeepEquals, []byte("defg"))

	// Boundary at the end.
	n, err = file.Read(b[:3])
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 3)
	c.Assert(b[:3], DeepEquals, []byte("hij"))

	// Larger than the chunk size, with 3 chunks.
	n, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 12)
	c.Assert(b[:12], DeepEquals, []byte("klmnopqrstuv"))

	n, err = file.Read(b)
	c.Assert(n, Equals, 0)
	c.Assert(err == io.EOF, Equals, true)

	err = file.Close()
	c.Assert(err, IsNil)
}

func (s *S) TestGridFSOpen(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")

	file, err := gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'1'})
	file.Close()

	file, err = gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'2'})
	file.Close()

	file, err = gfs.Open("myfile.txt")
	c.Assert(err, IsNil)
	defer file.Close()

	var b [1]byte

	_, err = file.Read(b[:])
	c.Assert(err, IsNil)
	c.Assert(string(b[:]), Equals, "2")
}

func (s *S) TestGridFSSeek(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")
	file, err := gfs.Create("")
	c.Assert(err, IsNil)
	id := file.Id()

	file.SetChunkSize(5)

	n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 22)

	err = file.Close()
	c.Assert(err, IsNil)

	b := make([]byte, 5)

	file, err = gfs.OpenId(id)
	c.Assert(err, IsNil)

	o, err := file.Seek(3, os.SEEK_SET)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(3))
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, DeepEquals, []byte("defgh"))

	o, err = file.Seek(5, os.SEEK_CUR)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(13))
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, DeepEquals, []byte("nopqr"))

	o, err = file.Seek(0, os.SEEK_END)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(22))
	n, err = file.Read(b)
	c.Assert(err, Equals, io.EOF)
	c.Assert(n, Equals, 0)

	o, err = file.Seek(-10, os.SEEK_END)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(12))
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, DeepEquals, []byte("mnopq"))

	o, err = file.Seek(8, os.SEEK_SET)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(8))
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, DeepEquals, []byte("ijklm"))

	// Trivial seek forward within same chunk. Already
	// got the data, shouldn't touch the database.
	sent := mgo.GetStats().SentOps
	o, err = file.Seek(1, os.SEEK_CUR)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(14))
	c.Assert(mgo.GetStats().SentOps, Equals, sent)
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, DeepEquals, []byte("opqrs"))

	// Try seeking past end of file.
	file.Seek(3, os.SEEK_SET)
	o, err = file.Seek(23, os.SEEK_SET)
	c.Assert(err, ErrorMatches, "seek past end of file")
	c.Assert(o, Equals, int64(3))
}

func (s *S) TestGridFSRemoveId(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")

	file, err := gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'1'})
	file.Close()

	file, err = gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'2'})
	id := file.Id()
	file.Close()

	err = gfs.RemoveId(id)
	c.Assert(err, IsNil)

	file, err = gfs.Open("myfile.txt")
	c.Assert(err, IsNil)
	defer file.Close()

	var b [1]byte

	_, err = file.Read(b[:])
	c.Assert(err, IsNil)
	c.Assert(string(b[:]), Equals, "1")

	n, err := db.C("fs.chunks").Find(M{"files_id": id}).Count()
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 0)
}

func (s *S) TestGridFSRemove(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")

	file, err := gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'1'})
	file.Close()

	file, err = gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'2'})
	file.Close()

	err = gfs.Remove("myfile.txt")
	c.Assert(err, IsNil)

	_, err = gfs.Open("myfile.txt")
	c.Assert(err == mgo.ErrNotFound, Equals, true)

	n, err := db.C("fs.chunks").Find(nil).Count()
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 0)
}

func (s *S) TestGridFSOpenNext(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")

	file, err := gfs.Create("myfile1.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'1'})
	file.Close()

	file, err = gfs.Create("myfile2.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'2'})
	file.Close()

	var f *mgo.GridFile
	var b [1]byte
||||
iter := gfs.Find(nil).Sort("-filename").Iter()
|
||||
|
||||
ok := gfs.OpenNext(iter, &f)
|
||||
c.Assert(ok, Equals, true)
|
||||
c.Check(f.Name(), Equals, "myfile2.txt")
|
||||
|
||||
_, err = f.Read(b[:])
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(string(b[:]), Equals, "2")
|
||||
|
||||
ok = gfs.OpenNext(iter, &f)
|
||||
c.Assert(ok, Equals, true)
|
||||
c.Check(f.Name(), Equals, "myfile1.txt")
|
||||
|
||||
_, err = f.Read(b[:])
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(string(b[:]), Equals, "1")
|
||||
|
||||
ok = gfs.OpenNext(iter, &f)
|
||||
c.Assert(ok, Equals, false)
|
||||
c.Assert(iter.Close(), IsNil)
|
||||
c.Assert(f, IsNil)
|
||||
|
||||
// Do it again with a more restrictive query to make sure
|
||||
// it's actually taken into account.
|
||||
iter = gfs.Find(bson.M{"filename": "myfile1.txt"}).Iter()
|
||||
|
||||
ok = gfs.OpenNext(iter, &f)
|
||||
c.Assert(ok, Equals, true)
|
||||
c.Check(f.Name(), Equals, "myfile1.txt")
|
||||
|
||||
ok = gfs.OpenNext(iter, &f)
|
||||
c.Assert(ok, Equals, false)
|
||||
c.Assert(iter.Close(), IsNil)
|
||||
c.Assert(f, IsNil)
|
||||
}
|
||||
20
vendor/gopkg.in/mgo.v2/harness/certs/client.crt
generated
vendored
@@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDLjCCAhYCAQcwDQYJKoZIhvcNAQELBQAwXDELMAkGA1UEBhMCR08xDDAKBgNV
BAgMA01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNl
cnZlcjESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTE1MDkyOTA4NDAzMFoYDzIxMTUw
OTA1MDg0MDMwWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECAwDTUdPMQwwCgYDVQQH
DANNR08xDDAKBgNVBAoMA01HTzEPMA0GA1UECwwGQ2xpZW50MRIwEAYDVQQDDAls
b2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0UiQhmT+H
4IIqrn8SMESDzvcl3rwImwUoRIHlmXkovCIZCbvBCJ1nAu6X5zIN89EPPOjfNrgZ
616wPgVV/YEQXp+D7+jTAsE5s8JepRXFdecResmvh/+0i2DSuI4QFsuyVAPM1O0I
AQ5EKgr0weZZmsX6lhPD4uYehV4DxDE0i/8aTAlDoNgRCAJrYFMharRTDdY7bQzd
7ZYab/pK/3DSmOKxl/AFJ8Enmcj9w1bsvy0fgAgoGEBnBru80PRFpFiqk72TJkXO
Hx7zcYFpegtKPbAreTCModaCnjP//fskCp4XJrkfH5+01NeeX/r1OfEbjgE/wzzx
l8NaWnPCmxNfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAFwYpje3dCLDOIHYjd+5
CpFOEb+bJsS4ryqm/NblTjIhCLo58hNpMsBqdJHRbHAFRCOE8fvY8yiWtdHeFZcW
DgVRAXfHONLtN7faZaZQnhy/YzOhLfC/8dUMB0gQA8KXhBCPZqQmexE28AfkEO47
PwICAxIWINfjm5VnFMkA3b7bDNLHon/pev2m7HqVQ3pRUJQNK3XgFOdDgRrnuXpR
OKAfHORHVGTh1gf1DVwc0oM+0gnkSiJ1VG0n5pE3zhZ24fmZxu6JQ6X515W7APQI
/nKVH+f1Fo+ustyTNLt8Bwxi1XmwT7IXwnkVSE9Ff6VejppXRF01V0aaWsa3kU3r
z3A=
-----END CERTIFICATE-----
27
vendor/gopkg.in/mgo.v2/harness/certs/client.key
generated
vendored
@@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAtFIkIZk/h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7
wQidZwLul+cyDfPRDzzo3za4GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJ
r4f/tItg0riOEBbLslQDzNTtCAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJ
Q6DYEQgCa2BTIWq0Uw3WO20M3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AI
KBhAZwa7vND0RaRYqpO9kyZFzh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5
Hx+ftNTXnl/69TnxG44BP8M88ZfDWlpzwpsTXwIDAQABAoIBADzCjOAxZkHfuZyu
La0wTHXpkEfXdJ6ltagq5WY7P6MlOYwcRoK152vlhgXzZl9jL6ely4YjRwec0swq
KdwezpV4fOGVPmuTuw45bx47HEnr/49ZQ4p9FgF9EYQPofbz53FQc/NaMACJcogv
bn+osniw+VMFrOVNmGLiZ5p3Smk8zfXE7GRHO8CL5hpWLWO/aK236yytbfWOjM2f
Pr76ICb26TPRNzYaYUEThU6DtgdLU8pLnJ6QKKaDsjn+zqQzRa+Nvc0c0K8gvWwA
Afq7t0325+uMSwfpLgCOFldcaZQ5uvteJ0CAVRq1MvStnSHBmMzPlgS+NzsDm6lp
QH5+rIkCgYEA5j3jrWsv7TueTNbk8Hr/Zwywc+fA2Ex0pBURBHlHyc6ahSXWSCqo
DtvRGX0GDoK1lCfaIf1qb/DLlGaoHpkEeqcNhXQ+hHs+bZAxfbfBY9+ikit5ZTtl
QN1tIlhaiyLDnwhkpi/hMw1tiouxJUf84Io61z0sCL4hyZSPCpjn0H0CgYEAyH6F
Mwl+bCD3VDL/Dr5WSoOr2B/M3bF5SfvdStwy2IPcDJ716je1Ud/2qFCnKGgqvWhJ
+HU15c7CjAWo7/pXq2/pEMD8fDKTYww4Hr4p6duEA7DpbOGkwcUX8u3eknxUWT9F
jOSbTCvAxuDOC1K3AElyMxVVTNUrFFe8M84R9gsCgYBXmb6RkdG3WlKde7m5gaLB
K4PLZabq5RQQBe/mmtpkfxYtiLrh1FEC7kG9h+MRDExX5V3KRugDVUOv3+shUSjy
HbM4ToUm1NloyE78PTj4bfMl2CKlEJcyucy3H5S7kWuKi5/31wnA6d/+sa2huKUP
Lai7kgu5+9VRJBPUfV7d5QKBgCnhk/13TDtWH5QtGu5/gBMMskbxTaA5xHZZ8H4E
xXJJCRxx0Dje7jduK145itF8AQGT2W/XPC0HJciOHh4TE2EyfWMMjTF8dyFHmimB
28uIGWmT+Q7Pi9UWUMxkOAwtgIksGGE4F+CvexOQPjpLSwL6VKqrGCh2lwsm0J+Z
ulLFAoGAKlC93c6XEj1A31c1+usdEhUe9BrmTqtSYLYpDNpeMLdZ3VctrAZuOQPZ
4A4gkkQkqqwZGBYYSEqwqiLU6MsBdHPPZ9u3JXLLOQuh1xGeaKylvHj7qx6iT0Xo
I+FkJ6/3JeMgOina/+wlzD4oyQpqR4Mnh+TuLkDfQTgY+Lg0WPk=
-----END RSA PRIVATE KEY-----
17
vendor/gopkg.in/mgo.v2/harness/certs/client.req
generated
vendored
@@ -1,17 +0,0 @@
-----BEGIN CERTIFICATE REQUEST-----
MIICoTCCAYkCAQAwXDELMAkGA1UEBhMCR08xDDAKBgNVBAgMA01HTzEMMAoGA1UE
BwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBkNsaWVudDESMBAGA1UEAwwJ
bG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtFIkIZk/
h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7wQidZwLul+cyDfPRDzzo3za4
GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJr4f/tItg0riOEBbLslQDzNTt
CAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJQ6DYEQgCa2BTIWq0Uw3WO20M
3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AIKBhAZwa7vND0RaRYqpO9kyZF
zh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5Hx+ftNTXnl/69TnxG44BP8M8
8ZfDWlpzwpsTXwIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAKbOFblIscxlXalV
sEGNm2oz380RN2QoLhN6nKtAiv0jWm6iKhdAhOIQIeaRPhUP3cyi8bcBvLdMeQ3d
ZYIByB55/R0VSP1vs4qkXJCQegHcpMpyuIzsMV8p3Q4lxzGKyKtPA6Bb5c49p8Sk
ncD+LL4ymrMEia4cBPsHL9hhFOm4gqDacbU8+ETLTpuoSvUZiw7OwngqhE2r+kMv
KDweq5TOPeb+ftKzQKrrfB+XVdBoTKYw6CwARpogbc0/7mvottVcJ/0yAgC1fBbM
vupkohkXwKfjxKl6nKNL3R2GkzHQOh91hglAx5zyybKQn2YMM328Vk4X6csBg+pg
tb1s0MA=
-----END CERTIFICATE REQUEST-----
22
vendor/gopkg.in/mgo.v2/harness/certs/server.crt
generated
vendored
@@ -1,22 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDjTCCAnWgAwIBAgIJAMW+wDfcdzC+MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
BAYTAkdPMQwwCgYDVQQIDANNR08xDDAKBgNVBAcMA01HTzEMMAoGA1UECgwDTUdP
MQ8wDQYDVQQLDAZTZXJ2ZXIxEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0xNTA5Mjkw
ODM0MTBaGA8yMTE1MDkwNTA4MzQxMFowXDELMAkGA1UEBhMCR08xDDAKBgNVBAgM
A01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNlcnZl
cjESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEA/T5W1vTsAF+2gTXP1JKygjM7T/2BXHiJc6DRKVjlshTtPYuC3rpTddDm
6d86d17LWEo+T2bCT4MzZJhSGAun9peFvehdElRMr57xs7j5V1QYjwadMTBkLQuK
IAg6cISN1KPUzpUTUKsWIsbx97sA0t0wiEPifROb7nfSMIVQsdz/c9LlY2UNYI+5
GiU88iDGg2wrdsa3U+l2G2KSx/9uE3c5iFki6bdequLiWmBZ6rxfoaLe4gk1INji
fKssNsn2i3uJ4i4Tmr3PUc4kxx0mMKuWK3HdlQsMqtpq++HQmHSvsPrbgcjl9HyP
JiHDsoJ+4O5bbtcE51oQbLh1bZAhYwIDAQABo1AwTjAdBgNVHQ4EFgQUhku/u9Kd
OAc1L0OR649vCCuQT+0wHwYDVR0jBBgwFoAUhku/u9KdOAc1L0OR649vCCuQT+0w
DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAw7Bgw3hlWXWSZjLhnSOu
2mW/UJ2Sj31unHngmgtXwW/04cyzoULb+qmzPe/Z06QMgGIsku1jFBcu0JabQtUG
TyalpfW77tfnvz238CYdImYwE9ZcIGuZGfhs6ySFN9XpW43B8YM7R8wTNPvOcSPw
nfjqU6kueN4TTspQg9cKhDss5DcMTIdgJgLbITXhIsrCu6GlKOgtX3HrdMGpQX7s
UoMXtZVG8pK32vxKWGTZ6DPqESeKjjq74NbYnB3H5U/kDU2dt7LF90C/Umdr9y+C
W2OJb1WBrf6RTcbt8D6d7P9kOfLPOtyn/cbaA/pfXBMQMHqr7XNXzjnaNU+jB7hL
yQ==
-----END CERTIFICATE-----
28
vendor/gopkg.in/mgo.v2/harness/certs/server.key
generated
vendored
@@ -1,28 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD9PlbW9OwAX7aB
Nc/UkrKCMztP/YFceIlzoNEpWOWyFO09i4LeulN10Obp3zp3XstYSj5PZsJPgzNk
mFIYC6f2l4W96F0SVEyvnvGzuPlXVBiPBp0xMGQtC4ogCDpwhI3Uo9TOlRNQqxYi
xvH3uwDS3TCIQ+J9E5vud9IwhVCx3P9z0uVjZQ1gj7kaJTzyIMaDbCt2xrdT6XYb
YpLH/24TdzmIWSLpt16q4uJaYFnqvF+hot7iCTUg2OJ8qyw2yfaLe4niLhOavc9R
ziTHHSYwq5Yrcd2VCwyq2mr74dCYdK+w+tuByOX0fI8mIcOygn7g7ltu1wTnWhBs
uHVtkCFjAgMBAAECggEASRAfRc1L+Z+jrAu2doIMdnwJdL6S//bW0UFolyFKw+I9
wC/sBg6D3c3zkS4SVDZJPKPO7mGbVg1oWnGH3eAfCYoV0ACmOY+QwGp/GXcYmRVu
MHWcDIEFpelaZHt7QNM9iEfsMd3YwMFblZUIYozVZADk66uKQMPTjS2Muur7qRSi
wuVfSmsVZ5afH3B1Tr96BbmPsHrXLjvNpjO44k2wrnnSPQjUL7+YiZPvtnNW8Fby
yuo2uoAyjg3+68PYZftOvvNneMsv1uyGlUs6Bk+DVWaqofIztWFdFZyXbHnK2PTk
eGQt5EsL+RwIck5eoqd5vSE+KyzhhydL0zcpngVQoQKBgQD/Yelvholbz5NQtSy3
ZoiW1y7hL1BKzvVNHuAMKJ5WOnj5szhjhKxt/wZ+hk0qcAmlV9WAPbf4izbEwPRC
tnMBQzf1uBxqqbLL6WZ4YAyGrcX3UrT7GXsGfVT4zJjz7oYSw8aPircecw5V4exB
xa4NF+ki8IycXSkHwvW2R56fRwKBgQD92xpxXtte/rUnmENbQmr0aKg7JEfMoih6
MdX+f6mfgjMmqj+L4jPTI8/ql8HEy13SQS1534aDSHO+nBqBK5aHUCRMIgSLnTP9
Xyx9Ngg03SZIkPfykqxQmnZgWkTPMhYS+K1Ao9FGVs8W5jVi7veyAdhHptAcxhP3
IuxvrxVTBQKBgQCluMPiu0snaOwP04HRAZhhSgIB3tIbuXE1OnPpb/JPwmH+p25Q
Jig+uN9d+4jXoRyhTv4c2fAoOS6xPwVCxWKbzyLhMTg/fx+ncy4rryhxvRJaDDGl
QEO1Ul9xlFMs9/vI8YJIY5uxBrimwpStmbn4hSukoLSeQ1X802bfglpMwQKBgD8z
GTY4Y20XBIrDAaHquy32EEwJEEcF6AXj+l7N8bDgfVOW9xMgUb6zH8RL29Xeu5Do
4SWCXL66fvZpbr/R1jwB28eIgJExpgvicfUKSqi+lhVi4hfmJDg8/FOopZDf61b1
ykxZfHSCkDQnRAtJaylKBEpyYUWImtfgPfTgJfLxAoGAc8A/Tl2h/DsdTA+cA5d7
1e0l64m13ObruSWRczyru4hy8Yq6E/K2rOFw8cYCcFpy24NqNlk+2iXPLRpWm2zt
9R497zAPvhK/bfPXjvm0j/VjB44lvRTC9hby/RRMHy9UJk4o/UQaD+1IodxZovvk
SruEA1+5bfBRMW0P+h7Qfe4=
-----END PRIVATE KEY-----
0
vendor/gopkg.in/mgo.v2/harness/daemons/cfg1/db/.empty
generated
vendored
BIN
vendor/gopkg.in/mgo.v2/harness/daemons/cfg1/db/journal/tempLatencyTest
generated
vendored
Binary file not shown.
0
vendor/gopkg.in/mgo.v2/harness/daemons/cfg1/db/mongod.lock
generated
vendored
3
vendor/gopkg.in/mgo.v2/harness/daemons/cfg1/log/run
generated
vendored
@@ -1,3 +0,0 @@
#!/bin/sh

exec cat - > log.txt
8
vendor/gopkg.in/mgo.v2/harness/daemons/cfg1/run
generated
vendored
@@ -1,8 +0,0 @@
#!/bin/sh

. ../.env

exec mongod $COMMONCOPTS \
    --port 40101 \
    --configsvr

0
vendor/gopkg.in/mgo.v2/harness/daemons/cfg2/db/.empty
generated
vendored
3
vendor/gopkg.in/mgo.v2/harness/daemons/cfg2/log/run
generated
vendored
@@ -1,3 +0,0 @@
#!/bin/sh

exec cat - > log.txt
8
vendor/gopkg.in/mgo.v2/harness/daemons/cfg2/run
generated
vendored
@@ -1,8 +0,0 @@
#!/bin/sh

. ../.env

exec mongod $COMMONCOPTS \
    --port 40102 \
    --configsvr

0
vendor/gopkg.in/mgo.v2/harness/daemons/cfg3/db/.empty
generated
vendored
3
vendor/gopkg.in/mgo.v2/harness/daemons/cfg3/log/run
generated
vendored
@@ -1,3 +0,0 @@
#!/bin/sh

exec cat - > log.txt
9
vendor/gopkg.in/mgo.v2/harness/daemons/cfg3/run
generated
vendored
@@ -1,9 +0,0 @@
#!/bin/sh

. ../.env

exec mongod $COMMONCOPTS \
    --port 40103 \
    --configsvr \
    --auth \
    --keyFile=../../certs/keyfile
0
vendor/gopkg.in/mgo.v2/harness/daemons/db1/db/.empty
generated
vendored
3
vendor/gopkg.in/mgo.v2/harness/daemons/db1/log/run
generated
vendored
@@ -1,3 +0,0 @@
#!/bin/sh

exec cat - > log.txt
15
vendor/gopkg.in/mgo.v2/harness/daemons/db1/run
generated
vendored
@@ -1,15 +0,0 @@
#!/bin/sh

. ../.env

if [ x$NOIPV6 = x1 ]; then
    BINDIP="127.0.0.1"
else
    BINDIP="127.0.0.1,::1"
fi

exec mongod $COMMONDOPTSNOIP \
    --shardsvr \
    --bind_ip=$BINDIP \
    --port 40001 \
    --ipv6
0
vendor/gopkg.in/mgo.v2/harness/daemons/db2/db/.empty
generated
vendored
3
vendor/gopkg.in/mgo.v2/harness/daemons/db2/log/run
generated
vendored
@@ -1,3 +0,0 @@
#!/bin/sh

exec cat - > log.txt
8
vendor/gopkg.in/mgo.v2/harness/daemons/db2/run
generated
vendored
@@ -1,8 +0,0 @@
#!/bin/sh

. ../.env

exec mongod $COMMONDOPTS \
    --shardsvr \
    --port 40002 \
    --auth
0
vendor/gopkg.in/mgo.v2/harness/daemons/db3/db/.empty
generated
vendored
3
vendor/gopkg.in/mgo.v2/harness/daemons/db3/log/run
generated
vendored
@@ -1,3 +0,0 @@
#!/bin/sh

exec cat - > log.txt
12
vendor/gopkg.in/mgo.v2/harness/daemons/db3/run
generated
vendored
@@ -1,12 +0,0 @@
#!/bin/sh

. ../.env

exec mongod $COMMONDOPTS \
    --shardsvr \
    --port 40003 \
    --auth \
    --sslMode preferSSL \
    --sslCAFile ../../certs/server.pem \
    --sslPEMKeyFile ../../certs/server.pem

0
vendor/gopkg.in/mgo.v2/harness/daemons/rs1a/db/.empty
generated
vendored
3
vendor/gopkg.in/mgo.v2/harness/daemons/rs1a/log/run
generated
vendored
@@ -1,3 +0,0 @@
#!/bin/sh

exec cat - > log.txt
8
vendor/gopkg.in/mgo.v2/harness/daemons/rs1a/run
generated
vendored
@@ -1,8 +0,0 @@
#!/bin/sh

. ../.env

exec mongod $COMMONDOPTS \
    --shardsvr \
    --replSet rs1 \
    --port 40011
0
vendor/gopkg.in/mgo.v2/harness/daemons/rs1b/db/.empty
generated
vendored
3
vendor/gopkg.in/mgo.v2/harness/daemons/rs1b/log/run
generated
vendored
@@ -1,3 +0,0 @@
#!/bin/sh

exec cat - > log.txt
8
vendor/gopkg.in/mgo.v2/harness/daemons/rs1b/run
generated
vendored
@@ -1,8 +0,0 @@
#!/bin/sh

. ../.env

exec mongod $COMMONDOPTS \
    --shardsvr \
    --replSet rs1 \
    --port 40012
0
vendor/gopkg.in/mgo.v2/harness/daemons/rs1c/db/.empty
generated
vendored
3
vendor/gopkg.in/mgo.v2/harness/daemons/rs1c/log/run
generated
vendored
@@ -1,3 +0,0 @@
#!/bin/sh

exec cat - > log.txt
8
vendor/gopkg.in/mgo.v2/harness/daemons/rs1c/run
generated
vendored
@@ -1,8 +0,0 @@
#!/bin/sh

. ../.env

exec mongod $COMMONDOPTS \
    --shardsvr \
    --replSet rs1 \
    --port 40013
0
vendor/gopkg.in/mgo.v2/harness/daemons/rs2a/db/.empty
generated
vendored
3
vendor/gopkg.in/mgo.v2/harness/daemons/rs2a/log/run
generated
vendored
@@ -1,3 +0,0 @@
#!/bin/sh

exec cat - > log.txt
8
vendor/gopkg.in/mgo.v2/harness/daemons/rs2a/run
generated
vendored
@@ -1,8 +0,0 @@
#!/bin/sh

. ../.env

exec mongod $COMMONDOPTS \
    --shardsvr \
    --replSet rs2 \
    --port 40021
0
vendor/gopkg.in/mgo.v2/harness/daemons/rs2b/db/.empty
generated
vendored
3
vendor/gopkg.in/mgo.v2/harness/daemons/rs2b/log/run
generated
vendored
@@ -1,3 +0,0 @@
#!/bin/sh

exec cat - > log.txt
8
vendor/gopkg.in/mgo.v2/harness/daemons/rs2b/run
generated
vendored
@@ -1,8 +0,0 @@
#!/bin/sh

. ../.env

exec mongod $COMMONDOPTS \
    --shardsvr \
    --replSet rs2 \
    --port 40022
0
vendor/gopkg.in/mgo.v2/harness/daemons/rs2c/db/.empty
generated
vendored
3
vendor/gopkg.in/mgo.v2/harness/daemons/rs2c/log/run
generated
vendored
@@ -1,3 +0,0 @@
#!/bin/sh

exec cat - > log.txt
8
vendor/gopkg.in/mgo.v2/harness/daemons/rs2c/run
generated
vendored
@@ -1,8 +0,0 @@
#!/bin/sh

. ../.env

exec mongod $COMMONDOPTS \
    --shardsvr \
    --replSet rs2 \
    --port 40023
0
vendor/gopkg.in/mgo.v2/harness/daemons/rs3a/db/.empty
generated
vendored
3
vendor/gopkg.in/mgo.v2/harness/daemons/rs3a/log/run
generated
vendored
@@ -1,3 +0,0 @@
#!/bin/sh

exec cat - > log.txt
9
vendor/gopkg.in/mgo.v2/harness/daemons/rs3a/run
generated
vendored
@@ -1,9 +0,0 @@
#!/bin/sh

. ../.env

exec mongod $COMMONDOPTS \
    --shardsvr \
    --replSet rs3 \
    --port 40031 \
    --keyFile=../../certs/keyfile
0
vendor/gopkg.in/mgo.v2/harness/daemons/rs3b/db/.empty
generated
vendored
3
vendor/gopkg.in/mgo.v2/harness/daemons/rs3b/log/run
generated
vendored
@@ -1,3 +0,0 @@
#!/bin/sh

exec cat - > log.txt
9
vendor/gopkg.in/mgo.v2/harness/daemons/rs3b/run
generated
vendored
@@ -1,9 +0,0 @@
#!/bin/sh

. ../.env

exec mongod $COMMONDOPTS \
    --shardsvr \
    --replSet rs3 \
    --port 40032 \
    --keyFile=../../certs/keyfile
0
vendor/gopkg.in/mgo.v2/harness/daemons/rs3c/db/.empty
generated
vendored
3
vendor/gopkg.in/mgo.v2/harness/daemons/rs3c/log/run
generated
vendored
@@ -1,3 +0,0 @@
#!/bin/sh

exec cat - > log.txt
9
vendor/gopkg.in/mgo.v2/harness/daemons/rs3c/run
generated
vendored
@@ -1,9 +0,0 @@
#!/bin/sh

. ../.env

exec mongod $COMMONDOPTS \
    --shardsvr \
    --replSet rs3 \
    --port 40033 \
    --keyFile=../../certs/keyfile
0
vendor/gopkg.in/mgo.v2/harness/daemons/rs4a/db/.empty
generated
vendored
3
vendor/gopkg.in/mgo.v2/harness/daemons/rs4a/log/run
generated
vendored
@@ -1,3 +0,0 @@
#!/bin/sh

exec cat - > log.txt
8
vendor/gopkg.in/mgo.v2/harness/daemons/rs4a/run
generated
vendored
@@ -1,8 +0,0 @@
#!/bin/sh

. ../.env

exec mongod $COMMONDOPTS \
    --shardsvr \
    --replSet rs4 \
    --port 40041
3
vendor/gopkg.in/mgo.v2/harness/daemons/s1/log/run
generated
vendored
@@ -1,3 +0,0 @@
#!/bin/sh

exec cat - > log.txt
7
vendor/gopkg.in/mgo.v2/harness/daemons/s1/run
generated
vendored
@@ -1,7 +0,0 @@
#!/bin/sh

. ../.env

exec mongos $COMMONSOPTS \
    --port 40201 \
    --configdb 127.0.0.1:40101
3
vendor/gopkg.in/mgo.v2/harness/daemons/s2/log/run
generated
vendored
@@ -1,3 +0,0 @@
#!/bin/sh

exec cat - > log.txt
7
vendor/gopkg.in/mgo.v2/harness/daemons/s2/run
generated
vendored
@@ -1,7 +0,0 @@
#!/bin/sh

. ../.env

exec mongos $COMMONSOPTS \
    --port 40202 \
    --configdb 127.0.0.1:40102
3
vendor/gopkg.in/mgo.v2/harness/daemons/s3/log/run
generated
vendored
@@ -1,3 +0,0 @@
#!/bin/sh

exec cat - > log.txt
8
vendor/gopkg.in/mgo.v2/harness/daemons/s3/run
generated
vendored
@@ -1,8 +0,0 @@
#!/bin/sh

. ../.env

exec mongos $COMMONSOPTS \
    --port 40203 \
    --configdb 127.0.0.1:40103 \
    --keyFile=../../certs/keyfile
66
vendor/gopkg.in/mgo.v2/harness/mongojs/dropall.js
generated
vendored
@@ -1,66 +0,0 @@

var ports = [40001, 40002, 40011, 40012, 40013, 40021, 40022, 40023, 40041, 40101, 40102, 40103, 40201, 40202, 40203]
var auth = [40002, 40103, 40203, 40031]
var db1 = new Mongo("localhost:40001")

if (db1.getDB("admin").serverBuildInfo().OpenSSLVersion) {
    ports.push(40003)
    auth.push(40003)
}

for (var i in ports) {
    var port = ports[i]
    var server = "localhost:" + port
    var mongo = new Mongo("localhost:" + port)
    var admin = mongo.getDB("admin")

    for (var j in auth) {
        if (auth[j] == port) {
            admin.auth("root", "rapadura")
            admin.system.users.find().forEach(function(u) {
                if (u.user == "root" || u.user == "reader") {
                    return;
                }
                if (typeof admin.dropUser == "function") {
                    mongo.getDB(u.db).dropUser(u.user);
                } else {
                    admin.removeUser(u.user);
                }
            })
            break
        }
    }
    var result = admin.runCommand({"listDatabases": 1})
    for (var j = 0; j != 100; j++) {
        if (typeof result.databases != "undefined" || notMaster(result)) {
            break
        }
        result = admin.runCommand({"listDatabases": 1})
    }
    if (notMaster(result)) {
        continue
    }
    if (typeof result.databases == "undefined") {
        print("Could not list databases. Command result:")
        print(JSON.stringify(result))
        quit(12)
    }
    var dbs = result.databases
    for (var j = 0; j != dbs.length; j++) {
        var db = dbs[j]
        switch (db.name) {
        case "admin":
        case "local":
        case "config":
            break
        default:
            mongo.getDB(db.name).dropDatabase()
        }
    }
}

function notMaster(result) {
    return typeof result.errmsg != "undefined" && (result.errmsg.indexOf("not master") >= 0 || result.errmsg.indexOf("no master found"))
}

// vim:ts=4:sw=4:et
132
vendor/gopkg.in/mgo.v2/harness/mongojs/init.js
generated
vendored
@@ -1,132 +0,0 @@
//var settings = {heartbeatSleep: 0.05, heartbeatTimeout: 0.5}
var settings = {};

// We know the master of the first set (pri=1), but not of the second.
var rs1cfg = {_id: "rs1",
              members: [{_id: 1, host: "127.0.0.1:40011", priority: 1, tags: {rs1: "a"}},
                        {_id: 2, host: "127.0.0.1:40012", priority: 0, tags: {rs1: "b"}},
                        {_id: 3, host: "127.0.0.1:40013", priority: 0, tags: {rs1: "c"}}],
              settings: settings}
var rs2cfg = {_id: "rs2",
              members: [{_id: 1, host: "127.0.0.1:40021", priority: 1, tags: {rs2: "a"}},
                        {_id: 2, host: "127.0.0.1:40022", priority: 1, tags: {rs2: "b"}},
                        {_id: 3, host: "127.0.0.1:40023", priority: 1, tags: {rs2: "c"}}],
              settings: settings}
var rs3cfg = {_id: "rs3",
              members: [{_id: 1, host: "127.0.0.1:40031", priority: 1, tags: {rs3: "a"}},
                        {_id: 2, host: "127.0.0.1:40032", priority: 1, tags: {rs3: "b"}},
                        {_id: 3, host: "127.0.0.1:40033", priority: 1, tags: {rs3: "c"}}],
              settings: settings}

for (var i = 0; i != 60; i++) {
    try {
        db1 = new Mongo("127.0.0.1:40001").getDB("admin")
        db2 = new Mongo("127.0.0.1:40002").getDB("admin")
        rs1a = new Mongo("127.0.0.1:40011").getDB("admin")
        rs2a = new Mongo("127.0.0.1:40021").getDB("admin")
        rs3a = new Mongo("127.0.0.1:40031").getDB("admin")
        break
    } catch(err) {
        print("Can't connect yet...")
    }
    sleep(1000)
}

function hasSSL() {
    return Boolean(db1.serverBuildInfo().OpenSSLVersion)
}

rs1a.runCommand({replSetInitiate: rs1cfg})
rs2a.runCommand({replSetInitiate: rs2cfg})
rs3a.runCommand({replSetInitiate: rs3cfg})

function configShards() {
    cfg1 = new Mongo("127.0.0.1:40201").getDB("admin")
    cfg1.runCommand({addshard: "127.0.0.1:40001"})
    cfg1.runCommand({addshard: "rs1/127.0.0.1:40011"})

    cfg2 = new Mongo("127.0.0.1:40202").getDB("admin")
    cfg2.runCommand({addshard: "rs2/127.0.0.1:40021"})

    cfg3 = new Mongo("127.0.0.1:40203").getDB("admin")
    cfg3.runCommand({addshard: "rs3/127.0.0.1:40031"})
}

function configAuth() {
    var addrs = ["127.0.0.1:40002", "127.0.0.1:40203", "127.0.0.1:40031"]
    if (hasSSL()) {
        addrs.push("127.0.0.1:40003")
    }
    for (var i in addrs) {
        print("Configuring auth for", addrs[i])
        var db = new Mongo(addrs[i]).getDB("admin")
        var v = db.serverBuildInfo().versionArray
        var timedOut = false
        if (v < [2, 5]) {
            db.addUser("root", "rapadura")
        } else {
            try {
                db.createUser({user: "root", pwd: "rapadura", roles: ["root"]})
            } catch (err) {
                // 3.2 consistently fails replication of creds on 40031 (config server)
                print("createUser command returned an error: " + err)
                if (String(err).indexOf("timed out") >= 0) {
                    timedOut = true;
                }
            }
        }
        for (var i = 0; i < 60; i++) {
            var ok = db.auth("root", "rapadura")
            if (ok || !timedOut) {
                break
            }
            sleep(1000);
        }
        if (v >= [2, 6]) {
            db.createUser({user: "reader", pwd: "rapadura", roles: ["readAnyDatabase"]})
        } else if (v >= [2, 4]) {
            db.addUser({user: "reader", pwd: "rapadura", roles: ["readAnyDatabase"]})
        } else {
            db.addUser("reader", "rapadura", true)
        }
    }
}

function countHealthy(rs) {
    var status = rs.runCommand({replSetGetStatus: 1})
    var count = 0
    var primary = 0
    if (typeof status.members != "undefined") {
        for (var i = 0; i != status.members.length; i++) {
            var m = status.members[i]
            if (m.health == 1 && (m.state == 1 || m.state == 2)) {
                count += 1
                if (m.state == 1) {
                    primary = 1
                }
            }
        }
    }
    if (primary == 0) {
        count = 0
    }
    return count
}

var totalRSMembers = rs1cfg.members.length + rs2cfg.members.length + rs3cfg.members.length

for (var i = 0; i != 60; i++) {
    var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a)
    print("Replica sets have", count, "healthy nodes.")
    if (count == totalRSMembers) {
        configShards()
        configAuth()
        quit(0)
    }
    sleep(1000)
}

print("Replica sets didn't sync up properly.")
quit(12)

// vim:ts=4:sw=4:et
67
vendor/gopkg.in/mgo.v2/harness/mongojs/wait.js
generated
vendored
@@ -1,67 +0,0 @@
// We know the master of the first set (pri=1), but not of the second.
var settings = {}
var rs1cfg = {_id: "rs1",
              members: [{_id: 1, host: "127.0.0.1:40011", priority: 1},
                        {_id: 2, host: "127.0.0.1:40012", priority: 0},
                        {_id: 3, host: "127.0.0.1:40013", priority: 0}]}
var rs2cfg = {_id: "rs2",
              members: [{_id: 1, host: "127.0.0.1:40021", priority: 1},
                        {_id: 2, host: "127.0.0.1:40022", priority: 1},
                        {_id: 3, host: "127.0.0.1:40023", priority: 0}]}
var rs3cfg = {_id: "rs3",
              members: [{_id: 1, host: "127.0.0.1:40031", priority: 1},
                        {_id: 2, host: "127.0.0.1:40032", priority: 1},
                        {_id: 3, host: "127.0.0.1:40033", priority: 1}],
              settings: settings}

for (var i = 0; i != 60; i++) {
    try {
        rs1a = new Mongo("127.0.0.1:40011").getDB("admin")
        rs2a = new Mongo("127.0.0.1:40021").getDB("admin")
        rs3a = new Mongo("127.0.0.1:40031").getDB("admin")
        rs3a.auth("root", "rapadura")
        db1 = new Mongo("127.0.0.1:40001").getDB("admin")
        db2 = new Mongo("127.0.0.1:40002").getDB("admin")
        break
    } catch(err) {
        print("Can't connect yet...")
    }
    sleep(1000)
}

function countHealthy(rs) {
    var status = rs.runCommand({replSetGetStatus: 1})
    var count = 0
    var primary = 0
    if (typeof status.members != "undefined") {
        for (var i = 0; i != status.members.length; i++) {
            var m = status.members[i]
            if (m.health == 1 && (m.state == 1 || m.state == 2)) {
                count += 1
                if (m.state == 1) {
                    primary = 1
                }
            }
        }
    }
    if (primary == 0) {
        count = 0
    }
    return count
}

var totalRSMembers = rs1cfg.members.length + rs2cfg.members.length + rs3cfg.members.length

for (var i = 0; i != 90; i++) {
    var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a)
    print("Replica sets have", count, "healthy nodes.")
    if (count == totalRSMembers) {
        quit(0)
    }
    sleep(1000)
}

print("Replica sets didn't sync up properly.")
quit(12)

// vim:ts=4:sw=4:et
96
vendor/gopkg.in/mgo.v2/harness/setup.sh
generated
vendored
@@ -1,96 +0,0 @@
#!/bin/sh -e

LINE="---------------"

start() {
    if [ -d _harness ]; then
        echo "Daemon setup already in place, stop it first."
        exit 1
    fi
    mkdir -p _harness
    cd _harness
    cp -a ../harness/daemons .
    cp -a ../harness/certs .
    echo keyfile > certs/keyfile
    chmod 600 certs/keyfile
    if ! mongod --help | grep -q -- --ssl; then
        rm -rf daemons/db3
    fi
    COUNT=$(ls daemons | wc -l)
    echo "Running daemons..."
    svscan daemons &
    SVSCANPID=$!
    echo $SVSCANPID > svscan.pid
    if ! kill -0 $SVSCANPID; then
        echo "Cannot execute svscan."
        exit 1
    fi
    echo "Starting $COUNT processes..."
    for i in $(seq 30); do
        UP=$(svstat daemons/* | grep ' up ' | grep -v ' [0-3] seconds' | wc -l)
        echo "$UP processes up..."
        if [ x$COUNT = x$UP ]; then
            echo "Running setup.js with mongo..."
            mongo --nodb ../harness/mongojs/init.js
            exit 0
        fi
        sleep 1
    done
    echo "Failed to start processes. svstat _harness/daemons/* output:"
    echo $LINE
    svstat daemons/*
    echo $LINE
    for DAEMON in daemons/*; do
        if $(svstat $DAEMON | grep ' up ' | grep ' [0-3] seconds' > /dev/null); then
            echo "Logs for _harness/$DAEMON:"
            echo $LINE
            cat $DAEMON/log/log.txt
            echo $LINE
        fi
    done
    exit 1
}

stop() {
    if [ -d _harness ]; then
        cd _harness
        if [ -f svscan.pid ]; then
            kill -9 $(cat svscan.pid) 2> /dev/null || true
            svc -dx daemons/* daemons/*/log > /dev/null 2>&1 || true
            COUNT=$(ls daemons | wc -l)
            echo "Shutting down $COUNT processes..."
            while true; do
                DOWN=$(svstat daemons/* | grep 'supervise not running' | wc -l)
                echo "$DOWN processes down..."
                if [ x$DOWN = x$COUNT ]; then
                    break
                fi
                sleep 1
            done
            rm svscan.pid
            echo "Done."
        fi
        cd ..
        rm -rf _harness
    fi
}


if [ ! -f suite_test.go ]; then
    echo "This script must be run from within the source directory."
    exit 1
fi

case "$1" in

start)
    start $2
    ;;

stop)
    stop $2
    ;;

esac

# vim:ts=4:sw=4:et
27
vendor/gopkg.in/mgo.v2/internal/json/LICENSE
generated
vendored
@@ -1,27 +0,0 @@
Copyright (c) 2012 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
223
vendor/gopkg.in/mgo.v2/internal/json/bench_test.go
generated
vendored
@@ -1,223 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Large data benchmark.
// The JSON data is a summary of agl's changes in the
// go, webkit, and chromium open source projects.
// We benchmark converting between the JSON form
// and in-memory data structures.

package json

import (
	"bytes"
	"compress/gzip"
	"io/ioutil"
	"os"
	"strings"
	"testing"
)

type codeResponse struct {
	Tree     *codeNode `json:"tree"`
	Username string    `json:"username"`
}

type codeNode struct {
	Name     string      `json:"name"`
	Kids     []*codeNode `json:"kids"`
	CLWeight float64     `json:"cl_weight"`
	Touches  int         `json:"touches"`
	MinT     int64       `json:"min_t"`
	MaxT     int64       `json:"max_t"`
	MeanT    int64       `json:"mean_t"`
}

var codeJSON []byte
var codeStruct codeResponse

func codeInit() {
	f, err := os.Open("testdata/code.json.gz")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	gz, err := gzip.NewReader(f)
	if err != nil {
		panic(err)
	}
	data, err := ioutil.ReadAll(gz)
	if err != nil {
		panic(err)
	}

	codeJSON = data

	if err := Unmarshal(codeJSON, &codeStruct); err != nil {
		panic("unmarshal code.json: " + err.Error())
	}

	if data, err = Marshal(&codeStruct); err != nil {
		panic("marshal code.json: " + err.Error())
	}

	if !bytes.Equal(data, codeJSON) {
		println("different lengths", len(data), len(codeJSON))
		for i := 0; i < len(data) && i < len(codeJSON); i++ {
			if data[i] != codeJSON[i] {
				println("re-marshal: changed at byte", i)
				println("orig: ", string(codeJSON[i-10:i+10]))
				println("new: ", string(data[i-10:i+10]))
				break
			}
		}
		panic("re-marshal code.json: different result")
	}
}

func BenchmarkCodeEncoder(b *testing.B) {
	if codeJSON == nil {
		b.StopTimer()
		codeInit()
		b.StartTimer()
	}
	enc := NewEncoder(ioutil.Discard)
	for i := 0; i < b.N; i++ {
		if err := enc.Encode(&codeStruct); err != nil {
			b.Fatal("Encode:", err)
		}
	}
	b.SetBytes(int64(len(codeJSON)))
}

func BenchmarkCodeMarshal(b *testing.B) {
	if codeJSON == nil {
		b.StopTimer()
		codeInit()
		b.StartTimer()
	}
	for i := 0; i < b.N; i++ {
		if _, err := Marshal(&codeStruct); err != nil {
			b.Fatal("Marshal:", err)
		}
	}
	b.SetBytes(int64(len(codeJSON)))
}

func BenchmarkCodeDecoder(b *testing.B) {
	if codeJSON == nil {
		b.StopTimer()
		codeInit()
		b.StartTimer()
	}
	var buf bytes.Buffer
	dec := NewDecoder(&buf)
	var r codeResponse
	for i := 0; i < b.N; i++ {
		buf.Write(codeJSON)
		// hide EOF
		buf.WriteByte('\n')
		buf.WriteByte('\n')
		buf.WriteByte('\n')
		if err := dec.Decode(&r); err != nil {
			b.Fatal("Decode:", err)
		}
	}
	b.SetBytes(int64(len(codeJSON)))
}

func BenchmarkDecoderStream(b *testing.B) {
	b.StopTimer()
	var buf bytes.Buffer
	dec := NewDecoder(&buf)
	buf.WriteString(`"` + strings.Repeat("x", 1000000) + `"` + "\n\n\n")
	var x interface{}
	if err := dec.Decode(&x); err != nil {
		b.Fatal("Decode:", err)
	}
	ones := strings.Repeat(" 1\n", 300000) + "\n\n\n"
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		if i%300000 == 0 {
			buf.WriteString(ones)
		}
		x = nil
		if err := dec.Decode(&x); err != nil || x != 1.0 {
			b.Fatalf("Decode: %v after %d", err, i)
		}
	}
}

func BenchmarkCodeUnmarshal(b *testing.B) {
	if codeJSON == nil {
		b.StopTimer()
		codeInit()
		b.StartTimer()
	}
	for i := 0; i < b.N; i++ {
		var r codeResponse
		if err := Unmarshal(codeJSON, &r); err != nil {
			b.Fatal("Unmarshal:", err)
		}
	}
	b.SetBytes(int64(len(codeJSON)))
}

func BenchmarkCodeUnmarshalReuse(b *testing.B) {
	if codeJSON == nil {
		b.StopTimer()
		codeInit()
		b.StartTimer()
	}
	var r codeResponse
	for i := 0; i < b.N; i++ {
		if err := Unmarshal(codeJSON, &r); err != nil {
			b.Fatal("Unmarshal:", err)
		}
	}
}

func BenchmarkUnmarshalString(b *testing.B) {
	data := []byte(`"hello, world"`)
	var s string

	for i := 0; i < b.N; i++ {
		if err := Unmarshal(data, &s); err != nil {
			b.Fatal("Unmarshal:", err)
		}
	}
}

func BenchmarkUnmarshalFloat64(b *testing.B) {
	var f float64
	data := []byte(`3.14`)

	for i := 0; i < b.N; i++ {
		if err := Unmarshal(data, &f); err != nil {
			b.Fatal("Unmarshal:", err)
		}
	}
}

func BenchmarkUnmarshalInt64(b *testing.B) {
	var x int64
	data := []byte(`3`)

	for i := 0; i < b.N; i++ {
		if err := Unmarshal(data, &x); err != nil {
			b.Fatal("Unmarshal:", err)
		}
	}
}

func BenchmarkIssue10335(b *testing.B) {
	b.ReportAllocs()
	var s struct{}
	j := []byte(`{"a":{ }}`)
	for n := 0; n < b.N; n++ {
		if err := Unmarshal(j, &s); err != nil {
			b.Fatal(err)
		}
	}
}
1685
vendor/gopkg.in/mgo.v2/internal/json/decode.go
generated
vendored
File diff suppressed because it is too large
1512
vendor/gopkg.in/mgo.v2/internal/json/decode_test.go
generated
vendored
File diff suppressed because it is too large
1256
vendor/gopkg.in/mgo.v2/internal/json/encode.go
generated
vendored
File diff suppressed because it is too large
613
vendor/gopkg.in/mgo.v2/internal/json/encode_test.go
generated
vendored
613
vendor/gopkg.in/mgo.v2/internal/json/encode_test.go
generated
vendored
@@ -1,613 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"testing"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
type Optionals struct {
|
||||
Sr string `json:"sr"`
|
||||
So string `json:"so,omitempty"`
|
||||
Sw string `json:"-"`
|
||||
|
||||
Ir int `json:"omitempty"` // actually named omitempty, not an option
|
||||
Io int `json:"io,omitempty"`
|
||||
|
||||
Slr []string `json:"slr,random"`
|
||||
Slo []string `json:"slo,omitempty"`
|
||||
|
||||
Mr map[string]interface{} `json:"mr"`
|
||||
Mo map[string]interface{} `json:",omitempty"`
|
||||
|
||||
Fr float64 `json:"fr"`
|
||||
Fo float64 `json:"fo,omitempty"`
|
||||
|
||||
Br bool `json:"br"`
|
||||
Bo bool `json:"bo,omitempty"`
|
||||
|
||||
Ur uint `json:"ur"`
|
||||
Uo uint `json:"uo,omitempty"`
|
||||
|
||||
Str struct{} `json:"str"`
|
||||
Sto struct{} `json:"sto,omitempty"`
|
||||
}
|
||||
|
||||
var optionalsExpected = `{
|
||||
"sr": "",
|
||||
"omitempty": 0,
|
||||
"slr": null,
|
||||
"mr": {},
|
||||
"fr": 0,
|
||||
"br": false,
|
||||
"ur": 0,
|
||||
"str": {},
|
||||
"sto": {}
|
||||
}`
|
||||
|
||||
func TestOmitEmpty(t *testing.T) {
|
||||
var o Optionals
|
||||
o.Sw = "something"
|
||||
o.Mr = map[string]interface{}{}
|
||||
o.Mo = map[string]interface{}{}
|
||||
|
||||
got, err := MarshalIndent(&o, "", " ")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got := string(got); got != optionalsExpected {
|
||||
t.Errorf(" got: %s\nwant: %s\n", got, optionalsExpected)
|
||||
}
|
||||
}
|
||||
|
||||
type StringTag struct {
|
||||
BoolStr bool `json:",string"`
|
||||
IntStr int64 `json:",string"`
|
||||
StrStr string `json:",string"`
|
||||
}
|
||||
|
||||
var stringTagExpected = `{
|
||||
"BoolStr": "true",
|
||||
"IntStr": "42",
|
||||
"StrStr": "\"xzbit\""
|
||||
}`
|
||||
|
||||
func TestStringTag(t *testing.T) {
|
||||
var s StringTag
|
||||
s.BoolStr = true
|
||||
s.IntStr = 42
|
||||
s.StrStr = "xzbit"
|
||||
got, err := MarshalIndent(&s, "", " ")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got := string(got); got != stringTagExpected {
|
||||
t.Fatalf(" got: %s\nwant: %s\n", got, stringTagExpected)
|
||||
}
|
||||
|
||||
// Verify that it round-trips.
|
||||
var s2 StringTag
|
||||
err = NewDecoder(bytes.NewReader(got)).Decode(&s2)
|
||||
if err != nil {
|
||||
t.Fatalf("Decode: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(s, s2) {
|
||||
t.Fatalf("decode didn't match.\nsource: %#v\nEncoded as:\n%s\ndecode: %#v", s, string(got), s2)
|
||||
}
|
||||
}
|
||||
|
||||
// byte slices are special even if they're renamed types.
|
||||
type renamedByte byte
|
||||
type renamedByteSlice []byte
|
||||
type renamedRenamedByteSlice []renamedByte
|
||||
|
||||
func TestEncodeRenamedByteSlice(t *testing.T) {
|
||||
s := renamedByteSlice("abc")
|
||||
result, err := Marshal(s)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expect := `"YWJj"`
|
||||
if string(result) != expect {
|
||||
t.Errorf(" got %s want %s", result, expect)
|
||||
}
|
||||
r := renamedRenamedByteSlice("abc")
|
||||
result, err = Marshal(r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(result) != expect {
|
||||
t.Errorf(" got %s want %s", result, expect)
|
||||
}
|
||||
}
|
||||
|
||||
var unsupportedValues = []interface{}{
|
||||
math.NaN(),
|
||||
math.Inf(-1),
|
||||
math.Inf(1),
|
||||
}
|
||||
|
||||
func TestUnsupportedValues(t *testing.T) {
|
||||
for _, v := range unsupportedValues {
|
||||
if _, err := Marshal(v); err != nil {
|
||||
if _, ok := err.(*UnsupportedValueError); !ok {
|
||||
t.Errorf("for %v, got %T want UnsupportedValueError", v, err)
|
||||
}
|
||||
} else {
|
||||
t.Errorf("for %v, expected error", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Ref has Marshaler and Unmarshaler methods with pointer receiver.
|
||||
type Ref int
|
||||
|
||||
func (*Ref) MarshalJSON() ([]byte, error) {
|
||||
return []byte(`"ref"`), nil
|
||||
}
|
||||
|
||||
func (r *Ref) UnmarshalJSON([]byte) error {
|
||||
*r = 12
|
||||
return nil
|
||||
}
|
||||
|
||||
// Val has Marshaler methods with value receiver.
|
||||
type Val int
|
||||
|
||||
func (Val) MarshalJSON() ([]byte, error) {
|
||||
return []byte(`"val"`), nil
|
||||
}
|
||||
|
||||
// RefText has Marshaler and Unmarshaler methods with pointer receiver.
|
||||
type RefText int
|
||||
|
||||
func (*RefText) MarshalText() ([]byte, error) {
|
||||
return []byte(`"ref"`), nil
|
||||
}
|
||||
|
||||
func (r *RefText) UnmarshalText([]byte) error {
|
||||
*r = 13
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValText has Marshaler methods with value receiver.
|
||||
type ValText int
|
||||
|
||||
func (ValText) MarshalText() ([]byte, error) {
|
||||
return []byte(`"val"`), nil
|
||||
}
|
||||
|
||||
func TestRefValMarshal(t *testing.T) {
|
||||
var s = struct {
|
||||
R0 Ref
|
||||
R1 *Ref
|
||||
R2 RefText
|
||||
R3 *RefText
|
||||
V0 Val
|
||||
V1 *Val
|
||||
V2 ValText
|
||||
V3 *ValText
|
||||
}{
|
||||
R0: 12,
|
||||
R1: new(Ref),
|
||||
R2: 14,
|
||||
R3: new(RefText),
|
||||
V0: 13,
|
||||
V1: new(Val),
|
||||
V2: 15,
|
||||
V3: new(ValText),
|
||||
}
|
||||
const want = `{"R0":"ref","R1":"ref","R2":"\"ref\"","R3":"\"ref\"","V0":"val","V1":"val","V2":"\"val\"","V3":"\"val\""}`
|
||||
b, err := Marshal(&s)
|
||||
if err != nil {
|
||||
t.Fatalf("Marshal: %v", err)
|
||||
}
|
||||
if got := string(b); got != want {
|
||||
t.Errorf("got %q, want %q", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
// C implements Marshaler and returns unescaped JSON.
|
||||
type C int
|
||||
|
||||
func (C) MarshalJSON() ([]byte, error) {
|
||||
return []byte(`"<&>"`), nil
|
||||
}
|
||||
|
||||
// CText implements Marshaler and returns unescaped text.
|
||||
type CText int
|
||||
|
||||
func (CText) MarshalText() ([]byte, error) {
|
||||
return []byte(`"<&>"`), nil
|
||||
}
|
||||
|
||||
func TestMarshalerEscaping(t *testing.T) {
|
||||
var c C
|
||||
want := `"\u003c\u0026\u003e"`
|
||||
b, err := Marshal(c)
|
||||
if err != nil {
|
||||
t.Fatalf("Marshal(c): %v", err)
|
||||
}
|
||||
if got := string(b); got != want {
|
||||
t.Errorf("Marshal(c) = %#q, want %#q", got, want)
|
||||
}
|
||||
|
||||
var ct CText
|
||||
want = `"\"\u003c\u0026\u003e\""`
|
||||
b, err = Marshal(ct)
|
||||
if err != nil {
|
||||
t.Fatalf("Marshal(ct): %v", err)
|
||||
}
|
||||
if got := string(b); got != want {
|
||||
t.Errorf("Marshal(ct) = %#q, want %#q", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
type IntType int
|
||||
|
||||
type MyStruct struct {
|
||||
IntType
|
||||
}
|
||||
|
||||
func TestAnonymousNonstruct(t *testing.T) {
|
||||
var i IntType = 11
|
||||
a := MyStruct{i}
|
||||
const want = `{"IntType":11}`
|
||||
|
||||
b, err := Marshal(a)
|
||||
if err != nil {
|
||||
t.Fatalf("Marshal: %v", err)
|
||||
}
|
||||
if got := string(b); got != want {
|
||||
t.Errorf("got %q, want %q", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
type BugA struct {
|
||||
S string
|
||||
}
|
||||
|
||||
type BugB struct {
|
||||
BugA
|
||||
S string
|
||||
}
|
||||
|
||||
type BugC struct {
|
||||
S string
|
||||
}
|
||||
|
||||
// Legal Go: We never use the repeated embedded field (S).
|
||||
type BugX struct {
|
||||
A int
|
||||
BugA
|
||||
BugB
|
||||
}
|
||||
|
||||
// Issue 5245.
|
||||
func TestEmbeddedBug(t *testing.T) {
|
||||
v := BugB{
|
||||
BugA{"A"},
|
||||
"B",
|
||||
}
|
||||
b, err := Marshal(v)
|
||||
if err != nil {
|
||||
t.Fatal("Marshal:", err)
|
||||
}
|
||||
want := `{"S":"B"}`
|
||||
got := string(b)
|
||||
if got != want {
|
||||
t.Fatalf("Marshal: got %s want %s", got, want)
|
||||
}
|
||||
// Now check that the duplicate field, S, does not appear.
|
||||
x := BugX{
|
||||
A: 23,
|
||||
}
|
||||
b, err = Marshal(x)
|
||||
if err != nil {
|
||||
t.Fatal("Marshal:", err)
|
||||
}
|
||||
want = `{"A":23}`
|
||||
got = string(b)
|
||||
if got != want {
|
||||
t.Fatalf("Marshal: got %s want %s", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
type BugD struct { // Same as BugA after tagging.
|
||||
XXX string `json:"S"`
|
||||
}
|
||||
|
||||
// BugD's tagged S field should dominate BugA's.
|
||||
type BugY struct {
|
||||
BugA
|
||||
BugD
|
||||
}
|
||||
|
||||
// Test that a field with a tag dominates untagged fields.
|
||||
func TestTaggedFieldDominates(t *testing.T) {
|
||||
v := BugY{
|
||||
BugA{"BugA"},
|
||||
BugD{"BugD"},
|
||||
}
|
||||
b, err := Marshal(v)
|
||||
if err != nil {
|
||||
t.Fatal("Marshal:", err)
|
||||
}
|
||||
want := `{"S":"BugD"}`
|
||||
got := string(b)
|
||||
if got != want {
|
||||
t.Fatalf("Marshal: got %s want %s", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
// There are no tags here, so S should not appear.
|
||||
type BugZ struct {
|
||||
BugA
|
||||
BugC
|
||||
BugY // Contains a tagged S field through BugD; should not dominate.
|
||||
}
|
||||
|
||||
func TestDuplicatedFieldDisappears(t *testing.T) {
|
||||
v := BugZ{
|
||||
BugA{"BugA"},
|
||||
BugC{"BugC"},
|
||||
BugY{
|
||||
BugA{"nested BugA"},
|
||||
BugD{"nested BugD"},
|
||||
},
|
||||
}
|
||||
b, err := Marshal(v)
|
||||
if err != nil {
|
||||
t.Fatal("Marshal:", err)
|
||||
}
|
||||
want := `{}`
|
||||
got := string(b)
|
||||
if got != want {
|
||||
t.Fatalf("Marshal: got %s want %s", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringBytes(t *testing.T) {
	// Test that encodeState.stringBytes and encodeState.string use the same encoding.
	var r []rune
	for i := '\u0000'; i <= unicode.MaxRune; i++ {
		r = append(r, i)
	}
	s := string(r) + "\xff\xff\xffhello" // some invalid UTF-8 too

	for _, escapeHTML := range []bool{true, false} {
		es := &encodeState{}
		es.string(s, escapeHTML)

		esBytes := &encodeState{}
		esBytes.stringBytes([]byte(s), escapeHTML)

		enc := es.Buffer.String()
		encBytes := esBytes.Buffer.String()
		if enc != encBytes {
			i := 0
			for i < len(enc) && i < len(encBytes) && enc[i] == encBytes[i] {
				i++
			}
			enc = enc[i:]
			encBytes = encBytes[i:]
			i = 0
			for i < len(enc) && i < len(encBytes) && enc[len(enc)-i-1] == encBytes[len(encBytes)-i-1] {
				i++
			}
			enc = enc[:len(enc)-i]
			encBytes = encBytes[:len(encBytes)-i]

			if len(enc) > 20 {
				enc = enc[:20] + "..."
			}
			if len(encBytes) > 20 {
				encBytes = encBytes[:20] + "..."
			}

			t.Errorf("with escapeHTML=%t, encodings differ at %#q vs %#q",
				escapeHTML, enc, encBytes)
		}
	}
}

func TestIssue6458(t *testing.T) {
	type Foo struct {
		M RawMessage
	}
	x := Foo{RawMessage(`"foo"`)}

	b, err := Marshal(&x)
	if err != nil {
		t.Fatal(err)
	}
	if want := `{"M":"foo"}`; string(b) != want {
		t.Errorf("Marshal(&x) = %#q; want %#q", b, want)
	}

	b, err = Marshal(x)
	if err != nil {
		t.Fatal(err)
	}

	if want := `{"M":"ImZvbyI="}`; string(b) != want {
		t.Errorf("Marshal(x) = %#q; want %#q", b, want)
	}
}
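
TestIssue6458 pins down the addressability quirk of pointer-receiver marshalers in this vendored snapshot: MarshalJSON was defined on *RawMessage, so Marshal(&x) can take M's address and emit the raw JSON, while Marshal(x) cannot and falls back to encoding the underlying []byte as base64. (The upstream library later moved RawMessage.MarshalJSON to a value receiver, so both calls now agree.) The general mechanism reproduces with any pointer-receiver marshaler; a hedged sketch, using an illustrative rawish type that is not part of the vendored code:

package main

import (
	"encoding/json"
	"fmt"
)

// Editor's sketch: rawish mimics the old RawMessage, with
// MarshalJSON on the pointer receiver only.
type rawish []byte

func (r *rawish) MarshalJSON() ([]byte, error) { return *r, nil }

type holder struct {
	M rawish
}

func main() {
	h := holder{rawish(`"foo"`)}

	// Addressable: Marshal can call (*rawish).MarshalJSON.
	b, _ := json.Marshal(&h)
	fmt.Println(string(b)) // {"M":"foo"}

	// Not addressable: falls back to []byte encoding, i.e. base64.
	b, _ = json.Marshal(h)
	fmt.Println(string(b)) // {"M":"ImZvbyI="}
}
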
func TestIssue10281(t *testing.T) {
	type Foo struct {
		N Number
	}
	x := Foo{Number(`invalid`)}

	b, err := Marshal(&x)
	if err == nil {
		t.Errorf("Marshal(&x) = %#q; want error", b)
	}
}

func TestHTMLEscape(t *testing.T) {
	var b, want bytes.Buffer
	m := `{"M":"<html>foo &` + "\xe2\x80\xa8 \xe2\x80\xa9" + `</html>"}`
	want.Write([]byte(`{"M":"\u003chtml\u003efoo \u0026\u2028 \u2029\u003c/html\u003e"}`))
	HTMLEscape(&b, []byte(m))
	if !bytes.Equal(b.Bytes(), want.Bytes()) {
		t.Errorf("HTMLEscape(&b, []byte(m)) = %s; want %s", b.Bytes(), want.Bytes())
	}
}

// golang.org/issue/8582
func TestEncodePointerString(t *testing.T) {
	type stringPointer struct {
		N *int64 `json:"n,string"`
	}
	var n int64 = 42
	b, err := Marshal(stringPointer{N: &n})
	if err != nil {
		t.Fatalf("Marshal: %v", err)
	}
	if got, want := string(b), `{"n":"42"}`; got != want {
		t.Errorf("Marshal = %s, want %s", got, want)
	}
	var back stringPointer
	err = Unmarshal(b, &back)
	if err != nil {
		t.Fatalf("Unmarshal: %v", err)
	}
	if back.N == nil {
		t.Fatalf("Unmarshalled nil N field")
	}
	if *back.N != 42 {
		t.Fatalf("*N = %d; want 42", *back.N)
	}
}
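
The `,string` tag option quotes a numeric (or boolean/string) field as a JSON string on the way out and unquotes it again on the way in; the test above checks that the round trip survives an indirection through a pointer. Against the standard library the same round trip looks like this (the order/Price names are illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

// Editor's sketch, not part of the vendored file.
type order struct {
	// ",string" encodes the int64 as a quoted decimal string.
	Price *int64 `json:"price,string"`
}

func main() {
	p := int64(42)
	b, _ := json.Marshal(order{Price: &p})
	fmt.Println(string(b)) // {"price":"42"}

	var back order
	if err := json.Unmarshal(b, &back); err != nil {
		panic(err)
	}
	fmt.Println(*back.Price) // 42
}
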
var encodeStringTests = []struct {
	in  string
	out string
}{
	{"\x00", `"\u0000"`},
	{"\x01", `"\u0001"`},
	{"\x02", `"\u0002"`},
	{"\x03", `"\u0003"`},
	{"\x04", `"\u0004"`},
	{"\x05", `"\u0005"`},
	{"\x06", `"\u0006"`},
	{"\x07", `"\u0007"`},
	{"\x08", `"\u0008"`},
	{"\x09", `"\t"`},
	{"\x0a", `"\n"`},
	{"\x0b", `"\u000b"`},
	{"\x0c", `"\u000c"`},
	{"\x0d", `"\r"`},
	{"\x0e", `"\u000e"`},
	{"\x0f", `"\u000f"`},
	{"\x10", `"\u0010"`},
	{"\x11", `"\u0011"`},
	{"\x12", `"\u0012"`},
	{"\x13", `"\u0013"`},
	{"\x14", `"\u0014"`},
	{"\x15", `"\u0015"`},
	{"\x16", `"\u0016"`},
	{"\x17", `"\u0017"`},
	{"\x18", `"\u0018"`},
	{"\x19", `"\u0019"`},
	{"\x1a", `"\u001a"`},
	{"\x1b", `"\u001b"`},
	{"\x1c", `"\u001c"`},
	{"\x1d", `"\u001d"`},
	{"\x1e", `"\u001e"`},
	{"\x1f", `"\u001f"`},
}

func TestEncodeString(t *testing.T) {
	for _, tt := range encodeStringTests {
		b, err := Marshal(tt.in)
		if err != nil {
			t.Errorf("Marshal(%q): %v", tt.in, err)
			continue
		}
		out := string(b)
		if out != tt.out {
			t.Errorf("Marshal(%q) = %#q, want %#q", tt.in, out, tt.out)
		}
	}
}
type jsonbyte byte

func (b jsonbyte) MarshalJSON() ([]byte, error) { return tenc(`{"JB":%d}`, b) }

type textbyte byte

func (b textbyte) MarshalText() ([]byte, error) { return tenc(`TB:%d`, b) }

type jsonint int

func (i jsonint) MarshalJSON() ([]byte, error) { return tenc(`{"JI":%d}`, i) }

type textint int

func (i textint) MarshalText() ([]byte, error) { return tenc(`TI:%d`, i) }

func tenc(format string, a ...interface{}) ([]byte, error) {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, format, a...)
	return buf.Bytes(), nil
}

// Issue 13783
func TestEncodeBytekind(t *testing.T) {
	testdata := []struct {
		data interface{}
		want string
	}{
		{byte(7), "7"},
		{jsonbyte(7), `{"JB":7}`},
		{textbyte(4), `"TB:4"`},
		{jsonint(5), `{"JI":5}`},
		{textint(1), `"TI:1"`},
		{[]byte{0, 1}, `"AAE="`},
		{[]jsonbyte{0, 1}, `[{"JB":0},{"JB":1}]`},
		{[][]jsonbyte{{0, 1}, {3}}, `[[{"JB":0},{"JB":1}],[{"JB":3}]]`},
		{[]textbyte{2, 3}, `["TB:2","TB:3"]`},
		{[]jsonint{5, 4}, `[{"JI":5},{"JI":4}]`},
		{[]textint{9, 3}, `["TI:9","TI:3"]`},
		{[]int{9, 3}, `[9,3]`},
	}
	for _, d := range testdata {
		js, err := Marshal(d.data)
		if err != nil {
			t.Error(err)
			continue
		}
		got, want := string(js), d.want
		if got != want {
			t.Errorf("got %s, want %s", got, want)
		}
	}
}
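
Issue 13783 is about precedence: a type whose underlying kind is byte still gets its MarshalJSON/MarshalText method called, and a slice of such a type is encoded element by element rather than as base64. Only a plain []byte takes the base64 fast path. A compact sketch of that precedence against stock encoding/json (the loud type name is made up):

package main

import (
	"encoding/json"
	"fmt"
)

// Editor's sketch: loud has byte as its underlying type but provides
// MarshalText, so the marshaler wins over the []byte/base64 special case.
type loud byte

func (b loud) MarshalText() ([]byte, error) {
	return []byte(fmt.Sprintf("B%d", b)), nil
}

func main() {
	plain, _ := json.Marshal([]byte{0, 1})
	fmt.Println(string(plain)) // "AAE=" (base64)

	custom, _ := json.Marshal([]loud{0, 1})
	fmt.Println(string(custom)) // ["B0","B1"]
}
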
func TestTextMarshalerMapKeysAreSorted(t *testing.T) {
	b, err := Marshal(map[unmarshalerText]int{
		{"x", "y"}: 1,
		{"y", "x"}: 2,
		{"a", "z"}: 3,
		{"z", "a"}: 4,
	})
	if err != nil {
		t.Fatalf("Failed to Marshal text.Marshaler: %v", err)
	}
	const want = `{"a:z":3,"x:y":1,"y:x":2,"z:a":4}`
	if string(b) != want {
		t.Errorf("Marshal map with text.Marshaler keys: got %#q, want %#q", b, want)
	}
}
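
unmarshalerText is defined in the package's decode tests; its MarshalText joins the two fields with a colon. The behavior under test is that non-string map keys are legal as long as they implement encoding.TextMarshaler, and that output keys are sorted by their marshaled text. A self-contained sketch against stock encoding/json (the pair type is illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

// Editor's sketch: pair is a non-string map key made encodable
// via TextMarshaler.
type pair struct{ A, B string }

func (p pair) MarshalText() ([]byte, error) {
	return []byte(p.A + ":" + p.B), nil
}

func main() {
	b, _ := json.Marshal(map[pair]int{
		{"x", "y"}: 1,
		{"a", "z"}: 3,
	})
	// Keys come out sorted by their marshaled text.
	fmt.Println(string(b)) // {"a:z":3,"x:y":1}
}
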
252
vendor/gopkg.in/mgo.v2/internal/json/example_test.go
generated
vendored
@@ -1,252 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json_test

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"os"
	"strings"
)

func ExampleMarshal() {
	type ColorGroup struct {
		ID     int
		Name   string
		Colors []string
	}
	group := ColorGroup{
		ID:     1,
		Name:   "Reds",
		Colors: []string{"Crimson", "Red", "Ruby", "Maroon"},
	}
	b, err := json.Marshal(group)
	if err != nil {
		fmt.Println("error:", err)
	}
	os.Stdout.Write(b)
	// Output:
	// {"ID":1,"Name":"Reds","Colors":["Crimson","Red","Ruby","Maroon"]}
}
func ExampleUnmarshal() {
	var jsonBlob = []byte(`[
		{"Name": "Platypus", "Order": "Monotremata"},
		{"Name": "Quoll", "Order": "Dasyuromorphia"}
	]`)
	type Animal struct {
		Name  string
		Order string
	}
	var animals []Animal
	err := json.Unmarshal(jsonBlob, &animals)
	if err != nil {
		fmt.Println("error:", err)
	}
	fmt.Printf("%+v", animals)
	// Output:
	// [{Name:Platypus Order:Monotremata} {Name:Quoll Order:Dasyuromorphia}]
}

// This example uses a Decoder to decode a stream of distinct JSON values.
func ExampleDecoder() {
	const jsonStream = `
		{"Name": "Ed", "Text": "Knock knock."}
		{"Name": "Sam", "Text": "Who's there?"}
		{"Name": "Ed", "Text": "Go fmt."}
		{"Name": "Sam", "Text": "Go fmt who?"}
		{"Name": "Ed", "Text": "Go fmt yourself!"}
	`
	type Message struct {
		Name, Text string
	}
	dec := json.NewDecoder(strings.NewReader(jsonStream))
	for {
		var m Message
		if err := dec.Decode(&m); err == io.EOF {
			break
		} else if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s: %s\n", m.Name, m.Text)
	}
	// Output:
	// Ed: Knock knock.
	// Sam: Who's there?
	// Ed: Go fmt.
	// Sam: Go fmt who?
	// Ed: Go fmt yourself!
}
// This example uses a Decoder to decode a stream of distinct JSON values.
func ExampleDecoder_Token() {
	const jsonStream = `
		{"Message": "Hello", "Array": [1, 2, 3], "Null": null, "Number": 1.234}
	`
	dec := json.NewDecoder(strings.NewReader(jsonStream))
	for {
		t, err := dec.Token()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%T: %v", t, t)
		if dec.More() {
			fmt.Printf(" (more)")
		}
		fmt.Printf("\n")
	}
	// Output:
	// json.Delim: { (more)
	// string: Message (more)
	// string: Hello (more)
	// string: Array (more)
	// json.Delim: [ (more)
	// float64: 1 (more)
	// float64: 2 (more)
	// float64: 3
	// json.Delim: ] (more)
	// string: Null (more)
	// <nil>: <nil> (more)
	// string: Number (more)
	// float64: 1.234
	// json.Delim: }
}

// This example uses a Decoder to decode a streaming array of JSON objects.
func ExampleDecoder_Decode_stream() {
	const jsonStream = `
	[
		{"Name": "Ed", "Text": "Knock knock."},
		{"Name": "Sam", "Text": "Who's there?"},
		{"Name": "Ed", "Text": "Go fmt."},
		{"Name": "Sam", "Text": "Go fmt who?"},
		{"Name": "Ed", "Text": "Go fmt yourself!"}
	]
	`
	type Message struct {
		Name, Text string
	}
	dec := json.NewDecoder(strings.NewReader(jsonStream))

	// read open bracket
	t, err := dec.Token()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%T: %v\n", t, t)

	var m Message
	// while the array contains values
	for dec.More() {

		// decode an array value (Message)
		err := dec.Decode(&m)
		if err != nil {
			log.Fatal(err)
		}

		fmt.Printf("%v: %v\n", m.Name, m.Text)
	}

	// read closing bracket
	t, err = dec.Token()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%T: %v\n", t, t)

	// Output:
	// json.Delim: [
	// Ed: Knock knock.
	// Sam: Who's there?
	// Ed: Go fmt.
	// Sam: Go fmt who?
	// Ed: Go fmt yourself!
	// json.Delim: ]

}
// This example uses RawMessage to delay parsing part of a JSON message.
func ExampleRawMessage() {
	type Color struct {
		Space string
		Point json.RawMessage // delay parsing until we know the color space
	}
	type RGB struct {
		R uint8
		G uint8
		B uint8
	}
	type YCbCr struct {
		Y  uint8
		Cb int8
		Cr int8
	}

	var j = []byte(`[
		{"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}},
		{"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}}
	]`)
	var colors []Color
	err := json.Unmarshal(j, &colors)
	if err != nil {
		log.Fatalln("error:", err)
	}

	for _, c := range colors {
		var dst interface{}
		switch c.Space {
		case "RGB":
			dst = new(RGB)
		case "YCbCr":
			dst = new(YCbCr)
		}
		err := json.Unmarshal(c.Point, dst)
		if err != nil {
			log.Fatalln("error:", err)
		}
		fmt.Println(c.Space, dst)
	}
	// Output:
	// YCbCr &{255 0 -10}
	// RGB &{98 218 255}
}

func ExampleIndent() {
	type Road struct {
		Name   string
		Number int
	}
	roads := []Road{
		{"Diamond Fork", 29},
		{"Sheep Creek", 51},
	}

	b, err := json.Marshal(roads)
	if err != nil {
		log.Fatal(err)
	}

	var out bytes.Buffer
	json.Indent(&out, b, "=", "\t")
	out.WriteTo(os.Stdout)
	// Output:
	// [
	// =	{
	// =		"Name": "Diamond Fork",
	// =		"Number": 29
	// =	},
	// =	{
	// =		"Name": "Sheep Creek",
	// =		"Number": 51
	// =	}
	// =]
}
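
ExampleIndent drives Indent directly with an unusual "=" prefix to make the output's shape visible. For the common pretty-printing case, json.MarshalIndent folds the two steps together; a small sketch reusing the Road shape from the example above:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// Editor's sketch, not part of the vendored file.
type Road struct {
	Name   string
	Number int
}

func main() {
	roads := []Road{{"Diamond Fork", 29}, {"Sheep Creek", 51}}

	// MarshalIndent = Marshal followed by Indent, with no prefix here.
	b, err := json.MarshalIndent(roads, "", "\t")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
}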