add functions/vendor files

This commit is contained in:
Reed Allman
2017-06-11 02:05:36 -07:00
parent 6ee9c1fa0a
commit f2c7aa5ee6
7294 changed files with 1629834 additions and 0 deletions

29
vendor/gopkg.in/go-playground/validator.v8/.gitignore generated vendored Normal file

@@ -0,0 +1,29 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
*.test
*.out
*.txt
cover.html
README.html

22
vendor/gopkg.in/go-playground/validator.v8/LICENSE generated vendored Normal file

@@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2015 Dean Karn
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

368
vendor/gopkg.in/go-playground/validator.v8/README.md generated vendored Normal file

@@ -0,0 +1,368 @@
Package validator
================
![Validator](logo.png)
[![Join the chat at https://gitter.im/bluesuncorp/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Build Status](https://semaphoreci.com/api/v1/projects/ec20115f-ef1b-4c7d-9393-cc76aba74eb4/530054/badge.svg)](https://semaphoreci.com/joeybloggs/validator)
[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=v8&service=github)](https://coveralls.io/github/go-playground/validator?branch=v8)
[![Go Report Card](http://goreportcard.com/badge/go-playground/validator)](http://goreportcard.com/report/go-playground/validator)
[![GoDoc](https://godoc.org/gopkg.in/go-playground/validator.v8?status.svg)](https://godoc.org/gopkg.in/go-playground/validator.v8)
Package validator implements value validations for structs and individual fields based on tags.
It has the following **unique** features:
- Cross Field and Cross Struct validations by using validation tags or custom validators.
- Slice, Array and Map diving, which allows any or all levels of a multidimensional field to be validated.
- Handles type interface by determining its underlying type prior to validation.
- Handles custom field types, such as the sql driver [Valuer](https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29).
- Alias validation tags, which allow several validations to be mapped to a single tag, making validations easier to define on structs.
- Extraction of a custom-defined field name, e.g. you can specify that the JSON name be extracted while validating, and have it available in the resulting FieldError.
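All of these features surface as plain struct tags. A minimal sketch of the dive and alias features from the list above (the struct and field names are illustrative; `iscolor` is the baked-in alias documented later in this package):

```go
type Post struct {
	// dive: gt=0 applies to the slice itself, required to each element
	Tags []string `validate:"gt=0,dive,required"`
	// iscolor is a baked-in alias for "hexcolor|rgb|rgba|hsl|hsla"
	Accent string `validate:"iscolor"`
}
```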
Installation
------------
Use go get.

	go get gopkg.in/go-playground/validator.v8

or to update

	go get -u gopkg.in/go-playground/validator.v8

Then import the validator package into your own code.

	import "gopkg.in/go-playground/validator.v8"
Error Return Value
-------
Validation functions return type error. They do so to avoid the issue discussed in the following, where err is always != nil:
* http://stackoverflow.com/a/29138676/3158232
* https://github.com/go-playground/validator/issues/134
validator only returns nil or ValidationErrors as type error; so in your code all you need to do
is check if the error returned is not nil, and if it is not, type assert it to type ValidationErrors,
like so:
```go
err := validate.Struct(mystruct)
validationErrors := err.(validator.ValidationErrors)
```
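In practice you guard the type assertion with the nil check first; a fuller sketch of the same pattern (`mystruct` stands in for your own value):

```go
errs := validate.Struct(mystruct)
if errs != nil {
	// safe: validate only ever returns nil or ValidationErrors
	for _, fieldErr := range errs.(validator.ValidationErrors) {
		fmt.Println(fieldErr.Field, fieldErr.Tag) // build your own message here
	}
}
```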
Usage and documentation
------
Please see http://godoc.org/gopkg.in/go-playground/validator.v8 for detailed usage docs.
##### Examples:
Struct & Field validation
```go
package main

import (
	"fmt"

	"gopkg.in/go-playground/validator.v8"
)

// User contains user information
type User struct {
	FirstName      string     `validate:"required"`
	LastName       string     `validate:"required"`
	Age            uint8      `validate:"gte=0,lte=130"`
	Email          string     `validate:"required,email"`
	FavouriteColor string     `validate:"hexcolor|rgb|rgba"`
	Addresses      []*Address `validate:"required,dive,required"` // a person can have a home and cottage...
}

// Address houses a users address information
type Address struct {
	Street string `validate:"required"`
	City   string `validate:"required"`
	Planet string `validate:"required"`
	Phone  string `validate:"required"`
}

var validate *validator.Validate

func main() {
	config := &validator.Config{TagName: "validate"}

	validate = validator.New(config)

	validateStruct()
	validateField()
}

func validateStruct() {
	address := &Address{
		Street: "Eavesdown Docks",
		Planet: "Persphone",
		Phone:  "none",
	}

	user := &User{
		FirstName:      "Badger",
		LastName:       "Smith",
		Age:            135,
		Email:          "Badger.Smith@gmail.com",
		FavouriteColor: "#000",
		Addresses:      []*Address{address},
	}

	// returns nil or ValidationErrors ( map[string]*FieldError )
	errs := validate.Struct(user)

	if errs != nil {
		fmt.Println(errs) // output: Key: "User.Age" Error:Field validation for "Age" failed on the "lte" tag
		//                            Key: "User.Addresses[0].City" Error:Field validation for "City" failed on the "required" tag
		err := errs.(validator.ValidationErrors)["User.Addresses[0].City"]
		fmt.Println(err.Field) // output: City
		fmt.Println(err.Tag)   // output: required
		fmt.Println(err.Kind)  // output: string
		fmt.Println(err.Type)  // output: string
		fmt.Println(err.Param) // output:
		fmt.Println(err.Value) // output:

		// from here you can create your own error messages in whatever language you wish
		return
	}

	// save user to database
}

func validateField() {
	myEmail := "joeybloggs.gmail.com"

	errs := validate.Field(myEmail, "required,email")

	if errs != nil {
		fmt.Println(errs) // output: Key: "" Error:Field validation for "" failed on the "email" tag
		return
	}

	// email ok, move on
}
```
Custom Field Type
```go
package main

import (
	"database/sql"
	"database/sql/driver"
	"fmt"
	"reflect"

	"gopkg.in/go-playground/validator.v8"
)

// DbBackedUser User struct
type DbBackedUser struct {
	Name sql.NullString `validate:"required"`
	Age  sql.NullInt64  `validate:"required"`
}

func main() {
	config := &validator.Config{TagName: "validate"}

	validate := validator.New(config)

	// register all sql.Null* types to use the ValidateValuer CustomTypeFunc
	validate.RegisterCustomTypeFunc(ValidateValuer, sql.NullString{}, sql.NullInt64{}, sql.NullBool{}, sql.NullFloat64{})

	x := DbBackedUser{Name: sql.NullString{String: "", Valid: true}, Age: sql.NullInt64{Int64: 0, Valid: false}}

	// nil check first: type asserting a nil error would panic
	errs := validate.Struct(x)
	if errs != nil {
		fmt.Printf("Errs:\n%+v\n", errs)
	}
}

// ValidateValuer implements validator.CustomTypeFunc
func ValidateValuer(field reflect.Value) interface{} {
	if valuer, ok := field.Interface().(driver.Valuer); ok {
		val, err := valuer.Value()
		if err == nil {
			return val
		}
		// handle the error how you want
	}

	return nil
}
```
Struct Level Validation
```go
package main

import (
	"fmt"
	"reflect"

	"gopkg.in/go-playground/validator.v8"
)

// User contains user information
type User struct {
	FirstName      string     `json:"fname"`
	LastName       string     `json:"lname"`
	Age            uint8      `validate:"gte=0,lte=130"`
	Email          string     `validate:"required,email"`
	FavouriteColor string     `validate:"hexcolor|rgb|rgba"`
	Addresses      []*Address `validate:"required,dive,required"` // a person can have a home and cottage...
}

// Address houses a users address information
type Address struct {
	Street string `validate:"required"`
	City   string `validate:"required"`
	Planet string `validate:"required"`
	Phone  string `validate:"required"`
}

var validate *validator.Validate

func main() {
	config := &validator.Config{TagName: "validate"}

	validate = validator.New(config)
	validate.RegisterStructValidation(UserStructLevelValidation, User{})

	validateStruct()
}

// UserStructLevelValidation contains custom struct level validations that don't always
// make sense at the field validation level. For example, this function validates that either
// FirstName or LastName exists; this could have been done with a custom field validation, but then
// it would have had to be added to both fields, duplicating the logic + overhead. This way it is
// only validated once.
//
// NOTE: you may ask why not just do this outside of validator. Doing it this way
// hooks right into validator, and you can combine it with validation tags and still have a
// common error output format.
func UserStructLevelValidation(v *validator.Validate, structLevel *validator.StructLevel) {
	user := structLevel.CurrentStruct.Interface().(User)

	if len(user.FirstName) == 0 && len(user.LastName) == 0 {
		structLevel.ReportError(reflect.ValueOf(user.FirstName), "FirstName", "fname", "fnameorlname")
		structLevel.ReportError(reflect.ValueOf(user.LastName), "LastName", "lname", "fnameorlname")
	}

	// plus you can do more, even with a different tag than "fnameorlname"
}

func validateStruct() {
	address := &Address{
		Street: "Eavesdown Docks",
		Planet: "Persphone",
		Phone:  "none",
		City:   "Unknown",
	}

	user := &User{
		FirstName:      "",
		LastName:       "",
		Age:            45,
		Email:          "Badger.Smith@gmail.com",
		FavouriteColor: "#000",
		Addresses:      []*Address{address},
	}

	// returns nil or ValidationErrors ( map[string]*FieldError )
	errs := validate.Struct(user)

	if errs != nil {
		fmt.Println(errs) // output: Key: 'User.LastName' Error:Field validation for 'LastName' failed on the 'fnameorlname' tag
		//                            Key: 'User.FirstName' Error:Field validation for 'FirstName' failed on the 'fnameorlname' tag
		err := errs.(validator.ValidationErrors)["User.FirstName"]
		fmt.Println(err.Field) // output: FirstName
		fmt.Println(err.Tag)   // output: fnameorlname
		fmt.Println(err.Kind)  // output: string
		fmt.Println(err.Type)  // output: string
		fmt.Println(err.Param) // output:
		fmt.Println(err.Value) // output:

		// from here you can create your own error messages in whatever language you wish
		return
	}

	// save user to database
}
```
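Note that the parallel benchmarks below share a single *Validate instance across goroutines: parsed struct and tag metadata are cached internally (see cache.go), so the usual pattern is to create one validator at startup and reuse it everywhere. A minimal sketch (the `saveUser` helper is illustrative):

```go
var validate = validator.New(&validator.Config{TagName: "validate"})

func saveUser(u *User) error {
	// reusing the instance keeps the struct/tag caches warm across calls
	if errs := validate.Struct(u); errs != nil {
		return errs
	}
	// persist u...
	return nil
}
```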
Benchmarks
------
###### Run on MacBook Pro (Retina, 15-inch, Late 2013) 2.6 GHz Intel Core i7 16 GB 1600 MHz DDR3 using Go version go1.5.2 darwin/amd64
```
go test -cpu=4 -bench=. -benchmem=true
PASS
BenchmarkFieldSuccess-4 10000000 176 ns/op 0 B/op 0 allocs/op
BenchmarkFieldFailure-4 2000000 727 ns/op 432 B/op 4 allocs/op
BenchmarkFieldDiveSuccess-4 500000 3220 ns/op 480 B/op 27 allocs/op
BenchmarkFieldDiveFailure-4 500000 3823 ns/op 912 B/op 31 allocs/op
BenchmarkFieldCustomTypeSuccess-4 5000000 368 ns/op 32 B/op 2 allocs/op
BenchmarkFieldCustomTypeFailure-4 2000000 699 ns/op 432 B/op 4 allocs/op
BenchmarkFieldOrTagSuccess-4 1000000 1265 ns/op 16 B/op 1 allocs/op
BenchmarkFieldOrTagFailure-4 1000000 1182 ns/op 464 B/op 6 allocs/op
BenchmarkStructLevelValidationSuccess-4 2000000 739 ns/op 176 B/op 6 allocs/op
BenchmarkStructLevelValidationFailure-4 1000000 1368 ns/op 640 B/op 11 allocs/op
BenchmarkStructSimpleCustomTypeSuccess-4 2000000 965 ns/op 80 B/op 5 allocs/op
BenchmarkStructSimpleCustomTypeFailure-4 1000000 1561 ns/op 688 B/op 11 allocs/op
BenchmarkStructPartialSuccess-4 1000000 1285 ns/op 384 B/op 10 allocs/op
BenchmarkStructPartialFailure-4 1000000 1879 ns/op 832 B/op 15 allocs/op
BenchmarkStructExceptSuccess-4 2000000 1038 ns/op 336 B/op 7 allocs/op
BenchmarkStructExceptFailure-4 1000000 1330 ns/op 384 B/op 10 allocs/op
BenchmarkStructSimpleCrossFieldSuccess-4 1000000 1081 ns/op 128 B/op 6 allocs/op
BenchmarkStructSimpleCrossFieldFailure-4 1000000 1737 ns/op 592 B/op 11 allocs/op
BenchmarkStructSimpleCrossStructCrossFieldSuccess-4 1000000 1790 ns/op 192 B/op 10 allocs/op
BenchmarkStructSimpleCrossStructCrossFieldFailure-4 500000 2431 ns/op 656 B/op 15 allocs/op
BenchmarkStructSimpleSuccess-4 2000000 950 ns/op 48 B/op 3 allocs/op
BenchmarkStructSimpleFailure-4 1000000 1672 ns/op 688 B/op 11 allocs/op
BenchmarkStructSimpleSuccessParallel-4 5000000 271 ns/op 48 B/op 3 allocs/op
BenchmarkStructSimpleFailureParallel-4 2000000 670 ns/op 688 B/op 11 allocs/op
BenchmarkStructComplexSuccess-4 300000 5828 ns/op 544 B/op 32 allocs/op
BenchmarkStructComplexFailure-4 200000 11382 ns/op 3912 B/op 77 allocs/op
BenchmarkStructComplexSuccessParallel-4 1000000 1739 ns/op 544 B/op 32 allocs/op
BenchmarkStructComplexFailureParallel-4 300000 4682 ns/op 3912 B/op 77 allocs/op
```
Complementary Software
----------------------
Here is a list of software that complements this library, either pre- or post-validation.
* [Gorilla Schema](https://github.com/gorilla/schema) - Package gorilla/schema fills a struct with form values.
* [Conform](https://github.com/leebenson/conform) - Trims, sanitizes & scrubs data based on struct tags.
How to Contribute
------
There will always be a development branch for each version, i.e. `v1-development`. In order to contribute,
please make your pull requests against those branches.
If the changes being proposed or requested are breaking changes, please create an issue for discussion,
or create a pull request against the highest development branch. For example, this package has a
v1 and a v1-development branch; however, there will also be a v2-development branch even though v2 doesn't exist yet.
I strongly encourage everyone who creates a custom validation function to contribute it and
help make this package even better.
License
------
Distributed under the MIT License; see the LICENSE file in the code for more details.

1238
vendor/gopkg.in/go-playground/validator.v8/baked_in.go generated vendored Normal file

File diff suppressed because it is too large

@@ -0,0 +1,524 @@
package validator

import (
	sql "database/sql/driver"
	"testing"
	"time"
)

func BenchmarkFieldSuccess(b *testing.B) {
	var s *string
	tmp := "1"
	s = &tmp

	for n := 0; n < b.N; n++ {
		validate.Field(s, "len=1")
	}
}

func BenchmarkFieldFailure(b *testing.B) {
	var s *string
	tmp := "12"
	s = &tmp

	for n := 0; n < b.N; n++ {
		validate.Field(s, "len=1")
	}
}

func BenchmarkFieldDiveSuccess(b *testing.B) {
	m := make([]*string, 3)
	t1 := "val1"
	t2 := "val2"
	t3 := "val3"

	m[0] = &t1
	m[1] = &t2
	m[2] = &t3

	for n := 0; n < b.N; n++ {
		validate.Field(m, "required,dive,required")
	}
}

func BenchmarkFieldDiveFailure(b *testing.B) {
	m := make([]*string, 3)
	t1 := "val1"
	t2 := ""
	t3 := "val3"

	m[0] = &t1
	m[1] = &t2
	m[2] = &t3

	for n := 0; n < b.N; n++ {
		validate.Field(m, "required,dive,required")
	}
}

func BenchmarkFieldCustomTypeSuccess(b *testing.B) {
	validate.RegisterCustomTypeFunc(ValidateValuerType, (*sql.Valuer)(nil), valuer{})

	val := valuer{
		Name: "1",
	}

	for n := 0; n < b.N; n++ {
		validate.Field(val, "len=1")
	}
}

func BenchmarkFieldCustomTypeFailure(b *testing.B) {
	validate.RegisterCustomTypeFunc(ValidateValuerType, (*sql.Valuer)(nil), valuer{})

	val := valuer{}

	for n := 0; n < b.N; n++ {
		validate.Field(val, "len=1")
	}
}

func BenchmarkFieldOrTagSuccess(b *testing.B) {
	var s *string
	tmp := "rgba(0,0,0,1)"
	s = &tmp

	for n := 0; n < b.N; n++ {
		validate.Field(s, "rgb|rgba")
	}
}

func BenchmarkFieldOrTagFailure(b *testing.B) {
	var s *string
	tmp := "#000"
	s = &tmp

	for n := 0; n < b.N; n++ {
		validate.Field(s, "rgb|rgba")
	}
}

func BenchmarkStructLevelValidationSuccess(b *testing.B) {
	validate.RegisterStructValidation(StructValidationTestStructSuccess, TestStruct{})

	tst := &TestStruct{
		String: "good value",
	}

	for n := 0; n < b.N; n++ {
		validate.Struct(tst)
	}
}

func BenchmarkStructLevelValidationFailure(b *testing.B) {
	validate.RegisterStructValidation(StructValidationTestStruct, TestStruct{})

	tst := &TestStruct{
		String: "good value",
	}

	for n := 0; n < b.N; n++ {
		validate.Struct(tst)
	}
}

func BenchmarkStructSimpleCustomTypeSuccess(b *testing.B) {
	validate.RegisterCustomTypeFunc(ValidateValuerType, (*sql.Valuer)(nil), valuer{})

	val := valuer{
		Name: "1",
	}

	type Foo struct {
		Valuer   valuer `validate:"len=1"`
		IntValue int    `validate:"min=5,max=10"`
	}

	validFoo := &Foo{Valuer: val, IntValue: 7}

	for n := 0; n < b.N; n++ {
		validate.Struct(validFoo)
	}
}

func BenchmarkStructSimpleCustomTypeFailure(b *testing.B) {
	validate.RegisterCustomTypeFunc(ValidateValuerType, (*sql.Valuer)(nil), valuer{})

	val := valuer{}

	type Foo struct {
		Valuer   valuer `validate:"len=1"`
		IntValue int    `validate:"min=5,max=10"`
	}

	validFoo := &Foo{Valuer: val, IntValue: 3}

	for n := 0; n < b.N; n++ {
		validate.Struct(validFoo)
	}
}

func BenchmarkStructPartialSuccess(b *testing.B) {
	type Test struct {
		Name     string `validate:"required"`
		NickName string `validate:"required"`
	}

	test := &Test{
		Name: "Joey Bloggs",
	}

	for n := 0; n < b.N; n++ {
		validate.StructPartial(test, "Name")
	}
}

func BenchmarkStructPartialFailure(b *testing.B) {
	type Test struct {
		Name     string `validate:"required"`
		NickName string `validate:"required"`
	}

	test := &Test{
		Name: "Joey Bloggs",
	}

	for n := 0; n < b.N; n++ {
		validate.StructPartial(test, "NickName")
	}
}

func BenchmarkStructExceptSuccess(b *testing.B) {
	type Test struct {
		Name     string `validate:"required"`
		NickName string `validate:"required"`
	}

	test := &Test{
		Name: "Joey Bloggs",
	}

	for n := 0; n < b.N; n++ {
		validate.StructPartial(test, "Nickname")
	}
}

func BenchmarkStructExceptFailure(b *testing.B) {
	type Test struct {
		Name     string `validate:"required"`
		NickName string `validate:"required"`
	}

	test := &Test{
		Name: "Joey Bloggs",
	}

	for n := 0; n < b.N; n++ {
		validate.StructPartial(test, "Name")
	}
}

func BenchmarkStructSimpleCrossFieldSuccess(b *testing.B) {
	type Test struct {
		Start time.Time
		End   time.Time `validate:"gtfield=Start"`
	}

	now := time.Now().UTC()
	then := now.Add(time.Hour * 5)

	test := &Test{
		Start: now,
		End:   then,
	}

	for n := 0; n < b.N; n++ {
		validate.Struct(test)
	}
}

func BenchmarkStructSimpleCrossFieldFailure(b *testing.B) {
	type Test struct {
		Start time.Time
		End   time.Time `validate:"gtfield=Start"`
	}

	now := time.Now().UTC()
	then := now.Add(time.Hour * -5)

	test := &Test{
		Start: now,
		End:   then,
	}

	for n := 0; n < b.N; n++ {
		validate.Struct(test)
	}
}

func BenchmarkStructSimpleCrossStructCrossFieldSuccess(b *testing.B) {
	type Inner struct {
		Start time.Time
	}

	type Outer struct {
		Inner     *Inner
		CreatedAt time.Time `validate:"eqcsfield=Inner.Start"`
	}

	now := time.Now().UTC()

	inner := &Inner{
		Start: now,
	}

	outer := &Outer{
		Inner:     inner,
		CreatedAt: now,
	}

	for n := 0; n < b.N; n++ {
		validate.Struct(outer)
	}
}

func BenchmarkStructSimpleCrossStructCrossFieldFailure(b *testing.B) {
	type Inner struct {
		Start time.Time
	}

	type Outer struct {
		Inner     *Inner
		CreatedAt time.Time `validate:"eqcsfield=Inner.Start"`
	}

	now := time.Now().UTC()
	then := now.Add(time.Hour * 5)

	inner := &Inner{
		Start: then,
	}

	outer := &Outer{
		Inner:     inner,
		CreatedAt: now,
	}

	for n := 0; n < b.N; n++ {
		validate.Struct(outer)
	}
}

func BenchmarkStructSimpleSuccess(b *testing.B) {
	type Foo struct {
		StringValue string `validate:"min=5,max=10"`
		IntValue    int    `validate:"min=5,max=10"`
	}

	validFoo := &Foo{StringValue: "Foobar", IntValue: 7}

	for n := 0; n < b.N; n++ {
		validate.Struct(validFoo)
	}
}

func BenchmarkStructSimpleFailure(b *testing.B) {
	type Foo struct {
		StringValue string `validate:"min=5,max=10"`
		IntValue    int    `validate:"min=5,max=10"`
	}

	invalidFoo := &Foo{StringValue: "Fo", IntValue: 3}

	for n := 0; n < b.N; n++ {
		validate.Struct(invalidFoo)
	}
}

func BenchmarkStructSimpleSuccessParallel(b *testing.B) {
	type Foo struct {
		StringValue string `validate:"min=5,max=10"`
		IntValue    int    `validate:"min=5,max=10"`
	}

	validFoo := &Foo{StringValue: "Foobar", IntValue: 7}

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			validate.Struct(validFoo)
		}
	})
}

func BenchmarkStructSimpleFailureParallel(b *testing.B) {
	type Foo struct {
		StringValue string `validate:"min=5,max=10"`
		IntValue    int    `validate:"min=5,max=10"`
	}

	invalidFoo := &Foo{StringValue: "Fo", IntValue: 3}

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			validate.Struct(invalidFoo)
		}
	})
}

func BenchmarkStructComplexSuccess(b *testing.B) {
	tSuccess := &TestString{
		Required:  "Required",
		Len:       "length==10",
		Min:       "min=1",
		Max:       "1234567890",
		MinMax:    "12345",
		Lt:        "012345678",
		Lte:       "0123456789",
		Gt:        "01234567890",
		Gte:       "0123456789",
		OmitEmpty: "",
		Sub: &SubTest{
			Test: "1",
		},
		SubIgnore: &SubTest{
			Test: "",
		},
		Anonymous: struct {
			A string `validate:"required"`
		}{
			A: "1",
		},
		Iface: &Impl{
			F: "123",
		},
	}

	for n := 0; n < b.N; n++ {
		validate.Struct(tSuccess)
	}
}

func BenchmarkStructComplexFailure(b *testing.B) {
	tFail := &TestString{
		Required:  "",
		Len:       "",
		Min:       "",
		Max:       "12345678901",
		MinMax:    "",
		Lt:        "0123456789",
		Lte:       "01234567890",
		Gt:        "1",
		Gte:       "1",
		OmitEmpty: "12345678901",
		Sub: &SubTest{
			Test: "",
		},
		Anonymous: struct {
			A string `validate:"required"`
		}{
			A: "",
		},
		Iface: &Impl{
			F: "12",
		},
	}

	for n := 0; n < b.N; n++ {
		validate.Struct(tFail)
	}
}

func BenchmarkStructComplexSuccessParallel(b *testing.B) {
	tSuccess := &TestString{
		Required:  "Required",
		Len:       "length==10",
		Min:       "min=1",
		Max:       "1234567890",
		MinMax:    "12345",
		Lt:        "012345678",
		Lte:       "0123456789",
		Gt:        "01234567890",
		Gte:       "0123456789",
		OmitEmpty: "",
		Sub: &SubTest{
			Test: "1",
		},
		SubIgnore: &SubTest{
			Test: "",
		},
		Anonymous: struct {
			A string `validate:"required"`
		}{
			A: "1",
		},
		Iface: &Impl{
			F: "123",
		},
	}

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			validate.Struct(tSuccess)
		}
	})
}

func BenchmarkStructComplexFailureParallel(b *testing.B) {
	tFail := &TestString{
		Required:  "",
		Len:       "",
		Min:       "",
		Max:       "12345678901",
		MinMax:    "",
		Lt:        "0123456789",
		Lte:       "01234567890",
		Gt:        "1",
		Gte:       "1",
		OmitEmpty: "12345678901",
		Sub: &SubTest{
			Test: "",
		},
		Anonymous: struct {
			A string `validate:"required"`
		}{
			A: "",
		},
		Iface: &Impl{
			F: "12",
		},
	}

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			validate.Struct(tFail)
		}
	})
}

71
vendor/gopkg.in/go-playground/validator.v8/cache.go generated vendored Normal file

@@ -0,0 +1,71 @@
package validator

import (
	"reflect"
	"sync"
)

type cachedField struct {
	Idx       int
	Name      string
	AltName   string
	CachedTag *cachedTag
}

type cachedStruct struct {
	Name   string
	fields map[int]cachedField
}

type structCacheMap struct {
	lock sync.RWMutex
	m    map[reflect.Type]*cachedStruct
}

func (s *structCacheMap) Get(key reflect.Type) (*cachedStruct, bool) {
	s.lock.RLock()
	value, ok := s.m[key]
	s.lock.RUnlock()
	return value, ok
}

func (s *structCacheMap) Set(key reflect.Type, value *cachedStruct) {
	s.lock.Lock()
	s.m[key] = value
	s.lock.Unlock()
}

type cachedTag struct {
	tag             string
	isOmitEmpty     bool
	isNoStructLevel bool
	isStructOnly    bool
	diveTag         string
	tags            []*tagVals
}

type tagVals struct {
	tagVals [][]string
	isOrVal bool
	isAlias bool
	tag     string
}

type tagCacheMap struct {
	lock sync.RWMutex
	m    map[string]*cachedTag
}

func (s *tagCacheMap) Get(key string) (*cachedTag, bool) {
	s.lock.RLock()
	value, ok := s.m[key]
	s.lock.RUnlock()
	return value, ok
}

func (s *tagCacheMap) Set(key string, value *cachedTag) {
	s.lock.Lock()
	s.m[key] = value
	s.lock.Unlock()
}

794
vendor/gopkg.in/go-playground/validator.v8/doc.go generated vendored Normal file

@@ -0,0 +1,794 @@
/*
Package validator implements value validations for structs and individual fields
based on tags.
It can also handle Cross-Field and Cross-Struct validation for nested structs
and has the ability to dive into arrays and maps of any type.
Why not a better error message?
Because this library intends for you to handle your own error messages.
Why should I handle my own errors?
Many reasons. We built an internationalized application and needed to know the
field, and what validation failed so we could provide a localized error.
	if fieldErr.Field == "Name" {
		switch fieldErr.ErrorTag {
		case "required":
			return "Translated string based on field + error"
		default:
			return "Translated string based on field"
		}
	}
Validation Functions Return Type error
Doing things this way is actually how the standard library does it; see the
os.Open function here:
https://golang.org/pkg/os/#Open
The authors return type "error" to avoid the issue discussed in the following,
where err is always != nil:
http://stackoverflow.com/a/29138676/3158232
https://github.com/go-playground/validator/issues/134
Validator only returns nil or ValidationErrors as type error; so, in your code
all you need to do is check if the error returned is not nil, and if it is not,
type assert it to type ValidationErrors like so: err.(validator.ValidationErrors).
Custom Functions
Custom functions can be added. Example:
	// Structure
	func customFunc(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
		if whatever {
			return false
		}
		return true
	}

	validate.RegisterValidation("custom tag name", customFunc)
// NOTES: using the same tag name as an existing function
// will overwrite the existing one
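Once registered, the tag can be used like any built-in one. A minimal sketch, assuming
the function above was registered under the hypothetical tag name "is-awesome":

	validate.RegisterValidation("is-awesome", customFunc)

	type Data struct {
		Field string `validate:"is-awesome"`
	}

	errs := validate.Struct(&Data{Field: "value"}) // nil or ValidationErrors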
Cross-Field Validation
Cross-Field Validation can be done via the following tags:
- eqfield
- nefield
- gtfield
- gtefield
- ltfield
- ltefield
- eqcsfield
- necsfield
- gtcsfield
- gtecsfield
- ltcsfield
- ltecsfield
If, however, some custom cross-field validation is required, it can be done
using a custom validation.
Why not just have cross-fields validation tags (i.e. only eqcsfield and not
eqfield)?
The reason is efficiency. If you want to check a field within the same struct
"eqfield" only has to find the field on the same struct (1 level). But, if we
used "eqcsfield" it could be multiple levels down. Example:
	type Inner struct {
		StartDate time.Time
	}

	type Outer struct {
		InnerStructField *Inner
		CreatedAt        time.Time `validate:"ltecsfield=InnerStructField.StartDate"`
	}

	now := time.Now()

	inner := &Inner{
		StartDate: now,
	}

	outer := &Outer{
		InnerStructField: inner,
		CreatedAt:        now,
	}

	errs := validate.Struct(outer)
// NOTE: when calling validate.Struct(val) topStruct will be the top level struct passed
// into the function
// when calling validate.FieldWithValue(val, field, tag) val will be
// whatever you pass, struct, field...
// when calling validate.Field(field, tag) val will be nil
Multiple Validators
Multiple validators on a field will process in the order defined. Example:
	type Test struct {
		Field `validate:"max=10,min=1"`
	}

	// max will be checked then min
Bad Validator definitions are not handled by the library. Example:
	type Test struct {
		Field `validate:"min=10,max=0"`
	}

	// this definition of min max will never succeed
Using Validator Tags
Baked In Cross-Field validation only compares fields on the same struct.
If Cross-Field + Cross-Struct validation is needed you should implement your
own custom validator.
Comma (",") is the default separator of validation tags. If you wish to
have a comma included within the parameter (i.e. excludesall=,) you will need to
use the UTF-8 hex representation 0x2C, which is replaced in the code as a comma,
so the above will become excludesall=0x2C.
	type Test struct {
		Field `validate:"excludesall=,"`    // BAD! Do not include a comma.
		Field `validate:"excludesall=0x2C"` // GOOD! Use the UTF-8 hex representation.
	}
Pipe ("|") is the default separator of validation tags. If you wish to
have a pipe included within the parameter i.e. excludesall=| you will need to
use the UTF-8 hex representation 0x7C, which is replaced in the code as a pipe,
so the above will become excludesall=0x7C
type Test struct {
Field `validate:"excludesall=|"` // BAD! Do not include a a pipe!
Field `validate:"excludesall=0x7C"` // GOOD! Use the UTF-8 hex representation.
}
Baked In Validators and Tags
Here is a list of the current built in validators:
Skip Field
Tells the validation to skip this struct field; this is particularly
handy in ignoring embedded structs from being validated.
Usage: -
Or Operator
This is the 'or' operator allowing multiple validators to be used and
accepted. (Usage: rgb|rgba) <-- this would allow either rgb or rgba
colors to be accepted. This can also be combined with 'and', for example
(Usage: omitempty,rgb|rgba)
Usage: |
StructOnly
When a field that is a nested struct is encountered, and contains this flag,
any validation on the nested struct will be run, but none of the nested
struct fields will be validated. This is useful if inside of your program
you know the struct will be valid, but need to verify it has been assigned.
NOTE: only "required" and "omitempty" can be used on a struct itself.
Usage: structonly
NoStructLevel
Same as structonly tag except that any struct level validations will not run.
Usage: nostructlevel
Exists
Is a special tag without a validation function attached. It is used when a field
is a Pointer, Interface or Invalid and you wish to validate that it exists.
Example: if you want to ensure a bool exists, define the bool as a pointer and
use exists; it will ensure there is a value. required couldn't be used, as it
would fail when the bool was false. exists will fail if the value is a Pointer,
Interface or Invalid and is nil.
Usage: exists
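For example, a sketch (the struct and field names are illustrative):

	type Settings struct {
		// required would reject false; exists only rejects a nil pointer
		Enabled *bool `validate:"exists"`
	}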
Omit Empty
Allows conditional validation, for example if a field is not set with
a value (Determined by the "required" validator) then other validation
such as min or max won't run, but if a value is set validation will run.
Usage: omitempty
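For example, a sketch (the field name is illustrative):

	// min=3 only runs when Nickname is non-empty
	Nickname string `validate:"omitempty,min=3"`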
Dive
This tells the validator to dive into a slice, array or map and validate that
level of the slice, array or map with the validation tags that follow.
Multidimensional nesting is also supported, each level you wish to dive will
require another dive tag.
Usage: dive
Example #1
[][]string with validation tag "gt=0,dive,len=1,dive,required"
// gt=0 will be applied to []
// len=1 will be applied to []string
// required will be applied to string
Example #2
[][]string with validation tag "gt=0,dive,dive,required"
// gt=0 will be applied to []
// []string will be spared validation
// required will be applied to string
Required
This validates that the value is not the data type's default zero value.
For numbers ensures value is not zero. For strings ensures value is
not "". For slices, maps, pointers, interfaces, channels and functions
ensures the value is not nil.
Usage: required
Length
For numbers, len will ensure that the value is
equal to the parameter given. For strings, it checks that
the string length is exactly that number of characters. For slices,
arrays, and maps, validates the number of items.
Usage: len=10
Maximum
For numbers, max will ensure that the value is
less than or equal to the parameter given. For strings, it checks
that the string length is at most that number of characters. For
slices, arrays, and maps, validates the number of items.
Usage: max=10
Minimum
For numbers, min will ensure that the value is
greater or equal to the parameter given. For strings, it checks that
the string length is at least that number of characters. For slices,
arrays, and maps, validates the number of items.
Usage: min=10
Equals
For strings & numbers, eq will ensure that the value is
equal to the parameter given. For slices, arrays, and maps,
validates the number of items.
Usage: eq=10
Not Equal
For strings & numbers, eq will ensure that the value is not
equal to the parameter given. For slices, arrays, and maps,
validates the number of items.
Usage: eq=10
Greater Than
For numbers, this will ensure that the value is greater than the
parameter given. For strings, it checks that the string length
is greater than that number of characters. For slices, arrays
and maps it validates the number of items.
Example #1
Usage: gt=10
Example #2 (time.Time)
For time.Time ensures the time value is greater than time.Now.UTC().
Usage: gt
Greater Than or Equal
Same as 'min' above. Kept both to make terminology with 'len' easier.
Example #1
Usage: gte=10
Example #2 (time.Time)
For time.Time ensures the time value is greater than or equal to time.Now.UTC().
Usage: gte
Less Than
For numbers, this will ensure that the value is less than the parameter given.
For strings, it checks that the string length is less than that number of
characters. For slices, arrays, and maps it validates the number of items.
Example #1
Usage: lt=10
Example #2 (time.Time)
For time.Time ensures the time value is less than time.Now.UTC().
Usage: lt
Less Than or Equal
Same as 'max' above. Kept both to make terminology with 'len' easier.
Example #1
Usage: lte=10
Example #2 (time.Time)
For time.Time ensures the time value is less than or equal to time.Now.UTC().
Usage: lte
Field Equals Another Field
This will validate the field value against another fields value either within
a struct or passed in field.
Example #1:
// Validation on Password field using:
Usage: eqfield=ConfirmPassword
Example #2:
// Validating by field:
validate.FieldWithValue(password, confirmpassword, "eqfield")
Field Equals Another Field (relative)
This does the same as eqfield except that it validates the field provided relative
to the top level struct.
Usage: eqcsfield=InnerStructField.Field
Field Does Not Equal Another Field
This will validate the field value against another fields value either within
a struct or passed in field.
Examples:
// Confirm two colors are not the same:
//
// Validation on Color field:
Usage: nefield=Color2
// Validating by field:
validate.FieldWithValue(color1, color2, "nefield")
Field Does Not Equal Another Field (relative)
This does the same as nefield except that it validates the field provided
relative to the top level struct.
Usage: necsfield=InnerStructField.Field
Field Greater Than Another Field
Only valid for Numbers and time.Time types, this will validate the field value
against another fields value either within a struct or passed in field.
usage examples are for validation of a Start and End date:
Example #1:
// Validation on End field using validate.Struct:
Usage: gtfield=Start
Example #2:
// Validating by field:
validate.FieldWithValue(start, end, "gtfield")
Field Greater Than Another Relative Field
This does the same as gtfield except that it validates the field provided
relative to the top level struct.
Usage: gtcsfield=InnerStructField.Field
Field Greater Than or Equal To Another Field
Only valid for Numbers and time.Time types, this will validate the field value
against another fields value either within a struct or passed in field.
usage examples are for validation of a Start and End date:
Example #1:
// Validation on End field using validate.Struct:
Usage: gtefield=Start
Example #2:
// Validating by field:
validate.FieldWithValue(start, end, "gtefield")
Field Greater Than or Equal To Another Relative Field
This does the same as gtefield except that it validates the field provided relative
to the top level struct.
Usage: gtecsfield=InnerStructField.Field
Less Than Another Field
Only valid for Numbers and time.Time types, this will validate the field value
against another fields value either within a struct or passed in field.
usage examples are for validation of a Start and End date:
Example #1:
// Validation on End field using validate.Struct:
Usage: ltfield=Start
Example #2:
// Validating by field:
validate.FieldWithValue(start, end, "ltfield")
Less Than Another Relative Field
This does the same as ltfield except that it validates the field provided relative
to the top level struct.
Usage: ltcsfield=InnerStructField.Field
Less Than or Equal To Another Field
Only valid for Numbers and time.Time types, this will validate the field value
against another fields value either within a struct or passed in field.
usage examples are for validation of a Start and End date:
Example #1:
// Validation on End field using validate.Struct:
Usage: ltefield=Start
Example #2:
// Validating by field:
validate.FieldWithValue(start, end, "ltefield")
Less Than or Equal To Another Relative Field
This does the same as ltefield except that it validates the field provided relative
to the top level struct.
Usage: ltecsfield=InnerStructField.Field
Alpha Only
This validates that a string value contains alpha characters only
Usage: alpha
Alphanumeric
This validates that a string value contains alphanumeric characters only
Usage: alphanum
Numeric
This validates that a string value contains a basic numeric value.
basic excludes exponents etc...
Usage: numeric
Hexadecimal String
This validates that a string value contains a valid hexadecimal.
Usage: hexadecimal
Hexcolor String
This validates that a string value contains a valid hex color including
hashtag (#)
Usage: hexcolor
RGB String
This validates that a string value contains a valid rgb color
Usage: rgb
RGBA String
This validates that a string value contains a valid rgba color
Usage: rgba
HSL String
This validates that a string value contains a valid hsl color
Usage: hsl
HSLA String
This validates that a string value contains a valid hsla color
Usage: hsla
E-mail String
This validates that a string value contains a valid email
This may not conform to all possibilities of any RFC standard, but neither
does any email provider accept all possibilities.
Usage: email
URL String
This validates that a string value contains a valid url
This will accept any url the golang request uri accepts but must contain
a scheme, for example http:// or rtmp://
Usage: url
URI String
This validates that a string value contains a valid uri
This will accept any uri the golang request uri accepts
Usage: uri
Base64 String
This validates that a string value contains a valid base64 value.
Although an empty string is valid base64, this will report an empty string
as an error; if you wish to accept an empty string as valid you can use
this with the omitempty tag.
Usage: base64
Contains
This validates that a string value contains the substring value.
Usage: contains=@
Contains Any
This validates that a string value contains any Unicode code points
in the substring value.
Usage: containsany=!@#?
Contains Rune
This validates that a string value contains the supplied rune value.
Usage: containsrune=@
Excludes
This validates that a string value does not contain the substring value.
Usage: excludes=@
Excludes All
This validates that a string value does not contain any Unicode code
points in the substring value.
Usage: excludesall=!@#?
Excludes Rune
This validates that a string value does not contain the supplied rune value.
Usage: excludesrune=@
International Standard Book Number
This validates that a string value contains a valid isbn10 or isbn13 value.
Usage: isbn
International Standard Book Number 10
This validates that a string value contains a valid isbn10 value.
Usage: isbn10
International Standard Book Number 13
This validates that a string value contains a valid isbn13 value.
Usage: isbn13
Universally Unique Identifier UUID
This validates that a string value contains a valid UUID.
Usage: uuid
Universally Unique Identifier UUID v3
This validates that a string value contains a valid version 3 UUID.
Usage: uuid3
Universally Unique Identifier UUID v4
This validates that a string value contains a valid version 4 UUID.
Usage: uuid4
Universally Unique Identifier UUID v5
This validates that a string value contains a valid version 5 UUID.
Usage: uuid5
ASCII
This validates that a string value contains only ASCII characters.
NOTE: if the string is blank, this validates as true.
Usage: ascii
Printable ASCII
This validates that a string value contains only printable ASCII characters.
NOTE: if the string is blank, this validates as true.
Usage: asciiprint
Multi-Byte Characters
This validates that a string value contains one or more multibyte characters.
NOTE: if the string is blank, this validates as true.
Usage: multibyte
Data URL
This validates that a string value contains a valid DataURI.
NOTE: this will also validate that the data portion is valid base64
Usage: datauri
Latitude
This validates that a string value contains a valid latitude.
Usage: latitude
Longitude
This validates that a string value contains a valid longitude.
Usage: longitude
Social Security Number SSN
This validates that a string value contains a valid U.S. Social Security Number.
Usage: ssn
Internet Protocol Address IP
This validates that a string value contains a valid IP Address.
Usage: ip
Internet Protocol Address IPv4
This validates that a string value contains a valid v4 IP Address.
Usage: ipv4
Internet Protocol Address IPv6
This validates that a string value contains a valid v6 IP Address.
Usage: ipv6
Classless Inter-Domain Routing CIDR
This validates that a string value contains a valid CIDR Address.
Usage: cidr
Classless Inter-Domain Routing CIDRv4
This validates that a string value contains a valid v4 CIDR Address.
Usage: cidrv4
Classless Inter-Domain Routing CIDRv6
This validates that a string value contains a valid v6 CIDR Address.
Usage: cidrv6
Media Access Control Address MAC
This validates that a string value contains a valid MAC Address.
NOTE: See Go's ParseMAC for accepted formats and types:
http://golang.org/src/net/mac.go?s=866:918#L29
Usage: mac
Alias Validators and Tags
NOTE: When returning an error, the tag returned in "FieldError" will be
the alias tag unless the dive tag is part of the alias. Everything after the
dive tag is not reported as the alias tag. Also, the "ActualTag" in the above
case will be the actual tag within the alias that failed.
Here is a list of the current built in alias tags:
"iscolor"
alias is "hexcolor|rgb|rgba|hsl|hsla" (Usage: iscolor)
Validator notes:
regex
a regex validator won't be added because commas and = signs can be part
of a regex, which conflicts with the validation definitions. Although
workarounds can be made, they take away from using pure regexes.
Furthermore, it's quick and dirty, but the regexes become harder to
maintain and are not reusable, so it's as much a programming philosophy
as anything.
In place of this, new validator functions should be created; a regex can
be used within the validator function and even be precompiled for better
efficiency within regexes.go.
And the best reason: you can submit a pull request and we can keep on
adding to the validation library of this package!
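A sketch of such a validator function, precompiling its regex the same way regexes.go
does (the tag name, regex, and function name are illustrative):

	var postalRegex = regexp.MustCompile(`^\d{5}$`)

	func isPostalCode(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
		return postalRegex.MatchString(field.String())
	}

	validate.RegisterValidation("postalcode", isPostalCode)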
Panics
This package panics when bad input is provided; this is by design, as bad code
like that should not make it to production.

	type Test struct {
		TestField string `validate:"nonexistantfunction=1"`
	}

	t := &Test{
		TestField: "Test",
	}

	validate.Struct(t) // this will panic
*/
package validator


@@ -0,0 +1,45 @@
package main

import (
	"database/sql"
	"database/sql/driver"
	"fmt"
	"reflect"

	"gopkg.in/go-playground/validator.v8"
)

// DbBackedUser User struct
type DbBackedUser struct {
	Name sql.NullString `validate:"required"`
	Age  sql.NullInt64  `validate:"required"`
}

func main() {
	config := &validator.Config{TagName: "validate"}

	validate := validator.New(config)

	// register all sql.Null* types to use the ValidateValuer CustomTypeFunc
	validate.RegisterCustomTypeFunc(ValidateValuer, sql.NullString{}, sql.NullInt64{}, sql.NullBool{}, sql.NullFloat64{})

	x := DbBackedUser{Name: sql.NullString{String: "", Valid: true}, Age: sql.NullInt64{Int64: 0, Valid: false}}

	errs := validate.Struct(x)
	if errs != nil {
		fmt.Printf("Errs:\n%+v\n", errs)
	}
}

// ValidateValuer implements validator.CustomTypeFunc
func ValidateValuer(field reflect.Value) interface{} {
	if valuer, ok := field.Interface().(driver.Valuer); ok {
		val, err := valuer.Value()
		if err == nil {
			return val
		}
		// handle the error how you want
	}

	return nil
}


@@ -0,0 +1,155 @@
package main

import (
	"errors"
	"fmt"
	"reflect"

	sql "database/sql/driver"

	"gopkg.in/go-playground/validator.v8"
)

// User contains user information
type User struct {
	FirstName      string     `validate:"required"`
	LastName       string     `validate:"required"`
	Age            uint8      `validate:"gte=0,lte=130"`
	Email          string     `validate:"required,email"`
	FavouriteColor string     `validate:"hexcolor|rgb|rgba"`
	Addresses      []*Address `validate:"required,dive,required"` // a person can have a home and cottage...
}

// Address houses a users address information
type Address struct {
	Street string `validate:"required"`
	City   string `validate:"required"`
	Planet string `validate:"required"`
	Phone  string `validate:"required"`
}

var validate *validator.Validate

func main() {
	config := &validator.Config{TagName: "validate"}

	validate = validator.New(config)

	validateStruct()
	validateField()
}

func validateStruct() {
	address := &Address{
		Street: "Eavesdown Docks",
		Planet: "Persphone",
		Phone:  "none",
	}

	user := &User{
		FirstName:      "Badger",
		LastName:       "Smith",
		Age:            135,
		Email:          "Badger.Smith@gmail.com",
		FavouriteColor: "#000",
		Addresses:      []*Address{address},
	}

	// returns nil or ValidationErrors ( map[string]*FieldError )
	errs := validate.Struct(user)

	if errs != nil {
		fmt.Println(errs) // output: Key: "User.Age" Error:Field validation for "Age" failed on the "lte" tag
		//                            Key: "User.Addresses[0].City" Error:Field validation for "City" failed on the "required" tag
		err := errs.(validator.ValidationErrors)["User.Addresses[0].City"]
		fmt.Println(err.Field) // output: City
		fmt.Println(err.Tag)   // output: required
		fmt.Println(err.Kind)  // output: string
		fmt.Println(err.Type)  // output: string
		fmt.Println(err.Param) // output:
		fmt.Println(err.Value) // output:

		// from here you can create your own error messages in whatever language you wish
		return
	}

	// save user to database
}

func validateField() {
	myEmail := "joeybloggs.gmail.com"

	errs := validate.Field(myEmail, "required,email")

	if errs != nil {
		fmt.Println(errs) // output: Key: "" Error:Field validation for "" failed on the "email" tag
		return
	}

	// email ok, move on
}

var validate2 *validator.Validate

type valuer struct {
	Name string
}

func (v valuer) Value() (sql.Value, error) {
	if v.Name == "errorme" {
		return nil, errors.New("some kind of error")
	}

	if v.Name == "blankme" {
		return "", nil
	}

	if len(v.Name) == 0 {
		return nil, nil
	}

	return v.Name, nil
}

// ValidateValuerType implements validator.CustomTypeFunc
func ValidateValuerType(field reflect.Value) interface{} {
	if valuer, ok := field.Interface().(sql.Valuer); ok {
		val, err := valuer.Value()
		if err != nil {
			// handle the error how you want
			return nil
		}

		return val
	}

	return nil
}

func main2() {
	config := &validator.Config{TagName: "validate"}

	validate2 = validator.New(config)
	validate2.RegisterCustomTypeFunc(ValidateValuerType, (*sql.Valuer)(nil), valuer{})

	validateCustomFieldType()
}

func validateCustomFieldType() {
	val := valuer{
		Name: "blankme",
	}

	errs := validate2.Field(val, "required")
	if errs != nil {
		fmt.Println(errs) // output: Key: "" Error:Field validation for "" failed on the "required" tag
		return
	}

	// all ok
}


@@ -0,0 +1,99 @@
package main

import (
	"fmt"
	"reflect"

	"gopkg.in/go-playground/validator.v8"
)

// User contains user information
type User struct {
	FirstName      string     `json:"fname"`
	LastName       string     `json:"lname"`
	Age            uint8      `validate:"gte=0,lte=130"`
	Email          string     `validate:"required,email"`
	FavouriteColor string     `validate:"hexcolor|rgb|rgba"`
	Addresses      []*Address `validate:"required,dive,required"` // a person can have a home and cottage...
}

// Address houses a users address information
type Address struct {
	Street string `validate:"required"`
	City   string `validate:"required"`
	Planet string `validate:"required"`
	Phone  string `validate:"required"`
}

var validate *validator.Validate

func main() {
	config := &validator.Config{TagName: "validate"}

	validate = validator.New(config)
	validate.RegisterStructValidation(UserStructLevelValidation, User{})

	validateStruct()
}

// UserStructLevelValidation contains custom struct level validations that don't always
// make sense at the field validation level. For example, this function validates that either
// FirstName or LastName exists; this could have been done with a custom field validation, but then
// it would have had to be added to both fields, duplicating the logic + overhead. This way it is
// only validated once.
//
// NOTE: you may ask why not just do this outside of validator. Doing it this way
// hooks right into validator, and you can combine it with validation tags and still have a
// common error output format.
func UserStructLevelValidation(v *validator.Validate, structLevel *validator.StructLevel) {
	user := structLevel.CurrentStruct.Interface().(User)

	if len(user.FirstName) == 0 && len(user.LastName) == 0 {
		structLevel.ReportError(reflect.ValueOf(user.FirstName), "FirstName", "fname", "fnameorlname")
		structLevel.ReportError(reflect.ValueOf(user.LastName), "LastName", "lname", "fnameorlname")
	}

	// plus you can do more, even with a different tag than "fnameorlname"
}

func validateStruct() {
	address := &Address{
		Street: "Eavesdown Docks",
		Planet: "Persphone",
		Phone:  "none",
		City:   "Unknown",
	}

	user := &User{
		FirstName:      "",
		LastName:       "",
		Age:            45,
		Email:          "Badger.Smith@gmail.com",
		FavouriteColor: "#000",
		Addresses:      []*Address{address},
	}

	// returns nil or ValidationErrors ( map[string]*FieldError )
	errs := validate.Struct(user)

	if errs != nil {
		fmt.Println(errs) // output: Key: 'User.LastName' Error:Field validation for 'LastName' failed on the 'fnameorlname' tag
		//                            Key: 'User.FirstName' Error:Field validation for 'FirstName' failed on the 'fnameorlname' tag
		err := errs.(validator.ValidationErrors)["User.FirstName"]
		fmt.Println(err.Field) // output: FirstName
		fmt.Println(err.Tag)   // output: fnameorlname
		fmt.Println(err.Kind)  // output: string
		fmt.Println(err.Type)  // output: string
		fmt.Println(err.Param) // output:
		fmt.Println(err.Value) // output:

		// from here you can create your own error messages in whatever language you wish
		return
	}

	// save user to database
}


@@ -0,0 +1,83 @@
package validator_test

import (
	"fmt"

	"gopkg.in/go-playground/validator.v8"
)

func ExampleValidate_new() {
	config := &validator.Config{TagName: "validate"}

	validator.New(config)
}

func ExampleValidate_field() {
	// This should be stored somewhere globally
	var validate *validator.Validate

	config := &validator.Config{TagName: "validate"}

	validate = validator.New(config)

	i := 0
	errs := validate.Field(i, "gt=1,lte=10")
	err := errs.(validator.ValidationErrors)[""]
	fmt.Println(err.Field)
	fmt.Println(err.Tag)
	fmt.Println(err.Kind) // NOTE: Kind and Type can be different i.e. time Kind=struct and Type=time.Time
	fmt.Println(err.Type)
	fmt.Println(err.Param)
	fmt.Println(err.Value)
	//Output:
	//
	//gt
	//int
	//int
	//1
	//0
}

func ExampleValidate_struct() {
	// This should be stored somewhere globally
	var validate *validator.Validate

	config := &validator.Config{TagName: "validate"}

	validate = validator.New(config)

	type ContactInformation struct {
		Phone  string `validate:"required"`
		Street string `validate:"required"`
		City   string `validate:"required"`
	}

	type User struct {
		Name               string `validate:"required,excludesall=!@#$%^&*()_+-=:;?/0x2C"` // 0x2C = comma (,)
		Age                int8   `validate:"required,gt=0,lt=150"`
		Email              string `validate:"email"`
		ContactInformation []*ContactInformation
	}

	contactInfo := &ContactInformation{
		Street: "26 Here Blvd.",
		City:   "Paradeso",
	}

	user := &User{
		Name:               "Joey Bloggs",
		Age:                31,
		Email:              "joeybloggs@gmail.com",
		ContactInformation: []*ContactInformation{contactInfo},
	}

	errs := validate.Struct(user)
	for _, v := range errs.(validator.ValidationErrors) {
		fmt.Println(v.Field) // Phone
		fmt.Println(v.Tag)   // required
		//... and so forth
		//Output:
		//Phone
		//required
	}
}

BIN
vendor/gopkg.in/go-playground/validator.v8/logo.png generated vendored Normal file

Binary file not shown.


59
vendor/gopkg.in/go-playground/validator.v8/regexes.go generated vendored Normal file

@@ -0,0 +1,59 @@
package validator
import "regexp"
const (
alphaRegexString = "^[a-zA-Z]+$"
alphaNumericRegexString = "^[a-zA-Z0-9]+$"
numericRegexString = "^[-+]?[0-9]+(?:\\.[0-9]+)?$"
numberRegexString = "^[0-9]+$"
hexadecimalRegexString = "^[0-9a-fA-F]+$"
hexcolorRegexString = "^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
rgbRegexString = "^rgb\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*\\)$"
rgbaRegexString = "^rgba\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$"
hslRegexString = "^hsl\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*\\)$"
hslaRegexString = "^hsla\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$"
emailRegexString = "^(?:(?:(?:(?:[a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(?:\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|(?:(?:\\x22)(?:(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(?:\\x20|\\x09)+)?(?:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:\\(?:[\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(\\x20|\\x09)+)?(?:\\x22)))@(?:(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
base64RegexString = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
iSBN10RegexString = "^(?:[0-9]{9}X|[0-9]{10})$"
iSBN13RegexString = "^(?:(?:97(?:8|9))[0-9]{10})$"
uUID3RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
uUID4RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
uUID5RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
uUIDRegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
aSCIIRegexString = "^[\x00-\x7F]*$"
printableASCIIRegexString = "^[\x20-\x7E]*$"
multibyteRegexString = "[^\x00-\x7F]"
dataURIRegexString = "^data:.+\\/(.+);base64$"
latitudeRegexString = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
longitudeRegexString = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
sSNRegexString = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
)
var (
alphaRegex = regexp.MustCompile(alphaRegexString)
alphaNumericRegex = regexp.MustCompile(alphaNumericRegexString)
numericRegex = regexp.MustCompile(numericRegexString)
numberRegex = regexp.MustCompile(numberRegexString)
hexadecimalRegex = regexp.MustCompile(hexadecimalRegexString)
hexcolorRegex = regexp.MustCompile(hexcolorRegexString)
rgbRegex = regexp.MustCompile(rgbRegexString)
rgbaRegex = regexp.MustCompile(rgbaRegexString)
hslRegex = regexp.MustCompile(hslRegexString)
hslaRegex = regexp.MustCompile(hslaRegexString)
emailRegex = regexp.MustCompile(emailRegexString)
base64Regex = regexp.MustCompile(base64RegexString)
iSBN10Regex = regexp.MustCompile(iSBN10RegexString)
iSBN13Regex = regexp.MustCompile(iSBN13RegexString)
uUID3Regex = regexp.MustCompile(uUID3RegexString)
uUID4Regex = regexp.MustCompile(uUID4RegexString)
uUID5Regex = regexp.MustCompile(uUID5RegexString)
uUIDRegex = regexp.MustCompile(uUIDRegexString)
aSCIIRegex = regexp.MustCompile(aSCIIRegexString)
printableASCIIRegex = regexp.MustCompile(printableASCIIRegexString)
multibyteRegex = regexp.MustCompile(multibyteRegexString)
dataURIRegex = regexp.MustCompile(dataURIRegexString)
latitudeRegex = regexp.MustCompile(latitudeRegexString)
longitudeRegex = regexp.MustCompile(longitudeRegexString)
sSNRegex = regexp.MustCompile(sSNRegexString)
)

382
vendor/gopkg.in/go-playground/validator.v8/util.go generated vendored Normal file

@@ -0,0 +1,382 @@
package validator
import (
"fmt"
"reflect"
"strconv"
"strings"
)
const (
dash = "-"
blank = ""
namespaceSeparator = "."
leftBracket = "["
rightBracket = "]"
restrictedTagChars = ".[],|=+()`~!@#$%^&*\\\"/?<>{}"
restrictedAliasErr = "Alias '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation"
restrictedTagErr = "Tag '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation"
)
var (
restrictedTags = map[string]*struct{}{
diveTag: emptyStructPtr,
existsTag: emptyStructPtr,
structOnlyTag: emptyStructPtr,
omitempty: emptyStructPtr,
skipValidationTag: emptyStructPtr,
utf8HexComma: emptyStructPtr,
utf8Pipe: emptyStructPtr,
noStructLevelTag: emptyStructPtr,
}
)
// ExtractType gets the actual underlying type of a field value.
// It will dive into pointers and custom types and return the
// underlying value and its kind.
// It is exposed for use within your custom functions.
func (v *Validate) ExtractType(current reflect.Value) (reflect.Value, reflect.Kind) {
switch current.Kind() {
case reflect.Ptr:
if current.IsNil() {
return current, reflect.Ptr
}
return v.ExtractType(current.Elem())
case reflect.Interface:
if current.IsNil() {
return current, reflect.Interface
}
return v.ExtractType(current.Elem())
case reflect.Invalid:
return current, reflect.Invalid
default:
if v.hasCustomFuncs {
// fmt.Println("Type", current.Type())
if fn, ok := v.customTypeFuncs[current.Type()]; ok {
// fmt.Println("OK")
return v.ExtractType(reflect.ValueOf(fn(current)))
}
// fmt.Println("NOT OK")
}
return current, current.Kind()
}
}
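// Example (a hypothetical sketch): a custom validation Func can call
// ExtractType to resolve pointer/interface indirection before inspecting
// the value. The function name below is an assumption for illustration:
//
//	func nonEmptyString(v *Validate, top, current, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
//		val, kind := v.ExtractType(field)
//		return kind == reflect.String && val.Len() > 0
//	}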
// GetStructFieldOK traverses a struct to retrieve a specific field denoted by the provided namespace and
// returns the field, field kind and whether it was successful in retrieving the field at all.
// NOTE: when not successful ok will be false; this can happen when a nested struct is nil and so the field
// could not be retrieved because it didn't exist.
func (v *Validate) GetStructFieldOK(current reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool) {
current, kind := v.ExtractType(current)
if kind == reflect.Invalid {
return current, kind, false
}
if namespace == blank {
return current, kind, true
}
switch kind {
case reflect.Ptr, reflect.Interface:
return current, kind, false
case reflect.Struct:
typ := current.Type()
fld := namespace
ns := namespace
if typ != timeType && typ != timePtrType {
idx := strings.Index(namespace, namespaceSeparator)
if idx != -1 {
fld = namespace[:idx]
ns = namespace[idx+1:]
} else {
ns = blank
idx = len(namespace)
}
bracketIdx := strings.Index(fld, leftBracket)
if bracketIdx != -1 {
fld = fld[:bracketIdx]
ns = namespace[bracketIdx:]
}
current = current.FieldByName(fld)
return v.GetStructFieldOK(current, ns)
}
case reflect.Array, reflect.Slice:
idx := strings.Index(namespace, leftBracket)
idx2 := strings.Index(namespace, rightBracket)
arrIdx, _ := strconv.Atoi(namespace[idx+1 : idx2])
if arrIdx >= current.Len() {
return current, kind, false
}
startIdx := idx2 + 1
if startIdx < len(namespace) {
if namespace[startIdx:startIdx+1] == namespaceSeparator {
startIdx++
}
}
return v.GetStructFieldOK(current.Index(arrIdx), namespace[startIdx:])
case reflect.Map:
idx := strings.Index(namespace, leftBracket) + 1
idx2 := strings.Index(namespace, rightBracket)
endIdx := idx2
if endIdx+1 < len(namespace) {
if namespace[endIdx+1:endIdx+2] == namespaceSeparator {
endIdx++
}
}
key := namespace[idx:idx2]
switch current.Type().Key().Kind() {
case reflect.Int:
i, _ := strconv.Atoi(key)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(i)), namespace[endIdx+1:])
case reflect.Int8:
i, _ := strconv.ParseInt(key, 10, 8)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(int8(i))), namespace[endIdx+1:])
case reflect.Int16:
i, _ := strconv.ParseInt(key, 10, 16)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(int16(i))), namespace[endIdx+1:])
case reflect.Int32:
i, _ := strconv.ParseInt(key, 10, 32)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(int32(i))), namespace[endIdx+1:])
case reflect.Int64:
i, _ := strconv.ParseInt(key, 10, 64)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(i)), namespace[endIdx+1:])
case reflect.Uint:
i, _ := strconv.ParseUint(key, 10, 0)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint(i))), namespace[endIdx+1:])
case reflect.Uint8:
i, _ := strconv.ParseUint(key, 10, 8)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint8(i))), namespace[endIdx+1:])
case reflect.Uint16:
i, _ := strconv.ParseUint(key, 10, 16)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint16(i))), namespace[endIdx+1:])
case reflect.Uint32:
i, _ := strconv.ParseUint(key, 10, 32)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint32(i))), namespace[endIdx+1:])
case reflect.Uint64:
i, _ := strconv.ParseUint(key, 10, 64)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(i)), namespace[endIdx+1:])
case reflect.Float32:
f, _ := strconv.ParseFloat(key, 32)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(float32(f))), namespace[endIdx+1:])
case reflect.Float64:
f, _ := strconv.ParseFloat(key, 64)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(f)), namespace[endIdx+1:])
case reflect.Bool:
b, _ := strconv.ParseBool(key)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(b)), namespace[endIdx+1:])
// reflect.Type = string
default:
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(key)), namespace[endIdx+1:])
}
}
// if got here there was more namespace, cannot go any deeper
panic("Invalid field namespace")
}
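// Example namespaces accepted above (a sketch; the struct shape is an
// assumption): "Inner.Name" for a nested struct field, "Slice[0].Name" for
// a slice element and "Map[key].Name" for a map value, e.g.
//
//	val, kind, ok := v.GetStructFieldOK(reflect.ValueOf(user), "Addresses[0].City")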
// asInt returns the parameter as an int64
// or panics if it can't convert
func asInt(param string) int64 {
i, err := strconv.ParseInt(param, 0, 64)
panicIf(err)
return i
}
// asUint returns the parameter as a uint64
// or panics if it can't convert
func asUint(param string) uint64 {
i, err := strconv.ParseUint(param, 0, 64)
panicIf(err)
return i
}
// asFloat returns the parameter as a float64
// or panics if it can't convert
func asFloat(param string) float64 {
i, err := strconv.ParseFloat(param, 64)
panicIf(err)
return i
}
func panicIf(err error) {
if err != nil {
panic(err.Error())
}
}
func (v *Validate) parseStruct(current reflect.Value, sName string) *cachedStruct {
typ := current.Type()
s := &cachedStruct{Name: sName, fields: map[int]cachedField{}}
numFields := current.NumField()
var fld reflect.StructField
var tag string
var customName string
for i := 0; i < numFields; i++ {
fld = typ.Field(i)
if fld.PkgPath != blank {
continue
}
tag = fld.Tag.Get(v.tagName)
if tag == skipValidationTag {
continue
}
customName = fld.Name
if v.fieldNameTag != blank {
name := strings.SplitN(fld.Tag.Get(v.fieldNameTag), ",", 2)[0]
// dash check is for json "-" (aka skipValidationTag) means don't output in json
if name != "" && name != skipValidationTag {
customName = name
}
}
cTag, ok := v.tagCache.Get(tag)
if !ok {
cTag = v.parseTags(tag, fld.Name)
}
s.fields[i] = cachedField{Idx: i, Name: fld.Name, AltName: customName, CachedTag: cTag}
}
v.structCache.Set(typ, s)
return s
}
func (v *Validate) parseTags(tag, fieldName string) *cachedTag {
cTag := &cachedTag{tag: tag}
v.parseTagsRecursive(cTag, tag, fieldName, blank, false)
v.tagCache.Set(tag, cTag)
return cTag
}
func (v *Validate) parseTagsRecursive(cTag *cachedTag, tag, fieldName, alias string, isAlias bool) bool {
if tag == blank {
return true
}
for _, t := range strings.Split(tag, tagSeparator) {
if v.hasAliasValidators {
// check map for alias and process new tags, otherwise process as usual
if tagsVal, ok := v.aliasValidators[t]; ok {
leave := v.parseTagsRecursive(cTag, tagsVal, fieldName, t, true)
if leave {
return leave
}
continue
}
}
switch t {
case diveTag:
cTag.diveTag = tag
tVals := &tagVals{tagVals: [][]string{{t}}}
cTag.tags = append(cTag.tags, tVals)
return true
case omitempty:
cTag.isOmitEmpty = true
case structOnlyTag:
cTag.isStructOnly = true
case noStructLevelTag:
cTag.isNoStructLevel = true
}
// if a pipe character is needed within the param you must use the utf8Pipe representation "0x7C"
orVals := strings.Split(t, orSeparator)
tagVal := &tagVals{isAlias: isAlias, isOrVal: len(orVals) > 1, tagVals: make([][]string, len(orVals))}
cTag.tags = append(cTag.tags, tagVal)
var key string
var param string
for i, val := range orVals {
vals := strings.SplitN(val, tagKeySeparator, 2)
key = vals[0]
tagVal.tag = key
if isAlias {
tagVal.tag = alias
}
if key == blank {
panic(strings.TrimSpace(fmt.Sprintf(invalidValidation, fieldName)))
}
if len(vals) > 1 {
param = strings.Replace(strings.Replace(vals[1], utf8HexComma, ",", -1), utf8Pipe, "|", -1)
}
tagVal.tagVals[i] = []string{key, param}
}
}
return false
}

797
vendor/gopkg.in/go-playground/validator.v8/validator.go generated vendored Normal file
View File

@@ -0,0 +1,797 @@
/**
* Package validator
*
* MISC:
* - anonymous structs - they don't have names so expect the Struct name within StructErrors to be blank
*
*/
package validator
import (
"bytes"
"errors"
"fmt"
"reflect"
"strings"
"sync"
"time"
)
const (
utf8HexComma = "0x2C"
utf8Pipe = "0x7C"
tagSeparator = ","
orSeparator = "|"
tagKeySeparator = "="
structOnlyTag = "structonly"
noStructLevelTag = "nostructlevel"
omitempty = "omitempty"
skipValidationTag = "-"
diveTag = "dive"
existsTag = "exists"
fieldErrMsg = "Key: '%s' Error:Field validation for '%s' failed on the '%s' tag"
arrayIndexFieldName = "%s" + leftBracket + "%d" + rightBracket
mapIndexFieldName = "%s" + leftBracket + "%v" + rightBracket
invalidValidation = "Invalid validation tag on field %s"
undefinedValidation = "Undefined validation function on field %s"
validatorNotInitialized = "Validator instance not initialized"
fieldNameRequired = "Field Name Required"
tagRequired = "Tag Required"
)
var (
timeType = reflect.TypeOf(time.Time{})
timePtrType = reflect.TypeOf(&time.Time{})
emptyStructPtr = new(struct{})
)
// StructLevel contains all of the information and helper methods
// for reporting errors during struct level validation
type StructLevel struct {
TopStruct reflect.Value
CurrentStruct reflect.Value
errPrefix string
nsPrefix string
errs ValidationErrors
v *Validate
}
// ReportValidationErrors accepts the key relative to the top level struct and validation errors.
// Example: had a triple nested struct User, ContactInfo, Country and ran errs := validate.Struct(country)
// from within a User struct level validation would call this method like so:
// ReportValidationErrors("ContactInfo.", errs)
// NOTE: relativeKey can contain both the Field Relative and Custom name relative paths
// i.e. ReportValidationErrors("ContactInfo.|cInfo", errs) where cInfo represents say the JSON name of
// the relative path; this will be split into 2 variables in the next validator version.
func (sl *StructLevel) ReportValidationErrors(relativeKey string, errs ValidationErrors) {
for _, e := range errs {
idx := strings.Index(relativeKey, "|")
var rel string
var cRel string
if idx != -1 {
rel = relativeKey[:idx]
cRel = relativeKey[idx+1:]
} else {
rel = relativeKey
}
key := sl.errPrefix + rel + e.Field
e.FieldNamespace = key
e.NameNamespace = sl.nsPrefix + cRel + e.Name
sl.errs[key] = e
}
}
// ReportError reports an error just by passing the field and tag information
// NOTE: tag can be an existing validation tag or just something you make up
// and process on the flip side; it's up to you.
func (sl *StructLevel) ReportError(field reflect.Value, fieldName string, customName string, tag string) {
field, kind := sl.v.ExtractType(field)
if fieldName == blank {
panic(fieldNameRequired)
}
if customName == blank {
customName = fieldName
}
if tag == blank {
panic(tagRequired)
}
ns := sl.errPrefix + fieldName
switch kind {
case reflect.Invalid:
sl.errs[ns] = &FieldError{
FieldNamespace: ns,
NameNamespace: sl.nsPrefix + customName,
Name: customName,
Field: fieldName,
Tag: tag,
ActualTag: tag,
Param: blank,
Kind: kind,
}
default:
sl.errs[ns] = &FieldError{
FieldNamespace: ns,
NameNamespace: sl.nsPrefix + customName,
Name: customName,
Field: fieldName,
Tag: tag,
ActualTag: tag,
Param: blank,
Value: field.Interface(),
Kind: kind,
Type: field.Type(),
}
}
}
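// Example (a hypothetical sketch; the User type and its fields are
// assumptions): a struct level function reporting a cross-field error via
// ReportError:
//
//	func userStructLevel(v *validator.Validate, sl *validator.StructLevel) {
//		user := sl.CurrentStruct.Interface().(User)
//		if user.FirstName == "" && user.LastName == "" {
//			sl.ReportError(reflect.ValueOf(user.FirstName), "FirstName", "fname", "required")
//		}
//	}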
// Validate contains the validator settings passed in using the Config struct
type Validate struct {
tagName string
fieldNameTag string
validationFuncs map[string]Func
structLevelFuncs map[reflect.Type]StructLevelFunc
customTypeFuncs map[reflect.Type]CustomTypeFunc
aliasValidators map[string]string
hasCustomFuncs bool
hasAliasValidators bool
hasStructLevelFuncs bool
tagCache *tagCacheMap
structCache *structCacheMap
errsPool *sync.Pool
}
func (v *Validate) initCheck() {
if v == nil {
panic(validatorNotInitialized)
}
}
// Config contains the options that a Validator instance will use.
// It is passed to the New() function
type Config struct {
TagName string
FieldNameTag string
}
// CustomTypeFunc allows for overriding or adding custom field type handler functions
// field = field value of the type to return a value to be validated
// example: Valuer from the sql driver, see https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29
type CustomTypeFunc func(field reflect.Value) interface{}
// Func accepts all values needed for field and cross field validation
// v = validator instance, needed by some built in functions for its custom types
// topStruct = top level struct when validating by struct otherwise nil
// currentStruct = current level struct when validating by struct otherwise optional comparison value
// field = field value for validation
// param = parameter used in validation i.e. gt=0 param would be 0
type Func func(v *Validate, topStruct reflect.Value, currentStruct reflect.Value, field reflect.Value, fieldtype reflect.Type, fieldKind reflect.Kind, param string) bool
// StructLevelFunc accepts all values needed for struct level validation
type StructLevelFunc func(v *Validate, structLevel *StructLevel)
// ValidationErrors is a type of map[string]*FieldError
// it exists to allow for multiple errors to be passed from this library
// and yet still subscribe to the error interface
type ValidationErrors map[string]*FieldError
// Error is intended for use in development + debugging and not intended to be a production error message.
// It allows ValidationErrors to subscribe to the Error interface.
// All information to create an error message specific to your application is contained within
// the FieldError found within the ValidationErrors map
func (ve ValidationErrors) Error() string {
buff := bytes.NewBufferString(blank)
for key, err := range ve {
buff.WriteString(fmt.Sprintf(fieldErrMsg, key, err.Field, err.Tag))
buff.WriteString("\n")
}
return strings.TrimSpace(buff.String())
}
// FieldError contains a single field's validation error along
// with other properties that may be needed for error message creation
type FieldError struct {
FieldNamespace string
NameNamespace string
Field string
Name string
Tag string
ActualTag string
Kind reflect.Kind
Type reflect.Type
Param string
Value interface{}
}
// New creates a new Validate instance for use.
func New(config *Config) *Validate {
v := &Validate{
tagName: config.TagName,
fieldNameTag: config.FieldNameTag,
tagCache: &tagCacheMap{m: map[string]*cachedTag{}},
structCache: &structCacheMap{m: map[reflect.Type]*cachedStruct{}},
errsPool: &sync.Pool{New: func() interface{} {
return ValidationErrors{}
}}}
if len(v.aliasValidators) == 0 {
// must copy alias validators for separate validations to be used in each validator instance
v.aliasValidators = map[string]string{}
for k, val := range bakedInAliasValidators {
v.RegisterAliasValidation(k, val)
}
}
if len(v.validationFuncs) == 0 {
// must copy validators for separate validations to be used in each instance
v.validationFuncs = map[string]Func{}
for k, val := range bakedInValidators {
v.RegisterValidation(k, val)
}
}
return v
}
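// Typical setup (a sketch; "validate" and "json" are the conventional tag
// names but are assumptions here):
//
//	config := &validator.Config{TagName: "validate", FieldNameTag: "json"}
//	validate := validator.New(config)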
// RegisterStructValidation registers a StructLevelFunc against a number of types
// NOTE: this method is not thread-safe; it is intended that these all be registered prior to any validation
func (v *Validate) RegisterStructValidation(fn StructLevelFunc, types ...interface{}) {
v.initCheck()
if v.structLevelFuncs == nil {
v.structLevelFuncs = map[reflect.Type]StructLevelFunc{}
}
for _, t := range types {
v.structLevelFuncs[reflect.TypeOf(t)] = fn
}
v.hasStructLevelFuncs = true
}
// RegisterValidation adds a validation Func to a Validate's map of validators denoted by the key
// NOTE: if the key already exists, the previous validation function will be replaced.
// NOTE: this method is not thread-safe; it is intended that these all be registered prior to any validation
func (v *Validate) RegisterValidation(key string, fn Func) error {
v.initCheck()
if key == blank {
return errors.New("Function Key cannot be empty")
}
if fn == nil {
return errors.New("Function cannot be empty")
}
_, ok := restrictedTags[key]
if ok || strings.ContainsAny(key, restrictedTagChars) {
panic(fmt.Sprintf(restrictedTagErr, key))
}
v.validationFuncs[key] = fn
return nil
}
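// Example (a sketch; the "is-awesome" key and func body are assumptions):
//
//	validate.RegisterValidation("is-awesome", func(v *validator.Validate, top, current, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
//		return field.String() == "awesome"
//	})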
// RegisterCustomTypeFunc registers a CustomTypeFunc against a number of types
// NOTE: this method is not thread-safe; it is intended that these all be registered prior to any validation
func (v *Validate) RegisterCustomTypeFunc(fn CustomTypeFunc, types ...interface{}) {
v.initCheck()
if v.customTypeFuncs == nil {
v.customTypeFuncs = map[reflect.Type]CustomTypeFunc{}
}
for _, t := range types {
v.customTypeFuncs[reflect.TypeOf(t)] = fn
}
v.hasCustomFuncs = true
}
// RegisterAliasValidation registers a mapping of a single validation tag that
// defines a common or complex set of validation(s) to simplify adding validation
// to structs. NOTE: when returning an error the tag returned in FieldError will be
// the alias tag unless the dive tag is part of the alias; everything after the
// dive tag is not reported as the alias tag. Also the ActualTag in the before case
// will be the actual tag within the alias that failed.
// NOTE: this method is not thread-safe; it is intended that these all be registered prior to any validation
func (v *Validate) RegisterAliasValidation(alias, tags string) {
v.initCheck()
_, ok := restrictedTags[alias]
if ok || strings.ContainsAny(alias, restrictedTagChars) {
panic(fmt.Sprintf(restrictedAliasErr, alias))
}
v.aliasValidators[alias] = tags
v.hasAliasValidators = true
}
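// Example (a sketch; the alias name is an assumption):
//
//	validate.RegisterAliasValidation("iscolor", "hexcolor|rgb|rgba|hsl|hsla")
//
// A field tagged `validate:"iscolor"` then runs the whole or-chain, and a
// failure reports "iscolor" as the Tag with the failing tag as ActualTag.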
// Field validates a single field using tag style validation and returns nil or ValidationErrors as type error.
// You will need to assert the error if it's not nil i.e. err.(validator.ValidationErrors) to access the map of errors.
// NOTE: it returns ValidationErrors instead of a single FieldError because this can also
// validate Array, Slice and Map fields, which may contain more than one error
func (v *Validate) Field(field interface{}, tag string) error {
v.initCheck()
errs := v.errsPool.Get().(ValidationErrors)
fieldVal := reflect.ValueOf(field)
v.traverseField(fieldVal, fieldVal, fieldVal, blank, blank, errs, false, tag, blank, blank, false, false, nil, nil)
if len(errs) == 0 {
v.errsPool.Put(errs)
return nil
}
return errs
}
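// Example (a sketch):
//
//	errs := validate.Field("not an email", "required,email")
//	if errs != nil {
//		for _, e := range errs.(validator.ValidationErrors) {
//			fmt.Println(e.Tag) // "email"
//		}
//	}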
// FieldWithValue validates a single field, against another fields value using tag style validation and returns nil or ValidationErrors.
// You will need to assert the error if it's not nil i.e. err.(validator.ValidationErrors) to access the map of errors.
// NOTE: it returns ValidationErrors instead of a single FieldError because this can also
// validate Array, Slice and Map fields, which may contain more than one error
func (v *Validate) FieldWithValue(val interface{}, field interface{}, tag string) error {
v.initCheck()
errs := v.errsPool.Get().(ValidationErrors)
topVal := reflect.ValueOf(val)
v.traverseField(topVal, topVal, reflect.ValueOf(field), blank, blank, errs, false, tag, blank, blank, false, false, nil, nil)
if len(errs) == 0 {
v.errsPool.Put(errs)
return nil
}
return errs
}
// StructPartial validates the fields passed in only, ignoring all others.
// Fields may be provided in a namespaced fashion relative to the struct provided
// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name and returns nil or ValidationErrors as error
// You will need to assert the error if it's not nil i.e. err.(validator.ValidationErrors) to access the map of errors.
func (v *Validate) StructPartial(current interface{}, fields ...string) error {
v.initCheck()
sv, _ := v.ExtractType(reflect.ValueOf(current))
name := sv.Type().Name()
m := map[string]*struct{}{}
if fields != nil {
for _, k := range fields {
flds := strings.Split(k, namespaceSeparator)
if len(flds) > 0 {
key := name + namespaceSeparator
for _, s := range flds {
idx := strings.Index(s, leftBracket)
if idx != -1 {
for idx != -1 {
key += s[:idx]
m[key] = emptyStructPtr
idx2 := strings.Index(s, rightBracket)
idx2++
key += s[idx:idx2]
m[key] = emptyStructPtr
s = s[idx2:]
idx = strings.Index(s, leftBracket)
}
} else {
key += s
m[key] = emptyStructPtr
}
key += namespaceSeparator
}
}
}
}
errs := v.errsPool.Get().(ValidationErrors)
v.tranverseStruct(sv, sv, sv, blank, blank, errs, true, len(m) != 0, false, m, false)
if len(errs) == 0 {
v.errsPool.Put(errs)
return nil
}
return errs
}
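// Example (a sketch; the user value and field names are assumptions):
//
//	// validate only Name plus the City of the first address
//	errs := validate.StructPartial(user, "Name", "Addresses[0].City")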
// StructExcept validates all fields except the ones passed in.
// Fields may be provided in a namespaced fashion relative to the struct provided
// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name and returns nil or ValidationErrors as error
// You will need to assert the error if it's not nil i.e. err.(validator.ValidationErrors) to access the map of errors.
func (v *Validate) StructExcept(current interface{}, fields ...string) error {
v.initCheck()
sv, _ := v.ExtractType(reflect.ValueOf(current))
name := sv.Type().Name()
m := map[string]*struct{}{}
for _, key := range fields {
m[name+namespaceSeparator+key] = emptyStructPtr
}
errs := v.errsPool.Get().(ValidationErrors)
v.tranverseStruct(sv, sv, sv, blank, blank, errs, true, len(m) != 0, true, m, false)
if len(errs) == 0 {
v.errsPool.Put(errs)
return nil
}
return errs
}
// Struct validates a struct's exposed fields, and automatically validates nested structs, unless otherwise specified.
// it returns nil or ValidationErrors as error.
// You will need to assert the error if it's not nil i.e. err.(validator.ValidationErrors) to access the map of errors.
func (v *Validate) Struct(current interface{}) error {
v.initCheck()
errs := v.errsPool.Get().(ValidationErrors)
sv := reflect.ValueOf(current)
v.tranverseStruct(sv, sv, sv, blank, blank, errs, true, false, false, nil, false)
if len(errs) == 0 {
v.errsPool.Put(errs)
return nil
}
return errs
}
// tranverseStruct traverses a struct's fields and then passes them to be validated by traverseField
func (v *Validate) tranverseStruct(topStruct reflect.Value, currentStruct reflect.Value, current reflect.Value, errPrefix string, nsPrefix string, errs ValidationErrors, useStructName bool, partial bool, exclude bool, includeExclude map[string]*struct{}, isStructOnly bool) {
if current.Kind() == reflect.Ptr && !current.IsNil() {
current = current.Elem()
}
if current.Kind() != reflect.Struct && current.Kind() != reflect.Interface {
panic("value passed for validation is not a struct")
}
typ := current.Type()
sName := typ.Name()
if useStructName {
errPrefix += sName + namespaceSeparator
if v.fieldNameTag != blank {
nsPrefix += sName + namespaceSeparator
}
}
// structonly tag present: don't traverse fields,
// but must still check for and run struct level validation
// below, if present
if !isStructOnly {
var fld reflect.StructField
// is anonymous struct, cannot parse or cache as
// it has no name to index by
if sName == blank {
var customName string
var ok bool
numFields := current.NumField()
for i := 0; i < numFields; i++ {
fld = typ.Field(i)
if fld.PkgPath != blank {
continue
}
if partial {
_, ok = includeExclude[errPrefix+fld.Name]
if (ok && exclude) || (!ok && !exclude) {
continue
}
}
customName = fld.Name
if v.fieldNameTag != blank {
name := strings.SplitN(fld.Tag.Get(v.fieldNameTag), ",", 2)[0]
// dash check is for json "-" means don't output in json
if name != blank && name != dash {
customName = name
}
}
v.traverseField(topStruct, currentStruct, current.Field(i), errPrefix, nsPrefix, errs, true, fld.Tag.Get(v.tagName), fld.Name, customName, partial, exclude, includeExclude, nil)
}
} else {
s, ok := v.structCache.Get(typ)
if !ok {
s = v.parseStruct(current, sName)
}
for i, f := range s.fields {
if partial {
_, ok = includeExclude[errPrefix+f.Name]
if (ok && exclude) || (!ok && !exclude) {
continue
}
}
fld = typ.Field(i)
v.traverseField(topStruct, currentStruct, current.Field(i), errPrefix, nsPrefix, errs, true, f.CachedTag.tag, fld.Name, f.AltName, partial, exclude, includeExclude, f.CachedTag)
}
}
}
// check if any struct level validations, after all field validations already checked.
if v.hasStructLevelFuncs {
if fn, ok := v.structLevelFuncs[current.Type()]; ok {
fn(v, &StructLevel{v: v, TopStruct: topStruct, CurrentStruct: current, errPrefix: errPrefix, nsPrefix: nsPrefix, errs: errs})
}
}
}
// traverseField validates any field, be it a struct or single field, ensures its validity and passes it along to be validated via its tag options
func (v *Validate) traverseField(topStruct reflect.Value, currentStruct reflect.Value, current reflect.Value, errPrefix string, nsPrefix string, errs ValidationErrors, isStructField bool, tag, name, customName string, partial bool, exclude bool, includeExclude map[string]*struct{}, cTag *cachedTag) {
if tag == skipValidationTag {
return
}
if cTag == nil {
var isCached bool
cTag, isCached = v.tagCache.Get(tag)
if !isCached {
cTag = v.parseTags(tag, name)
}
}
current, kind := v.ExtractType(current)
var typ reflect.Type
switch kind {
case reflect.Ptr, reflect.Interface, reflect.Invalid:
if cTag.isOmitEmpty {
return
}
if tag != blank {
ns := errPrefix + name
if kind == reflect.Invalid {
errs[ns] = &FieldError{
FieldNamespace: ns,
NameNamespace: nsPrefix + customName,
Name: customName,
Field: name,
Tag: cTag.tags[0].tag,
ActualTag: cTag.tags[0].tagVals[0][0],
Param: cTag.tags[0].tagVals[0][1],
Kind: kind,
}
return
}
errs[ns] = &FieldError{
FieldNamespace: ns,
NameNamespace: nsPrefix + customName,
Name: customName,
Field: name,
Tag: cTag.tags[0].tag,
ActualTag: cTag.tags[0].tagVals[0][0],
Param: cTag.tags[0].tagVals[0][1],
Value: current.Interface(),
Kind: kind,
Type: current.Type(),
}
return
}
// if we get here tag length is zero and we can leave
if kind == reflect.Invalid {
return
}
case reflect.Struct:
typ = current.Type()
if typ != timeType {
if cTag.isNoStructLevel {
return
}
v.tranverseStruct(topStruct, current, current, errPrefix+name+namespaceSeparator, nsPrefix+customName+namespaceSeparator, errs, false, partial, exclude, includeExclude, cTag.isStructOnly)
return
}
}
if tag == blank {
return
}
typ = current.Type()
var dive bool
var diveSubTag string
for _, valTag := range cTag.tags {
if valTag.tagVals[0][0] == existsTag {
continue
}
if valTag.tagVals[0][0] == diveTag {
dive = true
diveSubTag = strings.TrimLeft(strings.SplitN(cTag.diveTag, diveTag, 2)[1], ",")
break
}
if valTag.tagVals[0][0] == omitempty {
if !HasValue(v, topStruct, currentStruct, current, typ, kind, blank) {
return
}
continue
}
if v.validateField(topStruct, currentStruct, current, typ, kind, errPrefix, nsPrefix, errs, valTag, name, customName) {
return
}
}
if dive {
// traverse slice or map here
// or panic ;)
switch kind {
case reflect.Slice, reflect.Array:
v.traverseSlice(topStruct, currentStruct, current, errPrefix, nsPrefix, errs, diveSubTag, name, customName, partial, exclude, includeExclude, nil)
case reflect.Map:
v.traverseMap(topStruct, currentStruct, current, errPrefix, nsPrefix, errs, diveSubTag, name, customName, partial, exclude, includeExclude, nil)
default:
// throw error, if not a slice or map then should not have gotten here
// bad dive tag
panic("dive error! can't dive on a non slice or map")
}
}
}
// traverseSlice traverses a Slice or Array's elements and passes them to traverseField for validation
func (v *Validate) traverseSlice(topStruct reflect.Value, currentStruct reflect.Value, current reflect.Value, errPrefix string, nsPrefix string, errs ValidationErrors, tag, name, customName string, partial bool, exclude bool, includeExclude map[string]*struct{}, cTag *cachedTag) {
for i := 0; i < current.Len(); i++ {
v.traverseField(topStruct, currentStruct, current.Index(i), errPrefix, nsPrefix, errs, false, tag, fmt.Sprintf(arrayIndexFieldName, name, i), fmt.Sprintf(arrayIndexFieldName, customName, i), partial, exclude, includeExclude, cTag)
}
}
// traverseMap traverses a map's elements and passes them to traverseField for validation
func (v *Validate) traverseMap(topStruct reflect.Value, currentStruct reflect.Value, current reflect.Value, errPrefix string, nsPrefix string, errs ValidationErrors, tag, name, customName string, partial bool, exclude bool, includeExclude map[string]*struct{}, cTag *cachedTag) {
for _, key := range current.MapKeys() {
v.traverseField(topStruct, currentStruct, current.MapIndex(key), errPrefix, nsPrefix, errs, false, tag, fmt.Sprintf(mapIndexFieldName, name, key.Interface()), fmt.Sprintf(mapIndexFieldName, customName, key.Interface()), partial, exclude, includeExclude, cTag)
}
}
// validateField validates a field based on the provided tag's key and param values and returns true if there is an error or false if all ok
func (v *Validate) validateField(topStruct reflect.Value, currentStruct reflect.Value, current reflect.Value, currentType reflect.Type, currentKind reflect.Kind, errPrefix string, nsPrefix string, errs ValidationErrors, valTag *tagVals, name, customName string) bool {
var valFunc Func
var ok bool
if valTag.isOrVal {
errTag := blank
for _, val := range valTag.tagVals {
valFunc, ok = v.validationFuncs[val[0]]
if !ok {
panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, name)))
}
if valFunc(v, topStruct, currentStruct, current, currentType, currentKind, val[1]) {
return false
}
errTag += orSeparator + val[0]
}
ns := errPrefix + name
if valTag.isAlias {
errs[ns] = &FieldError{
FieldNamespace: ns,
NameNamespace: nsPrefix + customName,
Name: customName,
Field: name,
Tag: valTag.tag,
ActualTag: errTag[1:],
Value: current.Interface(),
Type: currentType,
Kind: currentKind,
}
} else {
errs[errPrefix+name] = &FieldError{
FieldNamespace: ns,
NameNamespace: nsPrefix + customName,
Name: customName,
Field: name,
Tag: errTag[1:],
ActualTag: errTag[1:],
Value: current.Interface(),
Type: currentType,
Kind: currentKind,
}
}
return true
}
valFunc, ok = v.validationFuncs[valTag.tagVals[0][0]]
if !ok {
panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, name)))
}
if valFunc(v, topStruct, currentStruct, current, currentType, currentKind, valTag.tagVals[0][1]) {
return false
}
ns := errPrefix + name
errs[ns] = &FieldError{
FieldNamespace: ns,
NameNamespace: nsPrefix + customName,
Name: customName,
Field: name,
Tag: valTag.tag,
ActualTag: valTag.tagVals[0][0],
Value: current.Interface(),
Param: valTag.tagVals[0][1],
Type: currentType,
Kind: currentKind,
}
return true
}

File diff suppressed because it is too large

45
vendor/gopkg.in/mgo.v2/.travis.yml generated vendored Normal file
View File

@@ -0,0 +1,45 @@
language: go
go_import_path: gopkg.in/mgo.v2
addons:
apt:
packages:
env:
global:
- BUCKET=https://niemeyer.s3.amazonaws.com
matrix:
- GO=1.4.1 MONGODB=x86_64-2.2.7
- GO=1.4.1 MONGODB=x86_64-2.4.14
- GO=1.4.1 MONGODB=x86_64-2.6.11
- GO=1.4.1 MONGODB=x86_64-3.0.9
- GO=1.4.1 MONGODB=x86_64-3.2.3-nojournal
- GO=1.5.3 MONGODB=x86_64-3.0.9
- GO=1.6 MONGODB=x86_64-3.0.9
install:
- eval "$(gimme $GO)"
- wget $BUCKET/mongodb-linux-$MONGODB.tgz
- tar xzvf mongodb-linux-$MONGODB.tgz
- export PATH=$PWD/mongodb-linux-$MONGODB/bin:$PATH
- wget $BUCKET/daemontools.tar.gz
- tar xzvf daemontools.tar.gz
- export PATH=$PWD/daemontools:$PATH
- go get gopkg.in/check.v1
- go get gopkg.in/yaml.v2
- go get gopkg.in/tomb.v2
before_script:
- export NOIPV6=1
- make startdb
script:
- (cd bson && go test -check.v)
- go test -check.v -fast
- (cd txn && go test -check.v)
# vim:sw=4:ts=4:et

25
vendor/gopkg.in/mgo.v2/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,25 @@
mgo - MongoDB driver for Go
Copyright (c) 2010-2013 - Gustavo Niemeyer <gustavo@niemeyer.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

5
vendor/gopkg.in/mgo.v2/Makefile generated vendored Normal file
View File

@@ -0,0 +1,5 @@
startdb:
@harness/setup.sh start
stopdb:
@harness/setup.sh stop

4
vendor/gopkg.in/mgo.v2/README.md generated vendored Normal file
View File

@@ -0,0 +1,4 @@
The MongoDB driver for Go
-------------------------
Please go to [http://labix.org/mgo](http://labix.org/mgo) for all project details.

467
vendor/gopkg.in/mgo.v2/auth.go generated vendored Normal file
View File

@@ -0,0 +1,467 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"crypto/md5"
"crypto/sha1"
"encoding/hex"
"errors"
"fmt"
"sync"
"gopkg.in/mgo.v2/bson"
"gopkg.in/mgo.v2/internal/scram"
)
type authCmd struct {
Authenticate int
Nonce string
User string
Key string
}
type startSaslCmd struct {
StartSASL int `bson:"startSasl"`
}
type authResult struct {
ErrMsg string
Ok bool
}
type getNonceCmd struct {
GetNonce int
}
type getNonceResult struct {
Nonce string
Err string "$err"
Code int
}
type logoutCmd struct {
Logout int
}
type saslCmd struct {
Start int `bson:"saslStart,omitempty"`
Continue int `bson:"saslContinue,omitempty"`
ConversationId int `bson:"conversationId,omitempty"`
Mechanism string `bson:"mechanism,omitempty"`
Payload []byte
}
type saslResult struct {
Ok bool `bson:"ok"`
NotOk bool `bson:"code"` // Server <= 2.3.2 returns ok=1 & code>0 on errors (WTF?)
Done bool
ConversationId int `bson:"conversationId"`
Payload []byte
ErrMsg string
}
type saslStepper interface {
Step(serverData []byte) (clientData []byte, done bool, err error)
Close()
}
func (socket *mongoSocket) getNonce() (nonce string, err error) {
socket.Lock()
for socket.cachedNonce == "" && socket.dead == nil {
debugf("Socket %p to %s: waiting for nonce", socket, socket.addr)
socket.gotNonce.Wait()
}
if socket.cachedNonce == "mongos" {
socket.Unlock()
return "", errors.New("Can't authenticate with mongos; see http://j.mp/mongos-auth")
}
debugf("Socket %p to %s: got nonce", socket, socket.addr)
nonce, err = socket.cachedNonce, socket.dead
socket.cachedNonce = ""
socket.Unlock()
if err != nil {
nonce = ""
}
return
}
func (socket *mongoSocket) resetNonce() {
debugf("Socket %p to %s: requesting a new nonce", socket, socket.addr)
op := &queryOp{}
op.query = &getNonceCmd{GetNonce: 1}
op.collection = "admin.$cmd"
op.limit = -1
op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
if err != nil {
socket.kill(errors.New("getNonce: "+err.Error()), true)
return
}
result := &getNonceResult{}
err = bson.Unmarshal(docData, &result)
if err != nil {
socket.kill(errors.New("Failed to unmarshal nonce: "+err.Error()), true)
return
}
debugf("Socket %p to %s: nonce unmarshalled: %#v", socket, socket.addr, result)
if result.Code == 13390 {
// mongos doesn't yet support auth (see http://j.mp/mongos-auth)
result.Nonce = "mongos"
} else if result.Nonce == "" {
var msg string
if result.Err != "" {
msg = fmt.Sprintf("Got an empty nonce: %s (%d)", result.Err, result.Code)
} else {
msg = "Got an empty nonce"
}
socket.kill(errors.New(msg), true)
return
}
socket.Lock()
if socket.cachedNonce != "" {
socket.Unlock()
panic("resetNonce: nonce already cached")
}
socket.cachedNonce = result.Nonce
socket.gotNonce.Signal()
socket.Unlock()
}
err := socket.Query(op)
if err != nil {
socket.kill(errors.New("resetNonce: "+err.Error()), true)
}
}
func (socket *mongoSocket) Login(cred Credential) error {
socket.Lock()
if cred.Mechanism == "" && socket.serverInfo.MaxWireVersion >= 3 {
cred.Mechanism = "SCRAM-SHA-1"
}
for _, sockCred := range socket.creds {
if sockCred == cred {
debugf("Socket %p to %s: login: db=%q user=%q (already logged in)", socket, socket.addr, cred.Source, cred.Username)
socket.Unlock()
return nil
}
}
if socket.dropLogout(cred) {
debugf("Socket %p to %s: login: db=%q user=%q (cached)", socket, socket.addr, cred.Source, cred.Username)
socket.creds = append(socket.creds, cred)
socket.Unlock()
return nil
}
socket.Unlock()
debugf("Socket %p to %s: login: db=%q user=%q", socket, socket.addr, cred.Source, cred.Username)
var err error
switch cred.Mechanism {
case "", "MONGODB-CR", "MONGO-CR": // Name changed to MONGODB-CR in SERVER-8501.
err = socket.loginClassic(cred)
case "PLAIN":
err = socket.loginPlain(cred)
case "MONGODB-X509":
err = socket.loginX509(cred)
default:
// Try SASL for everything else, if it is available.
err = socket.loginSASL(cred)
}
if err != nil {
debugf("Socket %p to %s: login error: %s", socket, socket.addr, err)
} else {
debugf("Socket %p to %s: login successful", socket, socket.addr)
}
return err
}
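// Callers normally reach this code through the public session API rather
// than the socket directly. A minimal sketch (the address and credentials
// are assumptions):
//
//	session, err := mgo.Dial("localhost:27017")
//	if err != nil {
//		log.Fatal(err)
//	}
//	err = session.Login(&mgo.Credential{Username: "user", Password: "pass", Source: "admin"})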
func (socket *mongoSocket) loginClassic(cred Credential) error {
// Note that this only works properly because this function is
// synchronous, which means the nonce won't get reset while we're
// using it and any other login requests will block waiting for a
// new nonce provided in the defer call below.
nonce, err := socket.getNonce()
if err != nil {
return err
}
defer socket.resetNonce()
psum := md5.New()
psum.Write([]byte(cred.Username + ":mongo:" + cred.Password))
ksum := md5.New()
ksum.Write([]byte(nonce + cred.Username))
ksum.Write([]byte(hex.EncodeToString(psum.Sum(nil))))
key := hex.EncodeToString(ksum.Sum(nil))
cmd := authCmd{Authenticate: 1, User: cred.Username, Nonce: nonce, Key: key}
res := authResult{}
return socket.loginRun(cred.Source, &cmd, &res, func() error {
if !res.Ok {
return errors.New(res.ErrMsg)
}
socket.Lock()
socket.dropAuth(cred.Source)
socket.creds = append(socket.creds, cred)
socket.Unlock()
return nil
})
}
type authX509Cmd struct {
Authenticate int
User string
Mechanism string
}
func (socket *mongoSocket) loginX509(cred Credential) error {
cmd := authX509Cmd{Authenticate: 1, User: cred.Username, Mechanism: "MONGODB-X509"}
res := authResult{}
return socket.loginRun(cred.Source, &cmd, &res, func() error {
if !res.Ok {
return errors.New(res.ErrMsg)
}
socket.Lock()
socket.dropAuth(cred.Source)
socket.creds = append(socket.creds, cred)
socket.Unlock()
return nil
})
}
func (socket *mongoSocket) loginPlain(cred Credential) error {
cmd := saslCmd{Start: 1, Mechanism: "PLAIN", Payload: []byte("\x00" + cred.Username + "\x00" + cred.Password)}
res := authResult{}
return socket.loginRun(cred.Source, &cmd, &res, func() error {
if !res.Ok {
return errors.New(res.ErrMsg)
}
socket.Lock()
socket.dropAuth(cred.Source)
socket.creds = append(socket.creds, cred)
socket.Unlock()
return nil
})
}
func (socket *mongoSocket) loginSASL(cred Credential) error {
var sasl saslStepper
var err error
if cred.Mechanism == "SCRAM-SHA-1" {
// SCRAM is handled without external libraries.
sasl = saslNewScram(cred)
} else if len(cred.ServiceHost) > 0 {
sasl, err = saslNew(cred, cred.ServiceHost)
} else {
sasl, err = saslNew(cred, socket.Server().Addr)
}
if err != nil {
return err
}
defer sasl.Close()
// The goal of this logic is to carry a locked socket until the
// local SASL step confirms the auth is valid; the socket needs to be
// locked so that concurrent action doesn't leave the socket in an
// auth state that doesn't reflect the operations that took place.
// As a simple case, imagine inverting login=>logout to logout=>login.
//
// The logic below works because the lock func isn't called concurrently.
locked := false
lock := func(b bool) {
if locked != b {
locked = b
if b {
socket.Lock()
} else {
socket.Unlock()
}
}
}
lock(true)
defer lock(false)
start := 1
cmd := saslCmd{}
res := saslResult{}
for {
payload, done, err := sasl.Step(res.Payload)
if err != nil {
return err
}
if done && res.Done {
socket.dropAuth(cred.Source)
socket.creds = append(socket.creds, cred)
break
}
lock(false)
cmd = saslCmd{
Start: start,
Continue: 1 - start,
ConversationId: res.ConversationId,
Mechanism: cred.Mechanism,
Payload: payload,
}
start = 0
err = socket.loginRun(cred.Source, &cmd, &res, func() error {
// See the comment on lock for why this is necessary.
lock(true)
if !res.Ok || res.NotOk {
return fmt.Errorf("server returned error on SASL authentication step: %s", res.ErrMsg)
}
return nil
})
if err != nil {
return err
}
if done && res.Done {
socket.dropAuth(cred.Source)
socket.creds = append(socket.creds, cred)
break
}
}
return nil
}
func saslNewScram(cred Credential) *saslScram {
credsum := md5.New()
credsum.Write([]byte(cred.Username + ":mongo:" + cred.Password))
client := scram.NewClient(sha1.New, cred.Username, hex.EncodeToString(credsum.Sum(nil)))
return &saslScram{cred: cred, client: client}
}
type saslScram struct {
cred Credential
client *scram.Client
}
func (s *saslScram) Close() {}
func (s *saslScram) Step(serverData []byte) (clientData []byte, done bool, err error) {
more := s.client.Step(serverData)
return s.client.Out(), !more, s.client.Err()
}
func (socket *mongoSocket) loginRun(db string, query, result interface{}, f func() error) error {
var mutex sync.Mutex
var replyErr error
mutex.Lock()
op := queryOp{}
op.query = query
op.collection = db + ".$cmd"
op.limit = -1
op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
defer mutex.Unlock()
if err != nil {
replyErr = err
return
}
err = bson.Unmarshal(docData, result)
if err != nil {
replyErr = err
} else {
// Must handle this within the read loop for the socket, so
// that concurrent login requests are properly ordered.
replyErr = f()
}
}
err := socket.Query(&op)
if err != nil {
return err
}
mutex.Lock() // Wait.
return replyErr
}
func (socket *mongoSocket) Logout(db string) {
socket.Lock()
cred, found := socket.dropAuth(db)
if found {
debugf("Socket %p to %s: logout: db=%q (flagged)", socket, socket.addr, db)
socket.logout = append(socket.logout, cred)
}
socket.Unlock()
}
func (socket *mongoSocket) LogoutAll() {
socket.Lock()
if l := len(socket.creds); l > 0 {
debugf("Socket %p to %s: logout all (flagged %d)", socket, socket.addr, l)
socket.logout = append(socket.logout, socket.creds...)
socket.creds = socket.creds[0:0]
}
socket.Unlock()
}
func (socket *mongoSocket) flushLogout() (ops []interface{}) {
socket.Lock()
if l := len(socket.logout); l > 0 {
debugf("Socket %p to %s: logout all (flushing %d)", socket, socket.addr, l)
for i := 0; i != l; i++ {
op := queryOp{}
op.query = &logoutCmd{1}
op.collection = socket.logout[i].Source + ".$cmd"
op.limit = -1
ops = append(ops, &op)
}
socket.logout = socket.logout[0:0]
}
socket.Unlock()
return
}
func (socket *mongoSocket) dropAuth(db string) (cred Credential, found bool) {
for i, sockCred := range socket.creds {
if sockCred.Source == db {
copy(socket.creds[i:], socket.creds[i+1:])
socket.creds = socket.creds[:len(socket.creds)-1]
return sockCred, true
}
}
return cred, false
}
func (socket *mongoSocket) dropLogout(cred Credential) (found bool) {
for i, sockCred := range socket.logout {
if sockCred == cred {
copy(socket.logout[i:], socket.logout[i+1:])
socket.logout = socket.logout[:len(socket.logout)-1]
return true
}
}
return false
}

1180
vendor/gopkg.in/mgo.v2/auth_test.go generated vendored Normal file

File diff suppressed because it is too large

25
vendor/gopkg.in/mgo.v2/bson/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,25 @@
BSON library for Go
Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

738
vendor/gopkg.in/mgo.v2/bson/bson.go generated vendored Normal file
View File

@@ -0,0 +1,738 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Package bson is an implementation of the BSON specification for Go:
//
// http://bsonspec.org
//
// It was created as part of the mgo MongoDB driver for Go, but is standalone
// and may be used on its own without the driver.
package bson
import (
"bytes"
"crypto/md5"
"crypto/rand"
"encoding/binary"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"reflect"
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
)
// --------------------------------------------------------------------------
// The public API.
// A value implementing the bson.Getter interface will have its GetBSON
// method called when the given value has to be marshalled, and the result
// of this method will be marshaled in place of the actual object.
//
// If GetBSON returns a non-nil error, the marshalling procedure
// will stop and error out with the provided value.
type Getter interface {
GetBSON() (interface{}, error)
}
// A value implementing the bson.Setter interface will receive the BSON
// value via the SetBSON method during unmarshaling, and the object
// itself will not be changed as usual.
//
// If setting the value works, the method should return nil or alternatively
// bson.SetZero to set the respective field to its zero value (nil for
// pointer types). If SetBSON returns a value of type bson.TypeError, the
// BSON value will be omitted from a map or slice being decoded and the
// unmarshalling will continue. If it returns any other non-nil error, the
// unmarshalling procedure will stop and error out with the provided value.
//
// This interface is generally useful in pointer receivers, since the method
// will want to change the receiver. A type field that implements the Setter
// interface doesn't have to be a pointer, though.
//
// Unlike the usual behavior, unmarshalling onto a value that implements a
// Setter interface will NOT reset the value to its zero state. This allows
// the value to decide by itself how to be unmarshalled.
//
// For example:
//
// type MyString string
//
// func (s *MyString) SetBSON(raw bson.Raw) error {
// return raw.Unmarshal(s)
// }
//
type Setter interface {
SetBSON(raw Raw) error
}
// SetZero may be returned from a SetBSON method to have the value set to
// its respective zero value. When used in pointer values, this will set the
// field to nil rather than to the pre-allocated value.
var SetZero = errors.New("set to zero")
// M is a convenient alias for a map[string]interface{} map, useful for
// dealing with BSON in a native way. For instance:
//
// bson.M{"a": 1, "b": true}
//
// There's no special handling for this type in addition to what's done anyway
// for an equivalent map type. Elements in the map will be dumped in an
// undefined order. See also the bson.D type for an ordered alternative.
type M map[string]interface{}
// D represents a BSON document containing ordered elements. For example:
//
// bson.D{{"a", 1}, {"b", true}}
//
// In some situations, such as when creating indexes for MongoDB, the order in
// which the elements are defined is important. If the order is not important,
// using a map is generally more comfortable. See bson.M and bson.RawD.
type D []DocElem
// DocElem is an element of the bson.D document representation.
type DocElem struct {
Name string
Value interface{}
}
// Map returns a map out of the ordered element name/value pairs in d.
func (d D) Map() (m M) {
m = make(M, len(d))
for _, item := range d {
m[item.Name] = item.Value
}
return m
}
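// Example (a sketch):
//
//	d := bson.D{{"a", 1}, {"b", true}}
//	m := d.Map() // bson.M{"a": 1, "b": true}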
// The Raw type represents raw unprocessed BSON documents and elements.
// Kind is the kind of element as defined per the BSON specification, and
// Data is the raw unprocessed data for the respective element.
// Using this type it is possible to unmarshal or marshal values partially.
//
// Relevant documentation:
//
// http://bsonspec.org/#/specification
//
type Raw struct {
Kind byte
Data []byte
}
// RawD represents a BSON document containing raw unprocessed elements.
// This low-level representation may be useful when lazily processing
// documents of uncertain content, or when manipulating the raw content
// documents in general.
type RawD []RawDocElem
// See the RawD type.
type RawDocElem struct {
Name string
Value Raw
}
// ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes
// long. MongoDB objects by default have such a property set in their "_id"
// property.
//
// http://www.mongodb.org/display/DOCS/Object+IDs
type ObjectId string
// ObjectIdHex returns an ObjectId from the provided hex representation.
// Calling this function with an invalid hex representation will
// cause a runtime panic. See the IsObjectIdHex function.
func ObjectIdHex(s string) ObjectId {
d, err := hex.DecodeString(s)
if err != nil || len(d) != 12 {
panic(fmt.Sprintf("invalid input to ObjectIdHex: %q", s))
}
return ObjectId(d)
}
// IsObjectIdHex returns whether s is a valid hex representation of
// an ObjectId. See the ObjectIdHex function.
func IsObjectIdHex(s string) bool {
if len(s) != 24 {
return false
}
_, err := hex.DecodeString(s)
return err == nil
}
// objectIdCounter is atomically incremented when generating a new ObjectId
// using the NewObjectId() function. It's used as the counter part of an id.
var objectIdCounter uint32 = readRandomUint32()
// readRandomUint32 returns a random objectIdCounter.
func readRandomUint32() uint32 {
var b [4]byte
_, err := io.ReadFull(rand.Reader, b[:])
if err != nil {
panic(fmt.Errorf("cannot read random object id: %v", err))
}
return uint32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24))
}
// machineId stores the machine id, generated once and used in subsequent
// calls to the NewObjectId function.
var machineId = readMachineId()
var processId = os.Getpid()
// readMachineId generates and returns a machine id.
// If it cannot get the hostname it falls back to random bytes, and panics only if that also fails.
func readMachineId() []byte {
var sum [3]byte
id := sum[:]
hostname, err1 := os.Hostname()
if err1 != nil {
_, err2 := io.ReadFull(rand.Reader, id)
if err2 != nil {
panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2))
}
return id
}
hw := md5.New()
hw.Write([]byte(hostname))
copy(id, hw.Sum(nil))
return id
}
// NewObjectId returns a new unique ObjectId.
func NewObjectId() ObjectId {
var b [12]byte
// Timestamp, 4 bytes, big endian
binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix()))
// Machine, first 3 bytes of md5(hostname)
b[4] = machineId[0]
b[5] = machineId[1]
b[6] = machineId[2]
// Pid, 2 bytes, specs don't specify endianness, but we use big endian.
b[7] = byte(processId >> 8)
b[8] = byte(processId)
// Increment, 3 bytes, big endian
i := atomic.AddUint32(&objectIdCounter, 1)
b[9] = byte(i >> 16)
b[10] = byte(i >> 8)
b[11] = byte(i)
return ObjectId(b[:])
}
// NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled
// with the provided number of seconds from epoch UTC, and all other parts
// filled with zeroes. It's not safe to insert a document with an id generated
// by this method; it is useful only for queries to find documents with ids
// generated before or after the specified timestamp.
func NewObjectIdWithTime(t time.Time) ObjectId {
var b [12]byte
binary.BigEndian.PutUint32(b[:4], uint32(t.Unix()))
return ObjectId(string(b[:]))
}
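// Example (a sketch; the time value and query shape are assumptions): find
// documents whose ids were generated before a given time by comparing on _id:
//
//	oid := bson.NewObjectIdWithTime(t)
//	query := bson.M{"_id": bson.M{"$lt": oid}}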
// String returns a hex string representation of the id.
// Example: ObjectIdHex("4d88e15b60f486e428412dc9").
func (id ObjectId) String() string {
return fmt.Sprintf(`ObjectIdHex("%x")`, string(id))
}
// Hex returns a hex representation of the ObjectId.
func (id ObjectId) Hex() string {
return hex.EncodeToString([]byte(id))
}
// MarshalJSON turns a bson.ObjectId into a json.Marshaller.
func (id ObjectId) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%x"`, string(id))), nil
}
var nullBytes = []byte("null")
// UnmarshalJSON turns *bson.ObjectId into a json.Unmarshaller.
func (id *ObjectId) UnmarshalJSON(data []byte) error {
if len(data) > 0 && (data[0] == '{' || data[0] == 'O') {
var v struct {
Id json.RawMessage `json:"$oid"`
Func struct {
Id json.RawMessage
} `json:"$oidFunc"`
}
err := jdec(data, &v)
if err == nil {
if len(v.Id) > 0 {
data = []byte(v.Id)
} else {
data = []byte(v.Func.Id)
}
}
}
if len(data) == 2 && data[0] == '"' && data[1] == '"' || bytes.Equal(data, nullBytes) {
*id = ""
return nil
}
if len(data) != 26 || data[0] != '"' || data[25] != '"' {
return errors.New(fmt.Sprintf("invalid ObjectId in JSON: %s", string(data)))
}
var buf [12]byte
_, err := hex.Decode(buf[:], data[1:25])
if err != nil {
return errors.New(fmt.Sprintf("invalid ObjectId in JSON: %s (%s)", string(data), err))
}
*id = ObjectId(string(buf[:]))
return nil
}
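// Illustrative sketch (not part of the original source): with the methods
// above, ObjectId round-trips through encoding/json as a quoted 24-char
// hex string.
func exampleObjectIdJSON() {
	id := ObjectIdHex("4d88e15b60f486e428412dc9")
	out, err := json.Marshal(id) // `"4d88e15b60f486e428412dc9"`
	if err != nil {
		panic(err)
	}
	var back ObjectId
	if err := json.Unmarshal(out, &back); err != nil {
		panic(err)
	}
	_ = back == id // true
}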
// MarshalText implements the encoding.TextMarshaler interface.
func (id ObjectId) MarshalText() ([]byte, error) {
return []byte(fmt.Sprintf("%x", string(id))), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
func (id *ObjectId) UnmarshalText(data []byte) error {
if len(data) == 1 && data[0] == ' ' || len(data) == 0 {
*id = ""
return nil
}
if len(data) != 24 {
return fmt.Errorf("invalid ObjectId: %s", data)
}
var buf [12]byte
_, err := hex.Decode(buf[:], data[:])
if err != nil {
return fmt.Errorf("invalid ObjectId: %s (%s)", data, err)
}
*id = ObjectId(string(buf[:]))
return nil
}
// Valid returns true if id is valid. A valid id must contain exactly 12 bytes.
func (id ObjectId) Valid() bool {
return len(id) == 12
}
// byteSlice returns byte slice of id from start to end.
// Calling this function with an invalid id will cause a runtime panic.
func (id ObjectId) byteSlice(start, end int) []byte {
if len(id) != 12 {
panic(fmt.Sprintf("invalid ObjectId: %q", string(id)))
}
return []byte(string(id)[start:end])
}
// Time returns the timestamp part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Time() time.Time {
// First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch.
secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4)))
return time.Unix(secs, 0)
}
// Machine returns the 3-byte machine id part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Machine() []byte {
return id.byteSlice(4, 7)
}
// Pid returns the process id part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Pid() uint16 {
return binary.BigEndian.Uint16(id.byteSlice(7, 9))
}
// Counter returns the incrementing value part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Counter() int32 {
b := id.byteSlice(9, 12)
// Counter is stored as big-endian 3-byte value
return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]))
}
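// Illustrative sketch (not part of the original source): reading back the
// four parts of a freshly generated id with the accessors above.
func exampleObjectIdParts() {
	id := NewObjectId()
	_ = id.Time()    // creation time, seconds precision
	_ = id.Machine() // first 3 bytes of md5(hostname)
	_ = id.Pid()     // process id, big endian
	_ = id.Counter() // per-process atomic counter
}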
// The Symbol type is similar to a string and is used in languages with a
// distinct symbol type.
type Symbol string
// Now returns the current time with millisecond precision. MongoDB stores
// timestamps with the same precision, so a Time returned from this method
// will not change after a roundtrip to the database. That is the only reason
// this function exists; otherwise time.Now works just as well.
func Now() time.Time {
return time.Unix(0, time.Now().UnixNano()/1e6*1e6)
}
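// Illustrative sketch (not part of the original source): Now truncates to
// millisecond precision, so the value is already database-safe.
func exampleNow() {
	t := Now()
	_ = t.UnixNano()%1e6 == 0 // true: sub-millisecond digits are zeroed
}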
// MongoTimestamp is a special internal type used by MongoDB that for some
// strange reason has its own datatype defined in BSON.
type MongoTimestamp int64
type orderKey int64
// MaxKey is a special value that compares higher than all other possible BSON
// values in a MongoDB database.
var MaxKey = orderKey(1<<63 - 1)
// MinKey is a special value that compares lower than all other possible BSON
// values in a MongoDB database.
var MinKey = orderKey(-1 << 63)
type undefined struct{}
// Undefined represents the undefined BSON value.
var Undefined undefined
// Binary is a representation for non-standard binary values. Any kind should
// work, but the following are known as of this writing:
//
// 0x00 - Generic. This is decoded as []byte(data), not Binary{0x00, data}.
// 0x01 - Function (!?)
// 0x02 - Obsolete generic.
// 0x03 - UUID
// 0x05 - MD5
// 0x80 - User defined.
//
type Binary struct {
Kind byte
Data []byte
}
// RegEx represents a regular expression. The Options field may contain
// individual characters defining the way in which the pattern should be
// applied, and must be sorted. Valid options as of this writing are 'i' for
// case insensitive matching, 'm' for multi-line matching, 'x' for verbose
// mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all
// mode (a '.' matches everything), and 'u' to make \w, \W, and similar match
// unicode. The value of the Options parameter is not verified before being
// marshaled into the BSON format.
type RegEx struct {
Pattern string
Options string
}
// JavaScript is a type that holds JavaScript code. If Scope is non-nil, it
// will be marshaled as a mapping from identifiers to values that may be
// used when evaluating the provided Code.
type JavaScript struct {
Code string
Scope interface{}
}
// DBPointer refers to a document id in a namespace.
//
// This type is deprecated in the BSON specification and should not be used
// except for backwards compatibility with ancient applications.
type DBPointer struct {
Namespace string
Id ObjectId
}
const initialBufferSize = 64
func handleErr(err *error) {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
panic(r)
} else if _, ok := r.(externalPanic); ok {
panic(r)
} else if s, ok := r.(string); ok {
*err = errors.New(s)
} else if e, ok := r.(error); ok {
*err = e
} else {
panic(r)
}
}
}
// Marshal serializes the in value, which may be a map or a struct value.
// In the case of struct values, only exported fields will be serialized,
// and the order of serialized fields will match that of the struct itself.
// The lowercased field name is used as the key for each exported field,
// but this behavior may be changed using the respective field tag.
// The tag may also contain flags to tweak the marshalling behavior for
// the field. The tag formats accepted are:
//
// "[<key>][,<flag1>[,<flag2>]]"
//
// `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
// omitempty Only include the field if it's not set to the zero
// value for the type or to empty slices or maps.
//
// minsize Marshal an int64 value as an int32, if that's feasible
// while preserving the numeric value.
//
// inline Inline the field, which must be a struct or a map,
// causing all of its fields or keys to be processed as if
// they were part of the outer struct. For maps, keys must
// not conflict with the bson keys of other struct fields.
//
// Some examples:
//
// type T struct {
// A bool
// B int "myb"
// C string "myc,omitempty"
// D string `bson:",omitempty" json:"jsonkey"`
// E int64 ",minsize"
// F int64 "myf,omitempty,minsize"
// }
//
func Marshal(in interface{}) (out []byte, err error) {
defer handleErr(&err)
e := &encoder{make([]byte, 0, initialBufferSize)}
e.addDoc(reflect.ValueOf(in))
return e.out, nil
}
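// Illustrative sketch (not part of the original source): marshalling a
// tagged struct as documented above.
func exampleMarshal() {
	type person struct {
		Name string `bson:"name"`
		Age  int    `bson:"age,omitempty"`
	}
	data, err := Marshal(person{Name: "Ada"})
	if err != nil {
		panic(err)
	}
	_ = data // encodes {"name": "Ada"}; "age" is omitted as a zero value
}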
// Unmarshal deserializes data from in into the out value. The out value
// must be a map, a pointer to a struct, or a pointer to a bson.D value.
// In the case of struct values, only exported fields will be deserialized.
// The lowercased field name is used as the key for each exported field,
// but this behavior may be changed using the respective field tag.
// The tag may also contain flags to tweak the unmarshalling behavior for
// the field. The tag formats accepted are:
//
// "[<key>][,<flag1>[,<flag2>]]"
//
// `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported during unmarshal (see the
// Marshal function for other flags):
//
// inline Inline the field, which must be a struct or a map.
// Inlined structs are handled as if its fields were part
// of the outer struct. An inlined map causes keys that do
// not match any other struct field to be inserted in the
// map rather than being discarded as usual.
//
// The target field or element types of out may not necessarily match
// the BSON values of the provided data. The following conversions are
// made automatically:
//
// - Numeric types are converted if at least the integer part of the
// value would be preserved correctly
// - Bools are converted to numeric types as 1 or 0
// - Numeric types are converted to bools as true if not 0 or false otherwise
// - Binary and string BSON data is converted to a string, array or byte slice
//
// If the value would not fit the type and cannot be converted, it's
// silently skipped.
//
// Pointer values are initialized when necessary.
func Unmarshal(in []byte, out interface{}) (err error) {
if raw, ok := out.(*Raw); ok {
raw.Kind = 3
raw.Data = in
return nil
}
defer handleErr(&err)
v := reflect.ValueOf(out)
switch v.Kind() {
case reflect.Ptr:
fallthrough
case reflect.Map:
d := newDecoder(in)
d.readDocTo(v)
case reflect.Struct:
return errors.New("Unmarshal can't deal with struct values. Use a pointer.")
default:
return errors.New("Unmarshal needs a map or a pointer to a struct.")
}
return nil
}
// Unmarshal deserializes raw into the out value. If the out value type
// is not compatible with raw, a *bson.TypeError is returned.
//
// See the Unmarshal function documentation for more details on the
// unmarshalling process.
func (raw Raw) Unmarshal(out interface{}) (err error) {
defer handleErr(&err)
v := reflect.ValueOf(out)
switch v.Kind() {
case reflect.Ptr:
v = v.Elem()
fallthrough
case reflect.Map:
d := newDecoder(raw.Data)
good := d.readElemTo(v, raw.Kind)
if !good {
return &TypeError{v.Type(), raw.Kind}
}
case reflect.Struct:
return errors.New("Raw Unmarshal can't deal with struct values. Use a pointer.")
default:
return errors.New("Raw Unmarshal needs a map or a valid pointer.")
}
return nil
}
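// Illustrative sketch (not part of the original source): a round trip
// through Marshal and Unmarshal, including delayed decoding via Raw.
func exampleUnmarshalRoundTrip() {
	data, err := Marshal(M{"n": 1})
	if err != nil {
		panic(err)
	}
	var out struct {
		N int `bson:"n"`
	}
	_ = Unmarshal(data, &out) // out.N == 1
	var raw Raw
	_ = Unmarshal(data, &raw) // captures the raw document bytes
	var m M
	_ = raw.Unmarshal(&m) // decodes them later on demand
}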
type TypeError struct {
Type reflect.Type
Kind byte
}
func (e *TypeError) Error() string {
return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String())
}
// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes
type structInfo struct {
FieldsMap map[string]fieldInfo
FieldsList []fieldInfo
InlineMap int
Zero reflect.Value
}
type fieldInfo struct {
Key string
Num int
OmitEmpty bool
MinSize bool
Inline []int
}
var structMap = make(map[reflect.Type]*structInfo)
var structMapMutex sync.RWMutex
type externalPanic string
func (e externalPanic) String() string {
return string(e)
}
func getStructInfo(st reflect.Type) (*structInfo, error) {
structMapMutex.RLock()
sinfo, found := structMap[st]
structMapMutex.RUnlock()
if found {
return sinfo, nil
}
n := st.NumField()
fieldsMap := make(map[string]fieldInfo)
fieldsList := make([]fieldInfo, 0, n)
inlineMap := -1
for i := 0; i != n; i++ {
field := st.Field(i)
if field.PkgPath != "" && !field.Anonymous {
continue // Private field
}
info := fieldInfo{Num: i}
tag := field.Tag.Get("bson")
if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
tag = string(field.Tag)
}
if tag == "-" {
continue
}
inline := false
fields := strings.Split(tag, ",")
if len(fields) > 1 {
for _, flag := range fields[1:] {
switch flag {
case "omitempty":
info.OmitEmpty = true
case "minsize":
info.MinSize = true
case "inline":
inline = true
default:
msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
panic(externalPanic(msg))
}
}
tag = fields[0]
}
if inline {
switch field.Type.Kind() {
case reflect.Map:
if inlineMap >= 0 {
return nil, errors.New("Multiple ,inline maps in struct " + st.String())
}
if field.Type.Key() != reflect.TypeOf("") {
return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
}
inlineMap = info.Num
case reflect.Struct:
sinfo, err := getStructInfo(field.Type)
if err != nil {
return nil, err
}
for _, finfo := range sinfo.FieldsList {
if _, found := fieldsMap[finfo.Key]; found {
msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
if finfo.Inline == nil {
finfo.Inline = []int{i, finfo.Num}
} else {
finfo.Inline = append([]int{i}, finfo.Inline...)
}
fieldsMap[finfo.Key] = finfo
fieldsList = append(fieldsList, finfo)
}
default:
panic("Option ,inline needs a struct value or map field")
}
continue
}
if tag != "" {
info.Key = tag
} else {
info.Key = strings.ToLower(field.Name)
}
if _, found = fieldsMap[info.Key]; found {
msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
fieldsList = append(fieldsList, info)
fieldsMap[info.Key] = info
}
sinfo = &structInfo{
fieldsMap,
fieldsList,
inlineMap,
reflect.New(st).Elem(),
}
structMapMutex.Lock()
structMap[st] = sinfo
structMapMutex.Unlock()
return sinfo, nil
}
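// Illustrative sketch (not part of the original source): the field info
// computed above maps bson tags to document keys.
func exampleStructInfo() {
	type T struct {
		A int
		B int `bson:"myb,omitempty,minsize"`
	}
	sinfo, _ := getStructInfo(reflect.TypeOf(T{}))
	_ = sinfo.FieldsMap["a"].Num       // 0: key defaults to the lowercased field name
	_ = sinfo.FieldsMap["myb"].MinSize // true: flags are parsed from the tag
}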

1832
vendor/gopkg.in/mgo.v2/bson/bson_test.go generated vendored Normal file

File diff suppressed because it is too large

310
vendor/gopkg.in/mgo.v2/bson/decimal.go generated vendored Normal file
View File

@@ -0,0 +1,310 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package bson
import (
"fmt"
"strconv"
"strings"
)
// Decimal128 holds decimal128 BSON values.
type Decimal128 struct {
h, l uint64
}
func (d Decimal128) String() string {
var pos int // positive sign
var e int // exponent
var h, l uint64 // significand high/low
if d.h>>63&1 == 0 {
pos = 1
}
switch d.h >> 58 & (1<<5 - 1) {
case 0x1F:
return "NaN"
case 0x1E:
return "-Inf"[pos:]
}
l = d.l
if d.h>>61&3 == 3 {
// Bits: 1*sign 2*ignored 14*exponent 111*significand.
// Implicit 0b100 prefix in significand.
e = int(d.h>>47&(1<<14-1)) - 6176
//h = 4<<47 | d.h&(1<<47-1)
// Spec says all of these values are out of range.
h, l = 0, 0
} else {
// Bits: 1*sign 14*exponent 113*significand
e = int(d.h>>49&(1<<14-1)) - 6176
h = d.h & (1<<49 - 1)
}
// This case would be handled by the loop below, but it is trivial and common enough to special-case here.
if h == 0 && l == 0 && e == 0 {
return "-0"[pos:]
}
var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero.
var last = len(repr)
var i = len(repr)
var dot = len(repr) + e
var rem uint32
Loop:
for d9 := 0; d9 < 5; d9++ {
h, l, rem = divmod(h, l, 1e9)
for d1 := 0; d1 < 9; d1++ {
// Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc.
if i < len(repr) && (dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0)) {
e += len(repr) - i
i--
repr[i] = '.'
last = i - 1
dot = len(repr) // Unmark.
}
c := '0' + byte(rem%10)
rem /= 10
i--
repr[i] = c
// Handle "0E+3", "1E+3", etc.
if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-5 || e > 0) {
last = i
break Loop
}
if c != '0' {
last = i
}
// Break early. The loop would terminate correctly without this, but there is no point in continuing.
if dot > i && l == 0 && h == 0 && rem == 0 {
break Loop
}
}
}
repr[last-1] = '-'
last--
if e > 0 {
return string(repr[last+pos:]) + "E+" + strconv.Itoa(e)
}
if e < 0 {
return string(repr[last+pos:]) + "E" + strconv.Itoa(e)
}
return string(repr[last+pos:])
}
func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) {
div64 := uint64(div)
a := h >> 32
aq := a / div64
ar := a % div64
b := ar<<32 + h&(1<<32-1)
bq := b / div64
br := b % div64
c := br<<32 + l>>32
cq := c / div64
cr := c % div64
d := cr<<32 + l&(1<<32-1)
dq := d / div64
dr := d % div64
return (aq<<32 | bq), (cq<<32 | dq), uint32(dr)
}
var dNaN = Decimal128{0x1F << 58, 0}
var dPosInf = Decimal128{0x1E << 58, 0}
var dNegInf = Decimal128{0x3E << 58, 0}
func dErr(s string) (Decimal128, error) {
return dNaN, fmt.Errorf("cannot parse %q as a decimal128", s)
}
func ParseDecimal128(s string) (Decimal128, error) {
orig := s
if s == "" {
return dErr(orig)
}
neg := s[0] == '-'
if neg || s[0] == '+' {
s = s[1:]
}
if (len(s) == 3 || len(s) == 8) && (s[0] == 'N' || s[0] == 'n' || s[0] == 'I' || s[0] == 'i') {
if s == "NaN" || s == "nan" || strings.EqualFold(s, "nan") {
return dNaN, nil
}
if s == "Inf" || s == "inf" || strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") {
if neg {
return dNegInf, nil
}
return dPosInf, nil
}
return dErr(orig)
}
var h, l uint64
var e int
var add, ovr uint32
var mul uint32 = 1
var dot = -1
var digits = 0
var i = 0
for i < len(s) {
c := s[i]
if mul == 1e9 {
h, l, ovr = muladd(h, l, mul, add)
mul, add = 1, 0
if ovr > 0 || h&((1<<15-1)<<49) > 0 {
return dErr(orig)
}
}
if c >= '0' && c <= '9' {
i++
if c > '0' || digits > 0 {
digits++
}
if digits > 34 {
if c == '0' {
// Exact rounding.
e++
continue
}
return dErr(orig)
}
mul *= 10
add *= 10
add += uint32(c - '0')
continue
}
if c == '.' {
i++
if dot >= 0 || i == 1 && len(s) == 1 {
return dErr(orig)
}
if i == len(s) {
break
}
if s[i] < '0' || s[i] > '9' || e > 0 {
return dErr(orig)
}
dot = i
continue
}
break
}
if i == 0 {
return dErr(orig)
}
if mul > 1 {
h, l, ovr = muladd(h, l, mul, add)
if ovr > 0 || h&((1<<15-1)<<49) > 0 {
return dErr(orig)
}
}
if dot >= 0 {
e += dot - i
}
if i+1 < len(s) && (s[i] == 'E' || s[i] == 'e') {
i++
eneg := s[i] == '-'
if eneg || s[i] == '+' {
i++
if i == len(s) {
return dErr(orig)
}
}
n := 0
for i < len(s) && n < 1e4 {
c := s[i]
i++
if c < '0' || c > '9' {
return dErr(orig)
}
n *= 10
n += int(c - '0')
}
if eneg {
n = -n
}
e += n
for e < -6176 {
// Subnormal.
var div uint32 = 1
for div < 1e9 && e < -6176 {
div *= 10
e++
}
var rem uint32
h, l, rem = divmod(h, l, div)
if rem > 0 {
return dErr(orig)
}
}
for e > 6111 {
// Clamped.
var mul uint32 = 1
for mul < 1e9 && e > 6111 {
mul *= 10
e--
}
h, l, ovr = muladd(h, l, mul, 0)
if ovr > 0 || h&((1<<15-1)<<49) > 0 {
return dErr(orig)
}
}
if e < -6176 || e > 6111 {
return dErr(orig)
}
}
if i < len(s) {
return dErr(orig)
}
h |= uint64(e+6176) & uint64(1<<14-1) << 49
if neg {
h |= 1 << 63
}
return Decimal128{h, l}, nil
}
func muladd(h, l uint64, mul uint32, add uint32) (resh, resl uint64, overflow uint32) {
mul64 := uint64(mul)
a := mul64 * (l & (1<<32 - 1))
b := a>>32 + mul64*(l>>32)
c := b>>32 + mul64*(h&(1<<32-1))
d := c>>32 + mul64*(h>>32)
a = a&(1<<32-1) + uint64(add)
b = b&(1<<32-1) + a>>32
c = c&(1<<32-1) + b>>32
d = d&(1<<32-1) + c>>32
return (d<<32 | c&(1<<32-1)), (b<<32 | a&(1<<32-1)), uint32(d >> 32)
}
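// Illustrative sketch (not part of the original source): parsing and
// printing round-trip simple decimal128 values through the helpers above.
func exampleDecimal128() {
	d, err := ParseDecimal128("1.5")
	if err != nil {
		panic(err)
	}
	_ = d.String() // "1.5"
	nan, _ := ParseDecimal128("NaN")
	_ = nan.String() // "NaN"
}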

4109
vendor/gopkg.in/mgo.v2/bson/decimal_test.go generated vendored Normal file

File diff suppressed because it is too large

849
vendor/gopkg.in/mgo.v2/bson/decode.go generated vendored Normal file
View File

@@ -0,0 +1,849 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// gobson - BSON library for Go.
package bson
import (
"fmt"
"math"
"net/url"
"reflect"
"strconv"
"sync"
"time"
)
type decoder struct {
in []byte
i int
docType reflect.Type
}
var typeM = reflect.TypeOf(M{})
func newDecoder(in []byte) *decoder {
return &decoder{in, 0, typeM}
}
// --------------------------------------------------------------------------
// Some helper functions.
func corrupted() {
panic("Document is corrupted")
}
func settableValueOf(i interface{}) reflect.Value {
v := reflect.ValueOf(i)
sv := reflect.New(v.Type()).Elem()
sv.Set(v)
return sv
}
// --------------------------------------------------------------------------
// Unmarshaling of documents.
const (
setterUnknown = iota
setterNone
setterType
setterAddr
)
var setterStyles map[reflect.Type]int
var setterIface reflect.Type
var setterMutex sync.RWMutex
func init() {
var iface Setter
setterIface = reflect.TypeOf(&iface).Elem()
setterStyles = make(map[reflect.Type]int)
}
func setterStyle(outt reflect.Type) int {
setterMutex.RLock()
style := setterStyles[outt]
setterMutex.RUnlock()
if style == setterUnknown {
setterMutex.Lock()
defer setterMutex.Unlock()
if outt.Implements(setterIface) {
setterStyles[outt] = setterType
} else if reflect.PtrTo(outt).Implements(setterIface) {
setterStyles[outt] = setterAddr
} else {
setterStyles[outt] = setterNone
}
style = setterStyles[outt]
}
return style
}
func getSetter(outt reflect.Type, out reflect.Value) Setter {
style := setterStyle(outt)
if style == setterNone {
return nil
}
if style == setterAddr {
if !out.CanAddr() {
return nil
}
out = out.Addr()
} else if outt.Kind() == reflect.Ptr && out.IsNil() {
out.Set(reflect.New(outt.Elem()))
}
return out.Interface().(Setter)
}
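// Illustrative sketch (not part of the original source): a type whose
// pointer implements Setter is routed through getSetter above instead of
// plain reflection; returning SetZero leaves the field as its zero value.
type nonEmptyString string

func (s *nonEmptyString) SetBSON(raw Raw) error {
	var plain string
	if err := raw.Unmarshal(&plain); err != nil {
		return err
	}
	if plain == "" {
		return SetZero
	}
	*s = nonEmptyString(plain)
	return nil
}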
func clearMap(m reflect.Value) {
var none reflect.Value
for _, k := range m.MapKeys() {
m.SetMapIndex(k, none)
}
}
func (d *decoder) readDocTo(out reflect.Value) {
var elemType reflect.Type
outt := out.Type()
outk := outt.Kind()
for {
if outk == reflect.Ptr && out.IsNil() {
out.Set(reflect.New(outt.Elem()))
}
if setter := getSetter(outt, out); setter != nil {
var raw Raw
d.readDocTo(reflect.ValueOf(&raw))
err := setter.SetBSON(raw)
if _, ok := err.(*TypeError); err != nil && !ok {
panic(err)
}
return
}
if outk == reflect.Ptr {
out = out.Elem()
outt = out.Type()
outk = out.Kind()
continue
}
break
}
var fieldsMap map[string]fieldInfo
var inlineMap reflect.Value
start := d.i
origout := out
if outk == reflect.Interface {
if d.docType.Kind() == reflect.Map {
mv := reflect.MakeMap(d.docType)
out.Set(mv)
out = mv
} else {
dv := reflect.New(d.docType).Elem()
out.Set(dv)
out = dv
}
outt = out.Type()
outk = outt.Kind()
}
docType := d.docType
keyType := typeString
convertKey := false
switch outk {
case reflect.Map:
keyType = outt.Key()
if keyType.Kind() != reflect.String {
panic("BSON map must have string keys. Got: " + outt.String())
}
if keyType != typeString {
convertKey = true
}
elemType = outt.Elem()
if elemType == typeIface {
d.docType = outt
}
if out.IsNil() {
out.Set(reflect.MakeMap(out.Type()))
} else if out.Len() > 0 {
clearMap(out)
}
case reflect.Struct:
if outt != typeRaw {
sinfo, err := getStructInfo(out.Type())
if err != nil {
panic(err)
}
fieldsMap = sinfo.FieldsMap
out.Set(sinfo.Zero)
if sinfo.InlineMap != -1 {
inlineMap = out.Field(sinfo.InlineMap)
if !inlineMap.IsNil() && inlineMap.Len() > 0 {
clearMap(inlineMap)
}
elemType = inlineMap.Type().Elem()
if elemType == typeIface {
d.docType = inlineMap.Type()
}
}
}
case reflect.Slice:
switch outt.Elem() {
case typeDocElem:
origout.Set(d.readDocElems(outt))
return
case typeRawDocElem:
origout.Set(d.readRawDocElems(outt))
return
}
fallthrough
default:
panic("Unsupported document type for unmarshalling: " + out.Type().String())
}
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
for d.in[d.i] != '\x00' {
kind := d.readByte()
name := d.readCStr()
if d.i >= end {
corrupted()
}
switch outk {
case reflect.Map:
e := reflect.New(elemType).Elem()
if d.readElemTo(e, kind) {
k := reflect.ValueOf(name)
if convertKey {
k = k.Convert(keyType)
}
out.SetMapIndex(k, e)
}
case reflect.Struct:
if outt == typeRaw {
d.dropElem(kind)
} else {
if info, ok := fieldsMap[name]; ok {
if info.Inline == nil {
d.readElemTo(out.Field(info.Num), kind)
} else {
d.readElemTo(out.FieldByIndex(info.Inline), kind)
}
} else if inlineMap.IsValid() {
if inlineMap.IsNil() {
inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
}
e := reflect.New(elemType).Elem()
if d.readElemTo(e, kind) {
inlineMap.SetMapIndex(reflect.ValueOf(name), e)
}
} else {
d.dropElem(kind)
}
}
case reflect.Slice:
}
if d.i >= end {
corrupted()
}
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
d.docType = docType
if outt == typeRaw {
out.Set(reflect.ValueOf(Raw{0x03, d.in[start:d.i]}))
}
}
func (d *decoder) readArrayDocTo(out reflect.Value) {
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
i := 0
l := out.Len()
for d.in[d.i] != '\x00' {
if i >= l {
panic("Length mismatch on array field")
}
kind := d.readByte()
for d.i < end && d.in[d.i] != '\x00' {
d.i++
}
if d.i >= end {
corrupted()
}
d.i++
d.readElemTo(out.Index(i), kind)
if d.i >= end {
corrupted()
}
i++
}
if i != l {
panic("Length mismatch on array field")
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
}
func (d *decoder) readSliceDoc(t reflect.Type) interface{} {
tmp := make([]reflect.Value, 0, 8)
elemType := t.Elem()
if elemType == typeRawDocElem {
d.dropElem(0x04)
return reflect.Zero(t).Interface()
}
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
for d.in[d.i] != '\x00' {
kind := d.readByte()
for d.i < end && d.in[d.i] != '\x00' {
d.i++
}
if d.i >= end {
corrupted()
}
d.i++
e := reflect.New(elemType).Elem()
if d.readElemTo(e, kind) {
tmp = append(tmp, e)
}
if d.i >= end {
corrupted()
}
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
n := len(tmp)
slice := reflect.MakeSlice(t, n, n)
for i := 0; i != n; i++ {
slice.Index(i).Set(tmp[i])
}
return slice.Interface()
}
var typeSlice = reflect.TypeOf([]interface{}{})
var typeIface = typeSlice.Elem()
func (d *decoder) readDocElems(typ reflect.Type) reflect.Value {
docType := d.docType
d.docType = typ
slice := make([]DocElem, 0, 8)
d.readDocWith(func(kind byte, name string) {
e := DocElem{Name: name}
v := reflect.ValueOf(&e.Value)
if d.readElemTo(v.Elem(), kind) {
slice = append(slice, e)
}
})
slicev := reflect.New(typ).Elem()
slicev.Set(reflect.ValueOf(slice))
d.docType = docType
return slicev
}
func (d *decoder) readRawDocElems(typ reflect.Type) reflect.Value {
docType := d.docType
d.docType = typ
slice := make([]RawDocElem, 0, 8)
d.readDocWith(func(kind byte, name string) {
e := RawDocElem{Name: name}
v := reflect.ValueOf(&e.Value)
if d.readElemTo(v.Elem(), kind) {
slice = append(slice, e)
}
})
slicev := reflect.New(typ).Elem()
slicev.Set(reflect.ValueOf(slice))
d.docType = docType
return slicev
}
func (d *decoder) readDocWith(f func(kind byte, name string)) {
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
for d.in[d.i] != '\x00' {
kind := d.readByte()
name := d.readCStr()
if d.i >= end {
corrupted()
}
f(kind, name)
if d.i >= end {
corrupted()
}
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
}
// --------------------------------------------------------------------------
// Unmarshaling of individual elements within a document.
var blackHole = settableValueOf(struct{}{})
func (d *decoder) dropElem(kind byte) {
d.readElemTo(blackHole, kind)
}
// Attempt to decode an element from the document and put it into out.
// If the types are not compatible, the returned ok value will be
// false and out will be unchanged.
func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) {
start := d.i
if kind == 0x03 {
// Delegate unmarshaling of documents.
outt := out.Type()
outk := out.Kind()
switch outk {
case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map:
d.readDocTo(out)
return true
}
if setterStyle(outt) != setterNone {
d.readDocTo(out)
return true
}
if outk == reflect.Slice {
switch outt.Elem() {
case typeDocElem:
out.Set(d.readDocElems(outt))
case typeRawDocElem:
out.Set(d.readRawDocElems(outt))
default:
d.readDocTo(blackHole)
}
return true
}
d.readDocTo(blackHole)
return true
}
var in interface{}
switch kind {
case 0x01: // Float64
in = d.readFloat64()
case 0x02: // UTF-8 string
in = d.readStr()
case 0x03: // Document
panic("Can't happen. Handled above.")
case 0x04: // Array
outt := out.Type()
if setterStyle(outt) != setterNone {
// Skip the value so its data is handed to the setter below.
d.dropElem(kind)
break
}
for outt.Kind() == reflect.Ptr {
outt = outt.Elem()
}
switch outt.Kind() {
case reflect.Array:
d.readArrayDocTo(out)
return true
case reflect.Slice:
in = d.readSliceDoc(outt)
default:
in = d.readSliceDoc(typeSlice)
}
case 0x05: // Binary
b := d.readBinary()
if b.Kind == 0x00 || b.Kind == 0x02 {
in = b.Data
} else {
in = b
}
case 0x06: // Undefined (obsolete, but still seen in the wild)
in = Undefined
case 0x07: // ObjectId
in = ObjectId(d.readBytes(12))
case 0x08: // Bool
in = d.readBool()
case 0x09: // UTC datetime
// MongoDB stores datetimes as milliseconds since the Unix epoch.
i := d.readInt64()
if i == -62135596800000 {
in = time.Time{} // In UTC for convenience.
} else {
in = time.Unix(i/1e3, i%1e3*1e6)
}
case 0x0A: // Nil
in = nil
case 0x0B: // RegEx
in = d.readRegEx()
case 0x0C:
in = DBPointer{Namespace: d.readStr(), Id: ObjectId(d.readBytes(12))}
case 0x0D: // JavaScript without scope
in = JavaScript{Code: d.readStr()}
case 0x0E: // Symbol
in = Symbol(d.readStr())
case 0x0F: // JavaScript with scope
d.i += 4 // Skip length
js := JavaScript{d.readStr(), make(M)}
d.readDocTo(reflect.ValueOf(js.Scope))
in = js
case 0x10: // Int32
in = int(d.readInt32())
case 0x11: // Mongo-specific timestamp
in = MongoTimestamp(d.readInt64())
case 0x12: // Int64
in = d.readInt64()
case 0x13: // Decimal128
in = Decimal128{
l: uint64(d.readInt64()),
h: uint64(d.readInt64()),
}
case 0x7F: // Max key
in = MaxKey
case 0xFF: // Min key
in = MinKey
default:
panic(fmt.Sprintf("Unknown element kind (0x%02X)", kind))
}
outt := out.Type()
if outt == typeRaw {
out.Set(reflect.ValueOf(Raw{kind, d.in[start:d.i]}))
return true
}
if setter := getSetter(outt, out); setter != nil {
err := setter.SetBSON(Raw{kind, d.in[start:d.i]})
if err == SetZero {
out.Set(reflect.Zero(outt))
return true
}
if err == nil {
return true
}
if _, ok := err.(*TypeError); !ok {
panic(err)
}
return false
}
if in == nil {
out.Set(reflect.Zero(outt))
return true
}
outk := outt.Kind()
// Dereference and initialize pointer if necessary.
first := true
for outk == reflect.Ptr {
if !out.IsNil() {
out = out.Elem()
} else {
elem := reflect.New(outt.Elem())
if first {
// Only set if value is compatible.
first = false
defer func(out, elem reflect.Value) {
if good {
out.Set(elem)
}
}(out, elem)
} else {
out.Set(elem)
}
out = elem
}
outt = out.Type()
outk = outt.Kind()
}
inv := reflect.ValueOf(in)
if outt == inv.Type() {
out.Set(inv)
return true
}
switch outk {
case reflect.Interface:
out.Set(inv)
return true
case reflect.String:
switch inv.Kind() {
case reflect.String:
out.SetString(inv.String())
return true
case reflect.Slice:
if b, ok := in.([]byte); ok {
out.SetString(string(b))
return true
}
case reflect.Int, reflect.Int64:
if outt == typeJSONNumber {
out.SetString(strconv.FormatInt(inv.Int(), 10))
return true
}
case reflect.Float64:
if outt == typeJSONNumber {
out.SetString(strconv.FormatFloat(inv.Float(), 'f', -1, 64))
return true
}
}
case reflect.Slice, reflect.Array:
// Remember, array (0x04) slices are built with the correct
// element type. If we got here, this must be a cross-kind BSON
// conversion (e.g. unmarshalling 0x05 binary data into a string).
if outt.Elem().Kind() != reflect.Uint8 {
break
}
switch inv.Kind() {
case reflect.String:
slice := []byte(inv.String())
out.Set(reflect.ValueOf(slice))
return true
case reflect.Slice:
switch outt.Kind() {
case reflect.Array:
reflect.Copy(out, inv)
case reflect.Slice:
out.SetBytes(inv.Bytes())
}
return true
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch inv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetInt(inv.Int())
return true
case reflect.Float32, reflect.Float64:
out.SetInt(int64(inv.Float()))
return true
case reflect.Bool:
if inv.Bool() {
out.SetInt(1)
} else {
out.SetInt(0)
}
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("can't happen: no uint types in BSON (!?)")
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
switch inv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetUint(uint64(inv.Int()))
return true
case reflect.Float32, reflect.Float64:
out.SetUint(uint64(inv.Float()))
return true
case reflect.Bool:
if inv.Bool() {
out.SetUint(1)
} else {
out.SetUint(0)
}
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("Can't happen. No uint types in BSON.")
}
case reflect.Float32, reflect.Float64:
switch inv.Kind() {
case reflect.Float32, reflect.Float64:
out.SetFloat(inv.Float())
return true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetFloat(float64(inv.Int()))
return true
case reflect.Bool:
if inv.Bool() {
out.SetFloat(1)
} else {
out.SetFloat(0)
}
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("Can't happen. No uint types in BSON?")
}
case reflect.Bool:
switch inv.Kind() {
case reflect.Bool:
out.SetBool(inv.Bool())
return true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetBool(inv.Int() != 0)
return true
case reflect.Float32, reflect.Float64:
out.SetBool(inv.Float() != 0)
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("Can't happen. No uint types in BSON?")
}
case reflect.Struct:
if outt == typeURL && inv.Kind() == reflect.String {
u, err := url.Parse(inv.String())
if err != nil {
panic(err)
}
out.Set(reflect.ValueOf(u).Elem())
return true
}
if outt == typeBinary {
if b, ok := in.([]byte); ok {
out.Set(reflect.ValueOf(Binary{Data: b}))
return true
}
}
}
return false
}
// --------------------------------------------------------------------------
// Parsers of basic types.
func (d *decoder) readRegEx() RegEx {
re := RegEx{}
re.Pattern = d.readCStr()
re.Options = d.readCStr()
return re
}
func (d *decoder) readBinary() Binary {
l := d.readInt32()
b := Binary{}
b.Kind = d.readByte()
b.Data = d.readBytes(l)
if b.Kind == 0x02 && len(b.Data) >= 4 {
// Weird obsolete format with redundant length.
b.Data = b.Data[4:]
}
return b
}
func (d *decoder) readStr() string {
l := d.readInt32()
b := d.readBytes(l - 1)
if d.readByte() != '\x00' {
corrupted()
}
return string(b)
}
func (d *decoder) readCStr() string {
start := d.i
end := start
l := len(d.in)
for ; end != l; end++ {
if d.in[end] == '\x00' {
break
}
}
d.i = end + 1
if d.i > l {
corrupted()
}
return string(d.in[start:end])
}
func (d *decoder) readBool() bool {
b := d.readByte()
if b == 0 {
return false
}
if b == 1 {
return true
}
panic(fmt.Sprintf("encoded boolean must be 1 or 0, found %d", b))
}
func (d *decoder) readFloat64() float64 {
return math.Float64frombits(uint64(d.readInt64()))
}
func (d *decoder) readInt32() int32 {
b := d.readBytes(4)
return int32((uint32(b[0]) << 0) |
(uint32(b[1]) << 8) |
(uint32(b[2]) << 16) |
(uint32(b[3]) << 24))
}
func (d *decoder) readInt64() int64 {
b := d.readBytes(8)
return int64((uint64(b[0]) << 0) |
(uint64(b[1]) << 8) |
(uint64(b[2]) << 16) |
(uint64(b[3]) << 24) |
(uint64(b[4]) << 32) |
(uint64(b[5]) << 40) |
(uint64(b[6]) << 48) |
(uint64(b[7]) << 56))
}
func (d *decoder) readByte() byte {
i := d.i
d.i++
if d.i > len(d.in) {
corrupted()
}
return d.in[i]
}
func (d *decoder) readBytes(length int32) []byte {
if length < 0 {
corrupted()
}
start := d.i
d.i += int(length)
if d.i < start || d.i > len(d.in) {
corrupted()
}
return d.in[start : start+int(length)]
}

514
vendor/gopkg.in/mgo.v2/bson/encode.go generated vendored Normal file
View File

@@ -0,0 +1,514 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// gobson - BSON library for Go.
package bson
import (
"encoding/json"
"fmt"
"math"
"net/url"
"reflect"
"strconv"
"time"
)
// --------------------------------------------------------------------------
// Some internal infrastructure.
var (
typeBinary = reflect.TypeOf(Binary{})
typeObjectId = reflect.TypeOf(ObjectId(""))
typeDBPointer = reflect.TypeOf(DBPointer{"", ObjectId("")})
typeSymbol = reflect.TypeOf(Symbol(""))
typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0))
typeOrderKey = reflect.TypeOf(MinKey)
typeDocElem = reflect.TypeOf(DocElem{})
typeRawDocElem = reflect.TypeOf(RawDocElem{})
typeRaw = reflect.TypeOf(Raw{})
typeURL = reflect.TypeOf(url.URL{})
typeTime = reflect.TypeOf(time.Time{})
typeString = reflect.TypeOf("")
typeJSONNumber = reflect.TypeOf(json.Number(""))
)
const itoaCacheSize = 32
var itoaCache []string
func init() {
itoaCache = make([]string, itoaCacheSize)
for i := 0; i != itoaCacheSize; i++ {
itoaCache[i] = strconv.Itoa(i)
}
}
func itoa(i int) string {
if i < itoaCacheSize {
return itoaCache[i]
}
return strconv.Itoa(i)
}
// --------------------------------------------------------------------------
// Marshaling of the document value itself.
type encoder struct {
out []byte
}
func (e *encoder) addDoc(v reflect.Value) {
for {
if vi, ok := v.Interface().(Getter); ok {
getv, err := vi.GetBSON()
if err != nil {
panic(err)
}
v = reflect.ValueOf(getv)
continue
}
if v.Kind() == reflect.Ptr {
v = v.Elem()
continue
}
break
}
if v.Type() == typeRaw {
raw := v.Interface().(Raw)
if raw.Kind != 0x03 && raw.Kind != 0x00 {
panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document")
}
if len(raw.Data) == 0 {
panic("Attempted to marshal empty Raw document")
}
e.addBytes(raw.Data...)
return
}
start := e.reserveInt32()
switch v.Kind() {
case reflect.Map:
e.addMap(v)
case reflect.Struct:
e.addStruct(v)
case reflect.Array, reflect.Slice:
e.addSlice(v)
default:
panic("Can't marshal " + v.Type().String() + " as a BSON document")
}
e.addBytes(0)
e.setInt32(start, int32(len(e.out)-start))
}
func (e *encoder) addMap(v reflect.Value) {
for _, k := range v.MapKeys() {
e.addElem(k.String(), v.MapIndex(k), false)
}
}
func (e *encoder) addStruct(v reflect.Value) {
sinfo, err := getStructInfo(v.Type())
if err != nil {
panic(err)
}
var value reflect.Value
if sinfo.InlineMap >= 0 {
m := v.Field(sinfo.InlineMap)
if m.Len() > 0 {
for _, k := range m.MapKeys() {
ks := k.String()
if _, found := sinfo.FieldsMap[ks]; found {
panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks))
}
e.addElem(ks, m.MapIndex(k), false)
}
}
}
for _, info := range sinfo.FieldsList {
if info.Inline == nil {
value = v.Field(info.Num)
} else {
value = v.FieldByIndex(info.Inline)
}
if info.OmitEmpty && isZero(value) {
continue
}
e.addElem(info.Key, value, info.MinSize)
}
}
func isZero(v reflect.Value) bool {
switch v.Kind() {
case reflect.String:
return len(v.String()) == 0
case reflect.Ptr, reflect.Interface:
return v.IsNil()
case reflect.Slice:
return v.Len() == 0
case reflect.Map:
return v.Len() == 0
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Struct:
vt := v.Type()
if vt == typeTime {
return v.Interface().(time.Time).IsZero()
}
for i := 0; i < v.NumField(); i++ {
if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous {
continue // Private field
}
if !isZero(v.Field(i)) {
return false
}
}
return true
}
return false
}
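// Illustrative sketch (not part of the original source): isZero is what
// gives the omitempty flag its semantics.
func exampleOmitEmpty() {
	type T struct {
		Name string `bson:"name,omitempty"`
		Age  int    `bson:"age,omitempty"`
	}
	data, err := Marshal(T{Name: "x"})
	if err != nil {
		panic(err)
	}
	_ = data // only "name" is encoded; Age is zero and thus omitted
}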
func (e *encoder) addSlice(v reflect.Value) {
vi := v.Interface()
if d, ok := vi.(D); ok {
for _, elem := range d {
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
if d, ok := vi.(RawD); ok {
for _, elem := range d {
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
l := v.Len()
et := v.Type().Elem()
if et == typeDocElem {
for i := 0; i < l; i++ {
elem := v.Index(i).Interface().(DocElem)
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
if et == typeRawDocElem {
for i := 0; i < l; i++ {
elem := v.Index(i).Interface().(RawDocElem)
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
for i := 0; i < l; i++ {
e.addElem(itoa(i), v.Index(i), false)
}
}
// --------------------------------------------------------------------------
// Marshaling of elements in a document.
func (e *encoder) addElemName(kind byte, name string) {
e.addBytes(kind)
e.addBytes([]byte(name)...)
e.addBytes(0)
}
func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
if !v.IsValid() {
e.addElemName(0x0A, name)
return
}
if getter, ok := v.Interface().(Getter); ok {
getv, err := getter.GetBSON()
if err != nil {
panic(err)
}
e.addElem(name, reflect.ValueOf(getv), minSize)
return
}
switch v.Kind() {
case reflect.Interface:
e.addElem(name, v.Elem(), minSize)
case reflect.Ptr:
e.addElem(name, v.Elem(), minSize)
case reflect.String:
s := v.String()
switch v.Type() {
case typeObjectId:
if len(s) != 12 {
panic("ObjectIDs must be exactly 12 bytes long (got " +
strconv.Itoa(len(s)) + ")")
}
e.addElemName(0x07, name)
e.addBytes([]byte(s)...)
case typeSymbol:
e.addElemName(0x0E, name)
e.addStr(s)
case typeJSONNumber:
n := v.Interface().(json.Number)
if i, err := n.Int64(); err == nil {
e.addElemName(0x12, name)
e.addInt64(i)
} else if f, err := n.Float64(); err == nil {
e.addElemName(0x01, name)
e.addFloat64(f)
} else {
panic("failed to convert json.Number to a number: " + s)
}
default:
e.addElemName(0x02, name)
e.addStr(s)
}
case reflect.Float32, reflect.Float64:
e.addElemName(0x01, name)
e.addFloat64(v.Float())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
u := v.Uint()
if int64(u) < 0 {
panic("BSON has no uint64 type, and value is too large to fit correctly in an int64")
} else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) {
e.addElemName(0x10, name)
e.addInt32(int32(u))
} else {
e.addElemName(0x12, name)
e.addInt64(int64(u))
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch v.Type() {
case typeMongoTimestamp:
e.addElemName(0x11, name)
e.addInt64(v.Int())
case typeOrderKey:
if v.Int() == int64(MaxKey) {
e.addElemName(0x7F, name)
} else {
e.addElemName(0xFF, name)
}
default:
i := v.Int()
if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 {
// It fits into an int32, encode as such.
e.addElemName(0x10, name)
e.addInt32(int32(i))
} else {
e.addElemName(0x12, name)
e.addInt64(i)
}
}
case reflect.Bool:
e.addElemName(0x08, name)
if v.Bool() {
e.addBytes(1)
} else {
e.addBytes(0)
}
case reflect.Map:
e.addElemName(0x03, name)
e.addDoc(v)
case reflect.Slice:
vt := v.Type()
et := vt.Elem()
if et.Kind() == reflect.Uint8 {
e.addElemName(0x05, name)
e.addBinary(0x00, v.Bytes())
} else if et == typeDocElem || et == typeRawDocElem {
e.addElemName(0x03, name)
e.addDoc(v)
} else {
e.addElemName(0x04, name)
e.addDoc(v)
}
case reflect.Array:
et := v.Type().Elem()
if et.Kind() == reflect.Uint8 {
e.addElemName(0x05, name)
if v.CanAddr() {
e.addBinary(0x00, v.Slice(0, v.Len()).Interface().([]byte))
} else {
n := v.Len()
e.addInt32(int32(n))
e.addBytes(0x00)
for i := 0; i < n; i++ {
el := v.Index(i)
e.addBytes(byte(el.Uint()))
}
}
} else {
e.addElemName(0x04, name)
e.addDoc(v)
}
case reflect.Struct:
switch s := v.Interface().(type) {
case Raw:
kind := s.Kind
if kind == 0x00 {
kind = 0x03
}
if len(s.Data) == 0 && kind != 0x06 && kind != 0x0A && kind != 0xFF && kind != 0x7F {
panic("Attempted to marshal empty Raw document")
}
e.addElemName(kind, name)
e.addBytes(s.Data...)
case Binary:
e.addElemName(0x05, name)
e.addBinary(s.Kind, s.Data)
case Decimal128:
e.addElemName(0x13, name)
e.addInt64(int64(s.l))
e.addInt64(int64(s.h))
case DBPointer:
e.addElemName(0x0C, name)
e.addStr(s.Namespace)
if len(s.Id) != 12 {
panic("ObjectIDs must be exactly 12 bytes long (got " +
strconv.Itoa(len(s.Id)) + ")")
}
e.addBytes([]byte(s.Id)...)
case RegEx:
e.addElemName(0x0B, name)
e.addCStr(s.Pattern)
e.addCStr(s.Options)
case JavaScript:
if s.Scope == nil {
e.addElemName(0x0D, name)
e.addStr(s.Code)
} else {
e.addElemName(0x0F, name)
start := e.reserveInt32()
e.addStr(s.Code)
e.addDoc(reflect.ValueOf(s.Scope))
e.setInt32(start, int32(len(e.out)-start))
}
case time.Time:
// MongoDB stores datetimes (0x09) as milliseconds since the Unix epoch.
e.addElemName(0x09, name)
e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6))
case url.URL:
e.addElemName(0x02, name)
e.addStr(s.String())
case undefined:
e.addElemName(0x06, name)
default:
e.addElemName(0x03, name)
e.addDoc(v)
}
default:
panic("Can't marshal " + v.Type().String() + " in a BSON document")
}
}
// --------------------------------------------------------------------------
// Marshaling of base types.
func (e *encoder) addBinary(subtype byte, v []byte) {
if subtype == 0x02 {
// Wonder how that brilliant idea came to life. Obsolete, luckily.
e.addInt32(int32(len(v) + 4))
e.addBytes(subtype)
e.addInt32(int32(len(v)))
} else {
e.addInt32(int32(len(v)))
e.addBytes(subtype)
}
e.addBytes(v...)
}
func (e *encoder) addStr(v string) {
e.addInt32(int32(len(v) + 1))
e.addCStr(v)
}
func (e *encoder) addCStr(v string) {
e.addBytes([]byte(v)...)
e.addBytes(0)
}
func (e *encoder) reserveInt32() (pos int) {
pos = len(e.out)
e.addBytes(0, 0, 0, 0)
return pos
}
func (e *encoder) setInt32(pos int, v int32) {
e.out[pos+0] = byte(v)
e.out[pos+1] = byte(v >> 8)
e.out[pos+2] = byte(v >> 16)
e.out[pos+3] = byte(v >> 24)
}
func (e *encoder) addInt32(v int32) {
u := uint32(v)
e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24))
}
func (e *encoder) addInt64(v int64) {
u := uint64(v)
e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24),
byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56))
}
func (e *encoder) addFloat64(v float64) {
e.addInt64(int64(math.Float64bits(v)))
}
func (e *encoder) addBytes(v ...byte) {
e.out = append(e.out, v...)
}

380
vendor/gopkg.in/mgo.v2/bson/json.go generated vendored Normal file
View File

@@ -0,0 +1,380 @@
package bson
import (
"bytes"
"encoding/base64"
"fmt"
"gopkg.in/mgo.v2/internal/json"
"strconv"
"time"
)
// UnmarshalJSON unmarshals a JSON value that may hold non-standard
// syntax as defined in BSON's extended JSON specification.
func UnmarshalJSON(data []byte, value interface{}) error {
d := json.NewDecoder(bytes.NewBuffer(data))
d.Extend(&jsonExt)
return d.Decode(value)
}
// MarshalJSON marshals a JSON value that may hold non-standard
// syntax as defined in BSON's extended JSON specification.
func MarshalJSON(value interface{}) ([]byte, error) {
var buf bytes.Buffer
e := json.NewEncoder(&buf)
e.Extend(&jsonExt)
err := e.Encode(value)
if err != nil {
return nil, err
}
return buf.Bytes(), nil
}
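// Illustrative sketch (not part of the original source): extended JSON
// round-trips Mongo-specific values such as ObjectId via the $oid syntax.
func exampleExtendedJSON() {
	id := ObjectIdHex("4d88e15b60f486e428412dc9")
	out, err := MarshalJSON(M{"_id": id}) // {"_id":{"$oid":"4d88e15b..."}}
	if err != nil {
		panic(err)
	}
	var back M
	_ = UnmarshalJSON(out, &back) // back["_id"] is an ObjectId again
}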
// jdec is used internally by the JSON decoding functions
// so that they can unmarshal function-call syntax without
// recursing endlessly through keyed objects.
func jdec(data []byte, value interface{}) error {
d := json.NewDecoder(bytes.NewBuffer(data))
d.Extend(&funcExt)
return d.Decode(value)
}
var jsonExt json.Extension
var funcExt json.Extension
// TODO
// - Shell regular expressions ("/regexp/opts")
func init() {
jsonExt.DecodeUnquotedKeys(true)
jsonExt.DecodeTrailingCommas(true)
funcExt.DecodeFunc("BinData", "$binaryFunc", "$type", "$binary")
jsonExt.DecodeKeyed("$binary", jdecBinary)
jsonExt.DecodeKeyed("$binaryFunc", jdecBinary)
jsonExt.EncodeType([]byte(nil), jencBinarySlice)
jsonExt.EncodeType(Binary{}, jencBinaryType)
funcExt.DecodeFunc("ISODate", "$dateFunc", "S")
funcExt.DecodeFunc("new Date", "$dateFunc", "S")
jsonExt.DecodeKeyed("$date", jdecDate)
jsonExt.DecodeKeyed("$dateFunc", jdecDate)
jsonExt.EncodeType(time.Time{}, jencDate)
funcExt.DecodeFunc("Timestamp", "$timestamp", "t", "i")
jsonExt.DecodeKeyed("$timestamp", jdecTimestamp)
jsonExt.EncodeType(MongoTimestamp(0), jencTimestamp)
funcExt.DecodeConst("undefined", Undefined)
jsonExt.DecodeKeyed("$regex", jdecRegEx)
jsonExt.EncodeType(RegEx{}, jencRegEx)
funcExt.DecodeFunc("ObjectId", "$oidFunc", "Id")
jsonExt.DecodeKeyed("$oid", jdecObjectId)
jsonExt.DecodeKeyed("$oidFunc", jdecObjectId)
jsonExt.EncodeType(ObjectId(""), jencObjectId)
funcExt.DecodeFunc("DBRef", "$dbrefFunc", "$ref", "$id")
jsonExt.DecodeKeyed("$dbrefFunc", jdecDBRef)
funcExt.DecodeFunc("NumberLong", "$numberLongFunc", "N")
jsonExt.DecodeKeyed("$numberLong", jdecNumberLong)
jsonExt.DecodeKeyed("$numberLongFunc", jdecNumberLong)
jsonExt.EncodeType(int64(0), jencNumberLong)
jsonExt.EncodeType(int(0), jencInt)
funcExt.DecodeConst("MinKey", MinKey)
funcExt.DecodeConst("MaxKey", MaxKey)
jsonExt.DecodeKeyed("$minKey", jdecMinKey)
jsonExt.DecodeKeyed("$maxKey", jdecMaxKey)
jsonExt.EncodeType(orderKey(0), jencMinMaxKey)
jsonExt.DecodeKeyed("$undefined", jdecUndefined)
jsonExt.EncodeType(Undefined, jencUndefined)
jsonExt.Extend(&funcExt)
}
func fbytes(format string, args ...interface{}) []byte {
var buf bytes.Buffer
fmt.Fprintf(&buf, format, args...)
return buf.Bytes()
}
func jdecBinary(data []byte) (interface{}, error) {
var v struct {
Binary []byte `json:"$binary"`
Type string `json:"$type"`
Func struct {
Binary []byte `json:"$binary"`
Type int64 `json:"$type"`
} `json:"$binaryFunc"`
}
err := jdec(data, &v)
if err != nil {
return nil, err
}
var binData []byte
var binKind int64
if v.Type == "" && v.Binary == nil {
binData = v.Func.Binary
binKind = v.Func.Type
} else if v.Type == "" {
return v.Binary, nil
} else {
binData = v.Binary
binKind, err = strconv.ParseInt(v.Type, 0, 64)
if err != nil {
binKind = -1
}
}
if binKind == 0 {
return binData, nil
}
if binKind < 0 || binKind > 255 {
return nil, fmt.Errorf("invalid type in binary object: %s", data)
}
return Binary{Kind: byte(binKind), Data: binData}, nil
}
func jencBinarySlice(v interface{}) ([]byte, error) {
in := v.([]byte)
out := make([]byte, base64.StdEncoding.EncodedLen(len(in)))
base64.StdEncoding.Encode(out, in)
return fbytes(`{"$binary":"%s","$type":"0x0"}`, out), nil
}
func jencBinaryType(v interface{}) ([]byte, error) {
in := v.(Binary)
out := make([]byte, base64.StdEncoding.EncodedLen(len(in.Data)))
base64.StdEncoding.Encode(out, in.Data)
return fbytes(`{"$binary":"%s","$type":"0x%x"}`, out, in.Kind), nil
}
const jdateFormat = "2006-01-02T15:04:05.999Z"
func jdecDate(data []byte) (interface{}, error) {
var v struct {
S string `json:"$date"`
Func struct {
S string
} `json:"$dateFunc"`
}
_ = jdec(data, &v)
if v.S == "" {
v.S = v.Func.S
}
if v.S != "" {
for _, format := range []string{jdateFormat, "2006-01-02"} {
t, err := time.Parse(format, v.S)
if err == nil {
return t, nil
}
}
return nil, fmt.Errorf("cannot parse date: %q", v.S)
}
var vn struct {
Date struct {
N int64 `json:"$numberLong,string"`
} `json:"$date"`
Func struct {
S int64
} `json:"$dateFunc"`
}
err := jdec(data, &vn)
if err != nil {
return nil, fmt.Errorf("cannot parse date: %q", data)
}
n := vn.Date.N
if n == 0 {
n = vn.Func.S
}
return time.Unix(n/1000, n%1000*1e6).UTC(), nil
}
func jencDate(v interface{}) ([]byte, error) {
t := v.(time.Time)
return fbytes(`{"$date":%q}`, t.Format(jdateFormat)), nil
}
func jdecTimestamp(data []byte) (interface{}, error) {
var v struct {
Func struct {
T int32 `json:"t"`
I int32 `json:"i"`
} `json:"$timestamp"`
}
err := jdec(data, &v)
if err != nil {
return nil, err
}
return MongoTimestamp(uint64(v.Func.T)<<32 | uint64(uint32(v.Func.I))), nil
}
func jencTimestamp(v interface{}) ([]byte, error) {
ts := uint64(v.(MongoTimestamp))
return fbytes(`{"$timestamp":{"t":%d,"i":%d}}`, ts>>32, uint32(ts)), nil
}
func jdecRegEx(data []byte) (interface{}, error) {
var v struct {
Regex string `json:"$regex"`
Options string `json:"$options"`
}
err := jdec(data, &v)
if err != nil {
return nil, err
}
return RegEx{v.Regex, v.Options}, nil
}
func jencRegEx(v interface{}) ([]byte, error) {
re := v.(RegEx)
type regex struct {
Regex string `json:"$regex"`
Options string `json:"$options"`
}
return json.Marshal(regex{re.Pattern, re.Options})
}
func jdecObjectId(data []byte) (interface{}, error) {
var v struct {
Id string `json:"$oid"`
Func struct {
Id string
} `json:"$oidFunc"`
}
err := jdec(data, &v)
if err != nil {
return nil, err
}
if v.Id == "" {
v.Id = v.Func.Id
}
return ObjectIdHex(v.Id), nil
}
func jencObjectId(v interface{}) ([]byte, error) {
return fbytes(`{"$oid":"%s"}`, v.(ObjectId).Hex()), nil
}
func jdecDBRef(data []byte) (interface{}, error) {
// TODO Support unmarshaling $ref and $id into the input value.
var v struct {
Obj map[string]interface{} `json:"$dbrefFunc"`
}
// TODO Fix this. Must not be required.
v.Obj = make(map[string]interface{})
err := jdec(data, &v)
if err != nil {
return nil, err
}
return v.Obj, nil
}
func jdecNumberLong(data []byte) (interface{}, error) {
var v struct {
N int64 `json:"$numberLong,string"`
Func struct {
N int64 `json:",string"`
} `json:"$numberLongFunc"`
}
var vn struct {
N int64 `json:"$numberLong"`
Func struct {
N int64
} `json:"$numberLongFunc"`
}
err := jdec(data, &v)
if err != nil {
err = jdec(data, &vn)
v.N = vn.N
v.Func.N = vn.Func.N
}
if err != nil {
return nil, err
}
if v.N != 0 {
return v.N, nil
}
return v.Func.N, nil
}
func jencNumberLong(v interface{}) ([]byte, error) {
n := v.(int64)
f := `{"$numberLong":"%d"}`
if n <= 1<<53 {
f = `{"$numberLong":%d}`
}
return fbytes(f, n), nil
}
func jencInt(v interface{}) ([]byte, error) {
n := v.(int)
f := `{"$numberLong":"%d"}`
if int64(n) <= 1<<53 {
f = `%d`
}
return fbytes(f, n), nil
}
func jdecMinKey(data []byte) (interface{}, error) {
var v struct {
N int64 `json:"$minKey"`
}
err := jdec(data, &v)
if err != nil {
return nil, err
}
if v.N != 1 {
return nil, fmt.Errorf("invalid $minKey object: %s", data)
}
return MinKey, nil
}
func jdecMaxKey(data []byte) (interface{}, error) {
var v struct {
N int64 `json:"$maxKey"`
}
err := jdec(data, &v)
if err != nil {
return nil, err
}
if v.N != 1 {
return nil, fmt.Errorf("invalid $maxKey object: %s", data)
}
return MaxKey, nil
}
func jencMinMaxKey(v interface{}) ([]byte, error) {
switch v.(orderKey) {
case MinKey:
return []byte(`{"$minKey":1}`), nil
case MaxKey:
return []byte(`{"$maxKey":1}`), nil
}
panic(fmt.Sprintf("invalid $minKey/$maxKey value: %d", v))
}
func jdecUndefined(data []byte) (interface{}, error) {
var v struct {
B bool `json:"$undefined"`
}
err := jdec(data, &v)
if err != nil {
return nil, err
}
if !v.B {
return nil, fmt.Errorf("invalid $undefined object: %s", data)
}
return Undefined, nil
}
func jencUndefined(v interface{}) ([]byte, error) {
return []byte(`{"$undefined":true}`), nil
}

184
vendor/gopkg.in/mgo.v2/bson/json_test.go generated vendored Normal file

@@ -0,0 +1,184 @@
package bson_test
import (
"gopkg.in/mgo.v2/bson"
. "gopkg.in/check.v1"
"reflect"
"strings"
"time"
)
type jsonTest struct {
a interface{} // value encoded into JSON (optional)
b string // JSON expected as output of <a>, and used as input to <c>
c interface{} // Value expected from decoding <b>, defaults to <a>
e string // error string, if decoding (b) should fail
}
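// Each entry is exercised in both directions where its fields allow:
// a (when set) is marshaled and compared against b, then b is unmarshaled
// and compared against c, which defaults to a; e marks inputs whose
// decoding must fail with exactly that error string.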
var jsonTests = []jsonTest{
// $binary
{
a: []byte("foo"),
b: `{"$binary":"Zm9v","$type":"0x0"}`,
}, {
a: bson.Binary{Kind: 2, Data: []byte("foo")},
b: `{"$binary":"Zm9v","$type":"0x2"}`,
}, {
b: `BinData(2,"Zm9v")`,
c: bson.Binary{Kind: 2, Data: []byte("foo")},
},
// $date
{
a: time.Date(2016, 5, 15, 1, 2, 3, 4000000, time.UTC),
b: `{"$date":"2016-05-15T01:02:03.004Z"}`,
}, {
b: `{"$date": {"$numberLong": "1002"}}`,
c: time.Date(1970, 1, 1, 0, 0, 1, 2e6, time.UTC),
}, {
b: `ISODate("2016-05-15T01:02:03.004Z")`,
c: time.Date(2016, 5, 15, 1, 2, 3, 4000000, time.UTC),
}, {
b: `new Date(1000)`,
c: time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC),
}, {
b: `new Date("2016-05-15")`,
c: time.Date(2016, 5, 15, 0, 0, 0, 0, time.UTC),
},
// $timestamp
{
a: bson.MongoTimestamp(4294967298),
b: `{"$timestamp":{"t":1,"i":2}}`,
}, {
b: `Timestamp(1, 2)`,
c: bson.MongoTimestamp(4294967298),
},
// $regex
{
a: bson.RegEx{"pattern", "options"},
b: `{"$regex":"pattern","$options":"options"}`,
},
// $oid
{
a: bson.ObjectIdHex("0123456789abcdef01234567"),
b: `{"$oid":"0123456789abcdef01234567"}`,
}, {
b: `ObjectId("0123456789abcdef01234567")`,
c: bson.ObjectIdHex("0123456789abcdef01234567"),
},
// $ref (no special type)
{
b: `DBRef("name", "id")`,
c: map[string]interface{}{"$ref": "name", "$id": "id"},
},
// $numberLong
{
a: 123,
b: `123`,
}, {
a: int64(9007199254740992),
b: `{"$numberLong":9007199254740992}`,
}, {
a: int64(1<<53 + 1),
b: `{"$numberLong":"9007199254740993"}`,
}, {
a: 1<<53 + 1,
b: `{"$numberLong":"9007199254740993"}`,
c: int64(9007199254740993),
}, {
b: `NumberLong(9007199254740992)`,
c: int64(1 << 53),
}, {
b: `NumberLong("9007199254740993")`,
c: int64(1<<53 + 1),
},
// $minKey, $maxKey
{
a: bson.MinKey,
b: `{"$minKey":1}`,
}, {
a: bson.MaxKey,
b: `{"$maxKey":1}`,
}, {
b: `MinKey`,
c: bson.MinKey,
}, {
b: `MaxKey`,
c: bson.MaxKey,
}, {
b: `{"$minKey":0}`,
e: `invalid $minKey object: {"$minKey":0}`,
}, {
b: `{"$maxKey":0}`,
e: `invalid $maxKey object: {"$maxKey":0}`,
},
{
a: bson.Undefined,
b: `{"$undefined":true}`,
}, {
b: `undefined`,
c: bson.Undefined,
}, {
b: `{"v": undefined}`,
c: struct{ V interface{} }{bson.Undefined},
},
// Unquoted keys and trailing commas
{
b: `{$foo: ["bar",],}`,
c: map[string]interface{}{"$foo": []interface{}{"bar"}},
},
}
func (s *S) TestJSON(c *C) {
for i, item := range jsonTests {
c.Logf("------------ (#%d)", i)
c.Logf("A: %#v", item.a)
c.Logf("B: %#v", item.b)
if item.c == nil {
item.c = item.a
} else {
c.Logf("C: %#v", item.c)
}
if item.e != "" {
c.Logf("E: %s", item.e)
}
if item.a != nil {
data, err := bson.MarshalJSON(item.a)
c.Assert(err, IsNil)
c.Logf("Dumped: %#v", string(data))
c.Assert(strings.TrimSuffix(string(data), "\n"), Equals, item.b)
}
var zero interface{}
if item.c == nil {
zero = &struct{}{}
} else {
zero = reflect.New(reflect.TypeOf(item.c)).Interface()
}
err := bson.UnmarshalJSON([]byte(item.b), zero)
if item.e != "" {
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, item.e)
continue
}
c.Assert(err, IsNil)
zerov := reflect.ValueOf(zero)
value := zerov.Interface()
if zerov.Kind() == reflect.Ptr {
value = zerov.Elem().Interface()
}
c.Logf("Loaded: %#v", value)
c.Assert(value, DeepEquals, item.c)
}
}

27
vendor/gopkg.in/mgo.v2/bson/specdata/update.sh generated vendored Executable file

@@ -0,0 +1,27 @@
#!/bin/sh
set -e
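# Regenerates ../specdata_test.go by embedding every spec YAML file as a
# raw (backquoted) Go string literal and formatting the result.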
if [ ! -d specifications ]; then
git clone -b bson git@github.com:jyemin/specifications
fi
TESTFILE="../specdata_test.go"
cat <<END > $TESTFILE
package bson_test
var specTests = []string{
END
for file in specifications/source/bson/tests/*.yml; do
(
echo '`'
cat $file
echo -n '`,'
) >> $TESTFILE
done
echo '}' >> $TESTFILE
gofmt -w $TESTFILE

241
vendor/gopkg.in/mgo.v2/bson/specdata_test.go generated vendored Normal file

@@ -0,0 +1,241 @@
package bson_test
var specTests = []string{
`
---
description: "Array type"
documents:
-
decoded:
a : []
encoded: 0D000000046100050000000000
-
decoded:
a: [10]
encoded: 140000000461000C0000001030000A0000000000
-
# Decode an array that uses an empty string as the key
decodeOnly : true
decoded:
a: [10]
encoded: 130000000461000B00000010000A0000000000
-
# Decode an array that uses a non-numeric string as the key
decodeOnly : true
decoded:
a: [10]
encoded: 150000000461000D000000106162000A0000000000
`, `
---
description: "Boolean type"
documents:
-
encoded: "090000000862000100"
decoded: { "b" : true }
-
encoded: "090000000862000000"
decoded: { "b" : false }
`, `
---
description: "Corrupted BSON"
documents:
-
encoded: "09000000016600"
error: "truncated double"
-
encoded: "09000000026600"
error: "truncated string"
-
encoded: "09000000036600"
error: "truncated document"
-
encoded: "09000000046600"
error: "truncated array"
-
encoded: "09000000056600"
error: "truncated binary"
-
encoded: "09000000076600"
error: "truncated objectid"
-
encoded: "09000000086600"
error: "truncated boolean"
-
encoded: "09000000096600"
error: "truncated date"
-
encoded: "090000000b6600"
error: "truncated regex"
-
encoded: "090000000c6600"
error: "truncated db pointer"
-
encoded: "0C0000000d6600"
error: "truncated javascript"
-
encoded: "0C0000000e6600"
error: "truncated symbol"
-
encoded: "0C0000000f6600"
error: "truncated javascript with scope"
-
encoded: "0C000000106600"
error: "truncated int32"
-
encoded: "0C000000116600"
error: "truncated timestamp"
-
encoded: "0C000000126600"
error: "truncated int64"
-
encoded: "0400000000"
error: basic
-
encoded: "0500000001"
error: basic
-
encoded: "05000000"
error: basic
-
encoded: "0700000002610078563412"
error: basic
-
encoded: "090000001061000500"
error: basic
-
encoded: "00000000000000000000"
error: basic
-
encoded: "1300000002666f6f00040000006261720000"
error: "basic"
-
encoded: "1800000003666f6f000f0000001062617200ffffff7f0000"
error: basic
-
encoded: "1500000003666f6f000c0000000862617200010000"
error: basic
-
encoded: "1c00000003666f6f001200000002626172000500000062617a000000"
error: basic
-
encoded: "1000000002610004000000616263ff00"
error: string is not null-terminated
-
encoded: "0c0000000200000000000000"
error: bad_string_length
-
encoded: "120000000200ffffffff666f6f6261720000"
error: bad_string_length
-
encoded: "0c0000000e00000000000000"
error: bad_string_length
-
encoded: "120000000e00ffffffff666f6f6261720000"
error: bad_string_length
-
encoded: "180000000c00fa5bd841d6585d9900"
error: ""
-
encoded: "1e0000000c00ffffffff666f6f626172005259b56afa5bd841d6585d9900"
error: bad_string_length
-
encoded: "0c0000000d00000000000000"
error: bad_string_length
-
encoded: "0c0000000d00ffffffff0000"
error: bad_string_length
-
encoded: "1c0000000f001500000000000000000c000000020001000000000000"
error: bad_string_length
-
encoded: "1c0000000f0015000000ffffffff000c000000020001000000000000"
error: bad_string_length
-
encoded: "1c0000000f001500000001000000000c000000020000000000000000"
error: bad_string_length
-
encoded: "1c0000000f001500000001000000000c0000000200ffffffff000000"
error: bad_string_length
-
encoded: "0E00000008616263646566676869707172737475"
error: "Run-on CString"
-
encoded: "0100000000"
error: "An object size that's too small to even include the object size, but is correctly encoded, along with a correct EOO (and no data)"
-
encoded: "1a0000000e74657374000c00000068656c6c6f20776f726c6400000500000000"
error: "One object, but with object size listed smaller than it is in the data"
-
encoded: "05000000"
error: "One object, missing the EOO at the end"
-
encoded: "0500000001"
error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x01"
-
encoded: "05000000ff"
error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0xff"
-
encoded: "0500000070"
error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x70"
-
encoded: "07000000000000"
error: "Invalid BSON type low range"
-
encoded: "07000000800000"
error: "Invalid BSON type high range"
-
encoded: "090000000862000200"
error: "Invalid boolean value of 2"
-
encoded: "09000000086200ff00"
error: "Invalid boolean value of -1"
`, `
---
description: "Int32 type"
documents:
-
decoded:
i: -2147483648
encoded: 0C0000001069000000008000
-
decoded:
i: 2147483647
encoded: 0C000000106900FFFFFF7F00
-
decoded:
i: -1
encoded: 0C000000106900FFFFFFFF00
-
decoded:
i: 0
encoded: 0C0000001069000000000000
-
decoded:
i: 1
encoded: 0C0000001069000100000000
`, `
---
description: "String type"
documents:
-
decoded:
s : ""
encoded: 0D000000027300010000000000
-
decoded:
s: "a"
encoded: 0E00000002730002000000610000
-
decoded:
s: "This is a string"
encoded: 1D0000000273001100000054686973206973206120737472696E670000
-
decoded:
s: "κόσμε"
encoded: 180000000273000C000000CEBAE1BDB9CF83CEBCCEB50000
`}

351
vendor/gopkg.in/mgo.v2/bulk.go generated vendored Normal file

@@ -0,0 +1,351 @@
package mgo
import (
"bytes"
"sort"
"gopkg.in/mgo.v2/bson"
)
// Bulk represents an operation that can be prepared with several
// orthogonal changes before being delivered to the server.
//
// MongoDB servers older than version 2.6 do not have proper support for bulk
// operations, so the driver attempts to map its API as much as possible into
// the functionality that works. In particular, in those releases updates and
// removals are sent individually, and inserts are sent in bulk but have
// suboptimal error reporting compared to more recent versions of the server.
// See the documentation of BulkErrorCase for details on that.
//
// Relevant documentation:
//
// http://blog.mongodb.org/post/84922794768/mongodbs-new-bulk-api
//
type Bulk struct {
c *Collection
opcount int
actions []bulkAction
ordered bool
}
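// A minimal usage sketch (illustrative only; "collection" stands for an
// existing *Collection, and error handling is elided):
//
//	bulk := collection.Bulk()
//	bulk.Unordered()
//	bulk.Insert(bson.M{"n": 1}, bson.M{"n": 2})
//	bulk.Update(bson.M{"n": 1}, bson.M{"$set": bson.M{"n": 10}})
//	result, err := bulk.Run()
//	if err != nil {
//		// Individual failures are available via err.(*BulkError).Cases().
//	}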
type bulkOp int
const (
bulkInsert bulkOp = iota + 1
bulkUpdate
bulkUpdateAll
bulkRemove
)
type bulkAction struct {
op bulkOp
docs []interface{}
idxs []int
}
type bulkUpdateOp []interface{}
type bulkDeleteOp []interface{}
// BulkResult holds the results for a bulk operation.
type BulkResult struct {
Matched int
Modified int // Available only for MongoDB 2.6+
// Be conservative while we understand exactly how to report these
// results in a useful and convenient way, and also how to emulate
// them with prior servers.
private bool
}
// BulkError holds an error returned from running a Bulk operation.
// Individual errors may be obtained and inspected via the Cases method.
type BulkError struct {
ecases []BulkErrorCase
}
func (e *BulkError) Error() string {
if len(e.ecases) == 0 {
return "invalid BulkError instance: no errors"
}
if len(e.ecases) == 1 {
return e.ecases[0].Err.Error()
}
msgs := make([]string, 0, len(e.ecases))
seen := make(map[string]bool)
for _, ecase := range e.ecases {
msg := ecase.Err.Error()
if !seen[msg] {
seen[msg] = true
msgs = append(msgs, msg)
}
}
if len(msgs) == 1 {
return msgs[0]
}
var buf bytes.Buffer
buf.WriteString("multiple errors in bulk operation:\n")
for _, msg := range msgs {
buf.WriteString(" - ")
buf.WriteString(msg)
buf.WriteByte('\n')
}
return buf.String()
}
type bulkErrorCases []BulkErrorCase
func (slice bulkErrorCases) Len() int { return len(slice) }
func (slice bulkErrorCases) Less(i, j int) bool { return slice[i].Index < slice[j].Index }
func (slice bulkErrorCases) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] }
// BulkErrorCase holds an individual error found while attempting a single change
// within a bulk operation, and the position in which it was enqueued.
//
// MongoDB servers older than version 2.6 do not have proper support for bulk
// operations, so the driver attempts to map its API as much as possible into
// the functionality that works. In particular, only the last error is
// reported for bulk inserts, and without any positional information, so
// the Index field is set to -1 in these cases.
type BulkErrorCase struct {
Index int // Position of operation that failed, or -1 if unknown.
Err error
}
// Cases returns all individual errors found while attempting the requested changes.
//
// See the documentation of BulkErrorCase for limitations in older MongoDB releases.
func (e *BulkError) Cases() []BulkErrorCase {
return e.ecases
}
// Bulk returns a value to prepare the execution of a bulk operation.
func (c *Collection) Bulk() *Bulk {
return &Bulk{c: c, ordered: true}
}
// Unordered puts the bulk operation in unordered mode.
//
// In unordered mode the individual operations may be sent
// out of order, which means later operations may proceed
// even if earlier ones have failed.
func (b *Bulk) Unordered() {
b.ordered = false
}
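// action returns the bulkAction on which the next opcount operations of
// kind op should be queued: the most recent action is reused when it has
// the same kind, any same-kind action may be reused in unordered mode,
// and a new action is appended otherwise. The global operation indexes
// are recorded in idxs so errors can be mapped back to queueing order.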
func (b *Bulk) action(op bulkOp, opcount int) *bulkAction {
var action *bulkAction
if len(b.actions) > 0 && b.actions[len(b.actions)-1].op == op {
action = &b.actions[len(b.actions)-1]
} else if !b.ordered {
for i := range b.actions {
if b.actions[i].op == op {
action = &b.actions[i]
break
}
}
}
if action == nil {
b.actions = append(b.actions, bulkAction{op: op})
action = &b.actions[len(b.actions)-1]
}
for i := 0; i < opcount; i++ {
action.idxs = append(action.idxs, b.opcount)
b.opcount++
}
return action
}
// Insert queues up the provided documents for insertion.
func (b *Bulk) Insert(docs ...interface{}) {
action := b.action(bulkInsert, len(docs))
action.docs = append(action.docs, docs...)
}
// Remove queues up the provided selectors for removing matching documents.
// Each selector will remove only a single matching document.
func (b *Bulk) Remove(selectors ...interface{}) {
action := b.action(bulkRemove, len(selectors))
for _, selector := range selectors {
if selector == nil {
selector = bson.D{}
}
action.docs = append(action.docs, &deleteOp{
Collection: b.c.FullName,
Selector: selector,
Flags: 1,
Limit: 1,
})
}
}
// RemoveAll queues up the provided selectors for removing all matching documents.
// Each selector will remove all matching documents.
func (b *Bulk) RemoveAll(selectors ...interface{}) {
action := b.action(bulkRemove, len(selectors))
for _, selector := range selectors {
if selector == nil {
selector = bson.D{}
}
action.docs = append(action.docs, &deleteOp{
Collection: b.c.FullName,
Selector: selector,
Flags: 0,
Limit: 0,
})
}
}
// Update queues up the provided pairs of updating instructions.
// The first element of each pair selects which document must be
// updated, and the second element defines how to update it.
// Each pair matches at most one document.
func (b *Bulk) Update(pairs ...interface{}) {
if len(pairs)%2 != 0 {
panic("Bulk.Update requires an even number of parameters")
}
action := b.action(bulkUpdate, len(pairs)/2)
for i := 0; i < len(pairs); i += 2 {
selector := pairs[i]
if selector == nil {
selector = bson.D{}
}
action.docs = append(action.docs, &updateOp{
Collection: b.c.FullName,
Selector: selector,
Update: pairs[i+1],
})
}
}
// UpdateAll queues up the provided pairs of updating instructions.
// The first element of each pair selects which documents must be
// updated, and the second element defines how to update them.
// Each pair updates all documents matching the selector.
func (b *Bulk) UpdateAll(pairs ...interface{}) {
if len(pairs)%2 != 0 {
panic("Bulk.UpdateAll requires an even number of parameters")
}
action := b.action(bulkUpdate, len(pairs)/2)
for i := 0; i < len(pairs); i += 2 {
selector := pairs[i]
if selector == nil {
selector = bson.D{}
}
action.docs = append(action.docs, &updateOp{
Collection: b.c.FullName,
Selector: selector,
Update: pairs[i+1],
Flags: 2,
Multi: true,
})
}
}
// Upsert queues up the provided pairs of upserting instructions.
// The first element of each pair selects which document must be
// updated, and the second element defines how to update it.
// Each pair matches at most one document.
func (b *Bulk) Upsert(pairs ...interface{}) {
if len(pairs)%2 != 0 {
panic("Bulk.Update requires an even number of parameters")
}
action := b.action(bulkUpdate, len(pairs)/2)
for i := 0; i < len(pairs); i += 2 {
selector := pairs[i]
if selector == nil {
selector = bson.D{}
}
action.docs = append(action.docs, &updateOp{
Collection: b.c.FullName,
Selector: selector,
Update: pairs[i+1],
Flags: 1,
Upsert: true,
})
}
}
// Run runs all the operations queued up.
//
// If an error is reported on an unordered bulk operation, the error value may
// be an aggregation of all issues observed. As an exception to that, Insert
// operations running on MongoDB versions prior to 2.6 will report the last
// error only due to a limitation in the wire protocol.
func (b *Bulk) Run() (*BulkResult, error) {
var result BulkResult
var berr BulkError
var failed bool
for i := range b.actions {
action := &b.actions[i]
var ok bool
switch action.op {
case bulkInsert:
ok = b.runInsert(action, &result, &berr)
case bulkUpdate:
ok = b.runUpdate(action, &result, &berr)
case bulkRemove:
ok = b.runRemove(action, &result, &berr)
default:
panic("unknown bulk operation")
}
if !ok {
failed = true
if b.ordered {
break
}
}
}
if failed {
sort.Sort(bulkErrorCases(berr.ecases))
return nil, &berr
}
return &result, nil
}
func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *BulkError) bool {
op := &insertOp{b.c.FullName, action.docs, 0}
if !b.ordered {
op.flags = 1 // ContinueOnError
}
lerr, err := b.c.writeOp(op, b.ordered)
return b.checkSuccess(action, berr, lerr, err)
}
func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *BulkError) bool {
lerr, err := b.c.writeOp(bulkUpdateOp(action.docs), b.ordered)
if lerr != nil {
result.Matched += lerr.N
result.Modified += lerr.modified
}
return b.checkSuccess(action, berr, lerr, err)
}
func (b *Bulk) runRemove(action *bulkAction, result *BulkResult, berr *BulkError) bool {
lerr, err := b.c.writeOp(bulkDeleteOp(action.docs), b.ordered)
if lerr != nil {
result.Matched += lerr.N
result.Modified += lerr.modified
}
return b.checkSuccess(action, berr, lerr, err)
}
func (b *Bulk) checkSuccess(action *bulkAction, berr *BulkError, lerr *LastError, err error) bool {
if lerr != nil && len(lerr.ecases) > 0 {
for i := 0; i < len(lerr.ecases); i++ {
// Map back from the local error index into the visible one.
ecase := lerr.ecases[i]
idx := ecase.Index
if idx >= 0 {
idx = action.idxs[idx]
}
berr.ecases = append(berr.ecases, BulkErrorCase{idx, ecase.Err})
}
return false
} else if err != nil {
for i := 0; i < len(action.idxs); i++ {
berr.ecases = append(berr.ecases, BulkErrorCase{action.idxs[i], err})
}
return false
}
return true
}

504
vendor/gopkg.in/mgo.v2/bulk_test.go generated vendored Normal file

@@ -0,0 +1,504 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2015 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo_test
import (
. "gopkg.in/check.v1"
"gopkg.in/mgo.v2"
)
func (s *S) TestBulkInsert(c *C) {
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
bulk := coll.Bulk()
bulk.Insert(M{"n": 1})
bulk.Insert(M{"n": 2}, M{"n": 3})
r, err := bulk.Run()
c.Assert(err, IsNil)
c.Assert(r, FitsTypeOf, &mgo.BulkResult{})
type doc struct{ N int }
var res []doc
err = coll.Find(nil).Sort("n").All(&res)
c.Assert(err, IsNil)
c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}})
}
func (s *S) TestBulkInsertError(c *C) {
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
bulk := coll.Bulk()
bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3})
_, err = bulk.Run()
c.Assert(err, ErrorMatches, ".*duplicate key.*")
c.Assert(mgo.IsDup(err), Equals, true)
type doc struct {
N int `_id`
}
var res []doc
err = coll.Find(nil).Sort("_id").All(&res)
c.Assert(err, IsNil)
c.Assert(res, DeepEquals, []doc{{1}, {2}})
}
func (s *S) TestBulkInsertErrorUnordered(c *C) {
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
bulk := coll.Bulk()
bulk.Unordered()
bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3})
_, err = bulk.Run()
c.Assert(err, ErrorMatches, ".*duplicate key.*")
type doc struct {
N int `_id`
}
var res []doc
err = coll.Find(nil).Sort("_id").All(&res)
c.Assert(err, IsNil)
c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}})
}
func (s *S) TestBulkInsertErrorUnorderedSplitBatch(c *C) {
// The server has a batch limit of 1000 documents when using write commands.
// This artificial limit did not exist with the old wire protocol, so to
// avoid compatibility issues the implementation internally splits batches
// into the proper size and delivers them one by one. This test ensures that
// the behavior of unordered (that is, continue on error) remains correct
// when errors happen and there are batches left.
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
bulk := coll.Bulk()
bulk.Unordered()
const total = 4096
type doc struct {
Id int `_id`
}
docs := make([]interface{}, total)
for i := 0; i < total; i++ {
docs[i] = doc{i}
}
docs[1] = doc{0}
bulk.Insert(docs...)
_, err = bulk.Run()
c.Assert(err, ErrorMatches, ".*duplicate key.*")
n, err := coll.Count()
c.Assert(err, IsNil)
c.Assert(n, Equals, total-1)
var res doc
err = coll.FindId(1500).One(&res)
c.Assert(err, IsNil)
c.Assert(res.Id, Equals, 1500)
}
func (s *S) TestBulkErrorString(c *C) {
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
// If it's just the same string multiple times, join it into a single message.
bulk := coll.Bulk()
bulk.Unordered()
bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2})
_, err = bulk.Run()
c.Assert(err, ErrorMatches, ".*duplicate key.*")
c.Assert(err, Not(ErrorMatches), ".*duplicate key.*duplicate key")
c.Assert(mgo.IsDup(err), Equals, true)
// With matching errors but different messages, present them all.
bulk = coll.Bulk()
bulk.Unordered()
bulk.Insert(M{"_id": "dupone"}, M{"_id": "dupone"}, M{"_id": "duptwo"}, M{"_id": "duptwo"})
_, err = bulk.Run()
if s.versionAtLeast(2, 6) {
c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n( - .*duplicate.*\n){2}$")
c.Assert(err, ErrorMatches, "(?s).*dupone.*")
c.Assert(err, ErrorMatches, "(?s).*duptwo.*")
} else {
// Wire protocol query doesn't return all errors.
c.Assert(err, ErrorMatches, ".*duplicate.*")
}
c.Assert(mgo.IsDup(err), Equals, true)
// With mixed errors, present them all.
bulk = coll.Bulk()
bulk.Unordered()
bulk.Insert(M{"_id": 1}, M{"_id": []int{2}})
_, err = bulk.Run()
if s.versionAtLeast(2, 6) {
c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n - .*duplicate.*\n - .*array.*\n$")
} else {
// Wire protocol query doesn't return all errors.
c.Assert(err, ErrorMatches, ".*array.*")
}
c.Assert(mgo.IsDup(err), Equals, false)
}
func (s *S) TestBulkErrorCases_2_6(c *C) {
if !s.versionAtLeast(2, 6) {
c.Skip("2.4- has poor bulk reporting")
}
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
bulk := coll.Bulk()
bulk.Unordered()
// There's a limit of 1000 operations per command, so
// this forces the more complex indexing logic to act.
for i := 0; i < 1010; i++ {
switch i {
case 3, 14:
bulk.Insert(M{"_id": "dupone"})
case 5, 106:
bulk.Update(M{"_id": i - 1}, M{"$set": M{"_id": 4}})
case 7, 1008:
bulk.Insert(M{"_id": "duptwo"})
default:
bulk.Insert(M{"_id": i})
}
}
_, err = bulk.Run()
ecases := err.(*mgo.BulkError).Cases()
c.Check(ecases[0].Err, ErrorMatches, ".*duplicate.*dupone.*")
c.Check(ecases[0].Index, Equals, 14)
c.Check(ecases[1].Err, ErrorMatches, ".*update.*_id.*")
c.Check(ecases[1].Index, Equals, 106)
c.Check(ecases[2].Err, ErrorMatches, ".*duplicate.*duptwo.*")
c.Check(ecases[2].Index, Equals, 1008)
}
func (s *S) TestBulkErrorCases_2_4(c *C) {
if s.versionAtLeast(2, 6) {
c.Skip("2.6+ has better reporting")
}
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
bulk := coll.Bulk()
bulk.Unordered()
// There's a limit of 1000 operations per command, so
// this forces the more complex indexing logic to act.
for i := 0; i < 1010; i++ {
switch i {
case 3, 14:
bulk.Insert(M{"_id": "dupone"})
case 5:
bulk.Update(M{"_id": i - 1}, M{"$set": M{"n": 4}})
case 106:
bulk.Update(M{"_id": i - 1}, M{"$bogus": M{"n": 4}})
case 7, 1008:
bulk.Insert(M{"_id": "duptwo"})
default:
bulk.Insert(M{"_id": i})
}
}
_, err = bulk.Run()
ecases := err.(*mgo.BulkError).Cases()
c.Check(ecases[0].Err, ErrorMatches, ".*duplicate.*duptwo.*")
c.Check(ecases[0].Index, Equals, -1)
c.Check(ecases[1].Err, ErrorMatches, `.*\$bogus.*`)
c.Check(ecases[1].Index, Equals, 106)
}
func (s *S) TestBulkErrorCasesOrdered(c *C) {
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
bulk := coll.Bulk()
// There's a limit of 1000 operations per command, so
// this forces the more complex indexing logic to act.
for i := 0; i < 20; i++ {
switch i {
case 3, 14:
bulk.Insert(M{"_id": "dupone"})
case 7, 17:
bulk.Insert(M{"_id": "duptwo"})
default:
bulk.Insert(M{"_id": i})
}
}
_, err = bulk.Run()
ecases := err.(*mgo.BulkError).Cases()
c.Check(ecases[0].Err, ErrorMatches, ".*duplicate.*dupone.*")
if s.versionAtLeast(2, 6) {
c.Check(ecases[0].Index, Equals, 14)
} else {
c.Check(ecases[0].Index, Equals, -1)
}
c.Check(ecases, HasLen, 1)
}
func (s *S) TestBulkUpdate(c *C) {
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
c.Assert(err, IsNil)
bulk := coll.Bulk()
bulk.Update(M{"n": 1}, M{"$set": M{"n": 1}})
bulk.Update(M{"n": 2}, M{"$set": M{"n": 20}})
bulk.Update(M{"n": 5}, M{"$set": M{"n": 50}}) // Won't match.
bulk.Update(M{"n": 1}, M{"$set": M{"n": 10}}, M{"n": 3}, M{"$set": M{"n": 30}})
r, err := bulk.Run()
c.Assert(err, IsNil)
c.Assert(r.Matched, Equals, 4)
if s.versionAtLeast(2, 6) {
c.Assert(r.Modified, Equals, 3)
}
type doc struct{ N int }
var res []doc
err = coll.Find(nil).Sort("n").All(&res)
c.Assert(err, IsNil)
c.Assert(res, DeepEquals, []doc{{10}, {20}, {30}})
}
func (s *S) TestBulkUpdateError(c *C) {
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
c.Assert(err, IsNil)
bulk := coll.Bulk()
bulk.Update(
M{"n": 1}, M{"$set": M{"n": 10}},
M{"n": 2}, M{"$set": M{"n": 20, "_id": 20}},
M{"n": 3}, M{"$set": M{"n": 30}},
)
r, err := bulk.Run()
c.Assert(err, ErrorMatches, ".*_id.*")
c.Assert(r, FitsTypeOf, &mgo.BulkResult{})
type doc struct{ N int }
var res []doc
err = coll.Find(nil).Sort("n").All(&res)
c.Assert(err, IsNil)
c.Assert(res, DeepEquals, []doc{{2}, {3}, {10}})
}
func (s *S) TestBulkUpdateErrorUnordered(c *C) {
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
c.Assert(err, IsNil)
bulk := coll.Bulk()
bulk.Unordered()
bulk.Update(
M{"n": 1}, M{"$set": M{"n": 10}},
M{"n": 2}, M{"$set": M{"n": 20, "_id": 20}},
M{"n": 3}, M{"$set": M{"n": 30}},
)
r, err := bulk.Run()
c.Assert(err, ErrorMatches, ".*_id.*")
c.Assert(r, FitsTypeOf, &mgo.BulkResult{})
type doc struct{ N int }
var res []doc
err = coll.Find(nil).Sort("n").All(&res)
c.Assert(err, IsNil)
c.Assert(res, DeepEquals, []doc{{2}, {10}, {30}})
}
func (s *S) TestBulkUpdateAll(c *C) {
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
c.Assert(err, IsNil)
bulk := coll.Bulk()
bulk.UpdateAll(M{"n": 1}, M{"$set": M{"n": 10}})
bulk.UpdateAll(M{"n": 2}, M{"$set": M{"n": 2}}) // Won't change.
bulk.UpdateAll(M{"n": 5}, M{"$set": M{"n": 50}}) // Won't match.
bulk.UpdateAll(M{}, M{"$inc": M{"n": 1}}, M{"n": 11}, M{"$set": M{"n": 5}})
r, err := bulk.Run()
c.Assert(err, IsNil)
c.Assert(r.Matched, Equals, 6)
if s.versionAtLeast(2, 6) {
c.Assert(r.Modified, Equals, 5)
}
type doc struct{ N int }
var res []doc
err = coll.Find(nil).Sort("n").All(&res)
c.Assert(err, IsNil)
c.Assert(res, DeepEquals, []doc{{3}, {4}, {5}})
}
func (s *S) TestBulkMixedUnordered(c *C) {
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
// Abuse undefined behavior to ensure the desired implementation is in place.
bulk := coll.Bulk()
bulk.Unordered()
bulk.Insert(M{"n": 1})
bulk.Update(M{"n": 2}, M{"$inc": M{"n": 1}})
bulk.Insert(M{"n": 2})
bulk.Update(M{"n": 3}, M{"$inc": M{"n": 1}})
bulk.Update(M{"n": 1}, M{"$inc": M{"n": 1}})
bulk.Insert(M{"n": 3})
r, err := bulk.Run()
c.Assert(err, IsNil)
c.Assert(r.Matched, Equals, 3)
if s.versionAtLeast(2, 6) {
c.Assert(r.Modified, Equals, 3)
}
type doc struct{ N int }
var res []doc
err = coll.Find(nil).Sort("n").All(&res)
c.Assert(err, IsNil)
c.Assert(res, DeepEquals, []doc{{2}, {3}, {4}})
}
func (s *S) TestBulkUpsert(c *C) {
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
c.Assert(err, IsNil)
bulk := coll.Bulk()
bulk.Upsert(M{"n": 2}, M{"$set": M{"n": 20}})
bulk.Upsert(M{"n": 4}, M{"$set": M{"n": 40}}, M{"n": 3}, M{"$set": M{"n": 30}})
r, err := bulk.Run()
c.Assert(err, IsNil)
c.Assert(r, FitsTypeOf, &mgo.BulkResult{})
type doc struct{ N int }
var res []doc
err = coll.Find(nil).Sort("n").All(&res)
c.Assert(err, IsNil)
c.Assert(res, DeepEquals, []doc{{1}, {20}, {30}, {40}})
}
func (s *S) TestBulkRemove(c *C) {
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}, M{"n": 4}, M{"n": 4})
c.Assert(err, IsNil)
bulk := coll.Bulk()
bulk.Remove(M{"n": 1})
bulk.Remove(M{"n": 2}, M{"n": 4})
r, err := bulk.Run()
c.Assert(err, IsNil)
c.Assert(r.Matched, Equals, 3)
type doc struct{ N int }
var res []doc
err = coll.Find(nil).Sort("n").All(&res)
c.Assert(err, IsNil)
c.Assert(res, DeepEquals, []doc{{3}, {4}})
}
func (s *S) TestBulkRemoveAll(c *C) {
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}, M{"n": 4}, M{"n": 4})
c.Assert(err, IsNil)
bulk := coll.Bulk()
bulk.RemoveAll(M{"n": 1})
bulk.RemoveAll(M{"n": 2}, M{"n": 4})
r, err := bulk.Run()
c.Assert(err, IsNil)
c.Assert(r.Matched, Equals, 4)
type doc struct{ N int }
var res []doc
err = coll.Find(nil).Sort("n").All(&res)
c.Assert(err, IsNil)
c.Assert(res, DeepEquals, []doc{{3}})
}

682
vendor/gopkg.in/mgo.v2/cluster.go generated vendored Normal file

@@ -0,0 +1,682 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"errors"
"fmt"
"net"
"strconv"
"strings"
"sync"
"time"
"gopkg.in/mgo.v2/bson"
)
// ---------------------------------------------------------------------------
// Mongo cluster encapsulation.
//
// A cluster enables the communication with one or more servers participating
// in a mongo cluster. This works with individual servers, a replica set,
// a replica pair, one or multiple mongos routers, etc.
type mongoCluster struct {
sync.RWMutex
serverSynced sync.Cond
userSeeds []string
dynaSeeds []string
servers mongoServers
masters mongoServers
references int
syncing bool
direct bool
failFast bool
syncCount uint
setName string
cachedIndex map[string]bool
sync chan bool
dial dialer
}
func newCluster(userSeeds []string, direct, failFast bool, dial dialer, setName string) *mongoCluster {
cluster := &mongoCluster{
userSeeds: userSeeds,
references: 1,
direct: direct,
failFast: failFast,
dial: dial,
setName: setName,
}
cluster.serverSynced.L = cluster.RWMutex.RLocker()
cluster.sync = make(chan bool, 1)
stats.cluster(+1)
go cluster.syncServersLoop()
return cluster
}
// Acquire increases the reference count for the cluster.
func (cluster *mongoCluster) Acquire() {
cluster.Lock()
cluster.references++
debugf("Cluster %p acquired (refs=%d)", cluster, cluster.references)
cluster.Unlock()
}
// Release decreases the reference count for the cluster. Once
// it reaches zero, all servers will be closed.
func (cluster *mongoCluster) Release() {
cluster.Lock()
if cluster.references == 0 {
panic("cluster.Release() with references == 0")
}
cluster.references--
debugf("Cluster %p released (refs=%d)", cluster, cluster.references)
if cluster.references == 0 {
for _, server := range cluster.servers.Slice() {
server.Close()
}
// Wake up the sync loop so it can die.
cluster.syncServers()
stats.cluster(-1)
}
cluster.Unlock()
}
func (cluster *mongoCluster) LiveServers() (servers []string) {
cluster.RLock()
for _, serv := range cluster.servers.Slice() {
servers = append(servers, serv.Addr)
}
cluster.RUnlock()
return servers
}
func (cluster *mongoCluster) removeServer(server *mongoServer) {
cluster.Lock()
cluster.masters.Remove(server)
other := cluster.servers.Remove(server)
cluster.Unlock()
if other != nil {
other.Close()
log("Removed server ", server.Addr, " from cluster.")
}
server.Close()
}
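// isMasterResult holds the fields of interest from a server's "ismaster"
// reply: its role (primary/secondary), the replica set name, and the known
// peers (Hosts/Passives) used to discover the rest of the topology.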
type isMasterResult struct {
IsMaster bool
Secondary bool
Primary string
Hosts []string
Passives []string
Tags bson.D
Msg string
SetName string `bson:"setName"`
MaxWireVersion int `bson:"maxWireVersion"`
}
func (cluster *mongoCluster) isMaster(socket *mongoSocket, result *isMasterResult) error {
// Monotonic lets it talk to a slave and still hold the socket.
session := newSession(Monotonic, cluster, 10*time.Second)
session.setSocket(socket)
err := session.Run("ismaster", result)
session.Close()
return err
}
type possibleTimeout interface {
Timeout() bool
}
var syncSocketTimeout = 5 * time.Second
func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerInfo, hosts []string, err error) {
var syncTimeout time.Duration
if raceDetector {
// This variable is only ever touched by tests.
globalMutex.Lock()
syncTimeout = syncSocketTimeout
globalMutex.Unlock()
} else {
syncTimeout = syncSocketTimeout
}
addr := server.Addr
log("SYNC Processing ", addr, "...")
// Retry a few times to avoid knocking a server down for a hiccup.
var result isMasterResult
var tryerr error
for retry := 0; ; retry++ {
if retry == 3 || retry == 1 && cluster.failFast {
return nil, nil, tryerr
}
if retry > 0 {
// Don't abuse the server needlessly if there's something actually wrong.
if err, ok := tryerr.(possibleTimeout); ok && err.Timeout() {
// Give a chance for waiters to timeout as well.
cluster.serverSynced.Broadcast()
}
time.Sleep(syncShortDelay)
}
// It's not clear what would be a good timeout here. Is it
// better to wait longer or to retry?
socket, _, err := server.AcquireSocket(0, syncTimeout)
if err != nil {
tryerr = err
logf("SYNC Failed to get socket to %s: %v", addr, err)
continue
}
err = cluster.isMaster(socket, &result)
socket.Release()
if err != nil {
tryerr = err
logf("SYNC Command 'ismaster' to %s failed: %v", addr, err)
continue
}
debugf("SYNC Result of 'ismaster' from %s: %#v", addr, result)
break
}
if cluster.setName != "" && result.SetName != cluster.setName {
logf("SYNC Server %s is not a member of replica set %q", addr, cluster.setName)
return nil, nil, fmt.Errorf("server %s is not a member of replica set %q", addr, cluster.setName)
}
if result.IsMaster {
debugf("SYNC %s is a master.", addr)
if !server.info.Master {
// Made an incorrect assumption above, so fix stats.
stats.conn(-1, false)
stats.conn(+1, true)
}
} else if result.Secondary {
debugf("SYNC %s is a slave.", addr)
} else if cluster.direct {
logf("SYNC %s in unknown state. Pretending it's a slave due to direct connection.", addr)
} else {
logf("SYNC %s is neither a master nor a slave.", addr)
// Let stats track it as whatever was known before.
return nil, nil, errors.New(addr + " is not a master nor slave")
}
info = &mongoServerInfo{
Master: result.IsMaster,
Mongos: result.Msg == "isdbgrid",
Tags: result.Tags,
SetName: result.SetName,
MaxWireVersion: result.MaxWireVersion,
}
hosts = make([]string, 0, 1+len(result.Hosts)+len(result.Passives))
if result.Primary != "" {
// First in the list to speed up master discovery.
hosts = append(hosts, result.Primary)
}
hosts = append(hosts, result.Hosts...)
hosts = append(hosts, result.Passives...)
debugf("SYNC %s knows about the following peers: %#v", addr, hosts)
return info, hosts, nil
}
type syncKind bool
const (
completeSync syncKind = true
partialSync syncKind = false
)
func (cluster *mongoCluster) addServer(server *mongoServer, info *mongoServerInfo, syncKind syncKind) {
cluster.Lock()
current := cluster.servers.Search(server.ResolvedAddr)
if current == nil {
if syncKind == partialSync {
cluster.Unlock()
server.Close()
log("SYNC Discarding unknown server ", server.Addr, " due to partial sync.")
return
}
cluster.servers.Add(server)
if info.Master {
cluster.masters.Add(server)
log("SYNC Adding ", server.Addr, " to cluster as a master.")
} else {
log("SYNC Adding ", server.Addr, " to cluster as a slave.")
}
} else {
if server != current {
panic("addServer attempting to add duplicated server")
}
if server.Info().Master != info.Master {
if info.Master {
log("SYNC Server ", server.Addr, " is now a master.")
cluster.masters.Add(server)
} else {
log("SYNC Server ", server.Addr, " is now a slave.")
cluster.masters.Remove(server)
}
}
}
server.SetInfo(info)
debugf("SYNC Broadcasting availability of server %s", server.Addr)
cluster.serverSynced.Broadcast()
cluster.Unlock()
}
func (cluster *mongoCluster) getKnownAddrs() []string {
cluster.RLock()
max := len(cluster.userSeeds) + len(cluster.dynaSeeds) + cluster.servers.Len()
seen := make(map[string]bool, max)
known := make([]string, 0, max)
add := func(addr string) {
if _, found := seen[addr]; !found {
seen[addr] = true
known = append(known, addr)
}
}
for _, addr := range cluster.userSeeds {
add(addr)
}
for _, addr := range cluster.dynaSeeds {
add(addr)
}
for _, serv := range cluster.servers.Slice() {
add(serv.Addr)
}
cluster.RUnlock()
return known
}
// syncServers injects a value into the cluster.sync channel to force
// an iteration of the syncServersLoop function.
func (cluster *mongoCluster) syncServers() {
select {
case cluster.sync <- true:
default:
}
}
// How long to wait for a checkup of the cluster topology if nothing
// else kicks a synchronization before that.
const syncServersDelay = 30 * time.Second
const syncShortDelay = 500 * time.Millisecond
// syncServersLoop loops while the cluster is alive to keep its idea of
// the server topology up-to-date. It must be called just once from
// newCluster. The loop iterates once syncServersDelay has passed, or
// if somebody injects a value into the cluster.sync channel to force a
// synchronization. A loop iteration will contact all servers in
// parallel, ask them about known peers and their own role within the
// cluster, and then attempt to do the same with all the peers
// retrieved.
func (cluster *mongoCluster) syncServersLoop() {
for {
debugf("SYNC Cluster %p is starting a sync loop iteration.", cluster)
cluster.Lock()
if cluster.references == 0 {
cluster.Unlock()
break
}
cluster.references++ // Keep alive while syncing.
direct := cluster.direct
cluster.Unlock()
cluster.syncServersIteration(direct)
// We just synchronized, so consume any outstanding requests.
select {
case <-cluster.sync:
default:
}
cluster.Release()
// Hold off before allowing another sync. No point in
// burning CPU looking for down servers.
if !cluster.failFast {
time.Sleep(syncShortDelay)
}
cluster.Lock()
if cluster.references == 0 {
cluster.Unlock()
break
}
cluster.syncCount++
// Poke all waiters so they have a chance to timeout or
// restart syncing if they wish to.
cluster.serverSynced.Broadcast()
// Check if we have to restart immediately either way.
restart := !direct && cluster.masters.Empty() || cluster.servers.Empty()
cluster.Unlock()
if restart {
log("SYNC No masters found. Will synchronize again.")
time.Sleep(syncShortDelay)
continue
}
debugf("SYNC Cluster %p waiting for next requested or scheduled sync.", cluster)
// Hold off until somebody explicitly requests a synchronization
// or it's time to check for a cluster topology change again.
select {
case <-cluster.sync:
case <-time.After(syncServersDelay):
}
}
debugf("SYNC Cluster %p is stopping its sync loop.", cluster)
}
func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoServer {
cluster.RLock()
server := cluster.servers.Search(tcpaddr.String())
cluster.RUnlock()
if server != nil {
return server
}
return newServer(addr, tcpaddr, cluster.sync, cluster.dial)
}
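// resolveAddr turns a host:port string into a *net.TCPAddr. Literal IPv4
// and IPv6 addresses are handled without network traffic; otherwise IPv4
// and IPv6 lookups race concurrently via the UDP dialing trick below, and
// an IPv4 result is preferred when both resolve.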
func resolveAddr(addr string) (*net.TCPAddr, error) {
// Simple cases that do not need actual resolution. Works with IPv4 and v6.
if host, port, err := net.SplitHostPort(addr); err == nil {
if port, _ := strconv.Atoi(port); port > 0 {
zone := ""
if i := strings.LastIndex(host, "%"); i >= 0 {
zone = host[i+1:]
host = host[:i]
}
ip := net.ParseIP(host)
if ip != nil {
return &net.TCPAddr{IP: ip, Port: port, Zone: zone}, nil
}
}
}
// Attempt to resolve IPv4 and v6 concurrently.
addrChan := make(chan *net.TCPAddr, 2)
for _, network := range []string{"udp4", "udp6"} {
network := network
go func() {
// The unfortunate UDP dialing hack allows having a timeout on address resolution.
conn, err := net.DialTimeout(network, addr, 10*time.Second)
if err != nil {
addrChan <- nil
} else {
addrChan <- (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr))
conn.Close()
}
}()
}
// Wait for the result of IPv4 and v6 resolution. Use IPv4 if available.
tcpaddr := <-addrChan
if tcpaddr == nil || len(tcpaddr.IP) != 4 {
var timeout <-chan time.Time
if tcpaddr != nil {
// Don't wait too long if an IPv6 address is known.
timeout = time.After(50 * time.Millisecond)
}
select {
case <-timeout:
case tcpaddr2 := <-addrChan:
if tcpaddr == nil || tcpaddr2 != nil {
// It's an IPv4 address or the only known address. Use it.
tcpaddr = tcpaddr2
}
}
}
if tcpaddr == nil {
log("SYNC Failed to resolve server address: ", addr)
return nil, errors.New("failed to resolve server address: " + addr)
}
if tcpaddr.String() != addr {
debug("SYNC Address ", addr, " resolved as ", tcpaddr.String())
}
return tcpaddr, nil
}
type pendingAdd struct {
server *mongoServer
info *mongoServerInfo
}
func (cluster *mongoCluster) syncServersIteration(direct bool) {
log("SYNC Starting full topology synchronization...")
var wg sync.WaitGroup
var m sync.Mutex
notYetAdded := make(map[string]pendingAdd)
addIfFound := make(map[string]bool)
seen := make(map[string]bool)
syncKind := partialSync
var spawnSync func(addr string, byMaster bool)
spawnSync = func(addr string, byMaster bool) {
wg.Add(1)
go func() {
defer wg.Done()
tcpaddr, err := resolveAddr(addr)
if err != nil {
log("SYNC Failed to start sync of ", addr, ": ", err.Error())
return
}
resolvedAddr := tcpaddr.String()
m.Lock()
if byMaster {
if pending, ok := notYetAdded[resolvedAddr]; ok {
delete(notYetAdded, resolvedAddr)
m.Unlock()
cluster.addServer(pending.server, pending.info, completeSync)
return
}
addIfFound[resolvedAddr] = true
}
if seen[resolvedAddr] {
m.Unlock()
return
}
seen[resolvedAddr] = true
m.Unlock()
server := cluster.server(addr, tcpaddr)
info, hosts, err := cluster.syncServer(server)
if err != nil {
cluster.removeServer(server)
return
}
m.Lock()
add := direct || info.Master || addIfFound[resolvedAddr]
if add {
syncKind = completeSync
} else {
notYetAdded[resolvedAddr] = pendingAdd{server, info}
}
m.Unlock()
if add {
cluster.addServer(server, info, completeSync)
}
if !direct {
for _, addr := range hosts {
spawnSync(addr, info.Master)
}
}
}()
}
knownAddrs := cluster.getKnownAddrs()
for _, addr := range knownAddrs {
spawnSync(addr, false)
}
wg.Wait()
if syncKind == completeSync {
logf("SYNC Synchronization was complete (got data from primary).")
for _, pending := range notYetAdded {
cluster.removeServer(pending.server)
}
} else {
logf("SYNC Synchronization was partial (cannot talk to primary).")
for _, pending := range notYetAdded {
cluster.addServer(pending.server, pending.info, partialSync)
}
}
cluster.Lock()
mastersLen := cluster.masters.Len()
logf("SYNC Synchronization completed: %d master(s) and %d slave(s) alive.", mastersLen, cluster.servers.Len()-mastersLen)
// Update dynamic seeds, but only if we have any good servers. Otherwise,
// leave them alone for better chances of a successful sync in the future.
if syncKind == completeSync {
dynaSeeds := make([]string, cluster.servers.Len())
for i, server := range cluster.servers.Slice() {
dynaSeeds[i] = server.Addr
}
cluster.dynaSeeds = dynaSeeds
debugf("SYNC New dynamic seeds: %#v\n", dynaSeeds)
}
cluster.Unlock()
}
// AcquireSocket returns a socket to a server in the cluster. If slaveOk is
// true, it will attempt to return a socket to a slave server. If it is
// false, the socket will necessarily be to a master server.
func (cluster *mongoCluster) AcquireSocket(mode Mode, slaveOk bool, syncTimeout time.Duration, socketTimeout time.Duration, serverTags []bson.D, poolLimit int) (s *mongoSocket, err error) {
var started time.Time
var syncCount uint
warnedLimit := false
for {
cluster.RLock()
for {
mastersLen := cluster.masters.Len()
slavesLen := cluster.servers.Len() - mastersLen
debugf("Cluster has %d known masters and %d known slaves.", mastersLen, slavesLen)
if mastersLen > 0 && !(slaveOk && mode == Secondary) || slavesLen > 0 && slaveOk {
break
}
if mastersLen > 0 && mode == Secondary && cluster.masters.HasMongos() {
break
}
if started.IsZero() {
// Initialize after fast path above.
started = time.Now()
syncCount = cluster.syncCount
} else if syncTimeout != 0 && started.Before(time.Now().Add(-syncTimeout)) || cluster.failFast && cluster.syncCount != syncCount {
cluster.RUnlock()
return nil, errors.New("no reachable servers")
}
log("Waiting for servers to synchronize...")
cluster.syncServers()
// Remember: this will release and reacquire the lock.
cluster.serverSynced.Wait()
}
var server *mongoServer
if slaveOk {
server = cluster.servers.BestFit(mode, serverTags)
} else {
server = cluster.masters.BestFit(mode, nil)
}
cluster.RUnlock()
if server == nil {
// Must have failed the requested tags. Sleep to avoid spinning.
time.Sleep(100 * time.Millisecond)
continue
}
s, abended, err := server.AcquireSocket(poolLimit, socketTimeout)
if err == errPoolLimit {
if !warnedLimit {
warnedLimit = true
log("WARNING: Per-server connection limit reached.")
}
time.Sleep(100 * time.Millisecond)
continue
}
if err != nil {
cluster.removeServer(server)
cluster.syncServers()
continue
}
if abended && !slaveOk {
var result isMasterResult
err := cluster.isMaster(s, &result)
if err != nil || !result.IsMaster {
logf("Cannot confirm server %s as master (%v)", server.Addr, err)
s.Release()
cluster.syncServers()
time.Sleep(100 * time.Millisecond)
continue
}
}
return s, nil
}
panic("unreached")
}
func (cluster *mongoCluster) CacheIndex(cacheKey string, exists bool) {
cluster.Lock()
if cluster.cachedIndex == nil {
cluster.cachedIndex = make(map[string]bool)
}
if exists {
cluster.cachedIndex[cacheKey] = true
} else {
delete(cluster.cachedIndex, cacheKey)
}
cluster.Unlock()
}
func (cluster *mongoCluster) HasCachedIndex(cacheKey string) (result bool) {
cluster.RLock()
if cluster.cachedIndex != nil {
result = cluster.cachedIndex[cacheKey]
}
cluster.RUnlock()
return
}
func (cluster *mongoCluster) ResetIndexCache() {
cluster.Lock()
cluster.cachedIndex = make(map[string]bool)
cluster.Unlock()
}

2090
vendor/gopkg.in/mgo.v2/cluster_test.go generated vendored Normal file

File diff suppressed because it is too large

196
vendor/gopkg.in/mgo.v2/dbtest/dbserver.go generated vendored Normal file

@@ -0,0 +1,196 @@
package dbtest
import (
"bytes"
"fmt"
"net"
"os"
"os/exec"
"strconv"
"time"
"gopkg.in/mgo.v2"
"gopkg.in/tomb.v2"
)
// DBServer controls a MongoDB server process to be used within test suites.
//
// The test server is started when Session is called the first time and should
// remain running for the duration of all tests, with the Wipe method being
// called between tests (before each of them) to clear stored data. After all tests
// are done, the Stop method should be called to stop the test server.
//
// Before the DBServer is used the SetPath method must be called to define
// the location for the database files to be stored.
type DBServer struct {
session *mgo.Session
output bytes.Buffer
server *exec.Cmd
dbpath string
host string
tomb tomb.Tomb
}
// SetPath defines the path to the directory where the database files will be
// stored if it is started. The directory path itself is not created or removed
// by the test helper.
func (dbs *DBServer) SetPath(dbpath string) {
dbs.dbpath = dbpath
}
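// A typical test-suite sketch (illustrative only; the path is a placeholder
// for an existing directory):
//
//	var server DBServer
//	server.SetPath("/tmp/mgo-dbtest")
//	defer server.Stop()
//	session := server.Session() // the first call starts mongod
//	defer session.Close()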
func (dbs *DBServer) start() {
if dbs.server != nil {
panic("DBServer already started")
}
if dbs.dbpath == "" {
panic("DBServer.SetPath must be called before using the server")
}
mgo.SetStats(true)
l, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
panic("unable to listen on a local address: " + err.Error())
}
addr := l.Addr().(*net.TCPAddr)
l.Close()
dbs.host = addr.String()
args := []string{
"--dbpath", dbs.dbpath,
"--bind_ip", "127.0.0.1",
"--port", strconv.Itoa(addr.Port),
"--nssize", "1",
"--noprealloc",
"--smallfiles",
"--nojournal",
}
dbs.tomb = tomb.Tomb{}
dbs.server = exec.Command("mongod", args...)
dbs.server.Stdout = &dbs.output
dbs.server.Stderr = &dbs.output
err = dbs.server.Start()
if err != nil {
panic(err)
}
dbs.tomb.Go(dbs.monitor)
dbs.Wipe()
}
func (dbs *DBServer) monitor() error {
dbs.server.Process.Wait()
if dbs.tomb.Alive() {
// Present some debugging information.
fmt.Fprintf(os.Stderr, "---- mongod process died unexpectedly:\n")
fmt.Fprintf(os.Stderr, "%s", dbs.output.Bytes())
fmt.Fprintf(os.Stderr, "---- mongod processes running right now:\n")
cmd := exec.Command("/bin/sh", "-c", "ps auxw | grep mongod")
cmd.Stdout = os.Stderr
cmd.Stderr = os.Stderr
cmd.Run()
fmt.Fprintf(os.Stderr, "----------------------------------------\n")
panic("mongod process died unexpectedly")
}
return nil
}
// Stop stops the test server process, if it is running.
//
// It's okay to call Stop multiple times. After the test server is
// stopped it cannot be restarted.
//
// All database sessions must be closed before or while the Stop method
// is running. Otherwise Stop will panic after a timeout informing that
// there is a session leak.
func (dbs *DBServer) Stop() {
if dbs.session != nil {
dbs.checkSessions()
if dbs.session != nil {
dbs.session.Close()
dbs.session = nil
}
}
if dbs.server != nil {
dbs.tomb.Kill(nil)
dbs.server.Process.Signal(os.Interrupt)
select {
case <-dbs.tomb.Dead():
case <-time.After(5 * time.Second):
panic("timeout waiting for mongod process to die")
}
dbs.server = nil
}
}
// Session returns a new session to the server. The returned session
// must be closed after the test is done with it.
//
// The first Session obtained from a DBServer will start it.
func (dbs *DBServer) Session() *mgo.Session {
if dbs.server == nil {
dbs.start()
}
if dbs.session == nil {
mgo.ResetStats()
var err error
dbs.session, err = mgo.Dial(dbs.host + "/test")
if err != nil {
panic(err)
}
}
return dbs.session.Copy()
}
// checkSessions ensures all mgo sessions opened were properly closed.
// For slightly faster tests, it may be disabled by setting the
// environment variable CHECK_SESSIONS to 0.
func (dbs *DBServer) checkSessions() {
if check := os.Getenv("CHECK_SESSIONS"); check == "0" || dbs.server == nil || dbs.session == nil {
return
}
dbs.session.Close()
dbs.session = nil
for i := 0; i < 100; i++ {
stats := mgo.GetStats()
if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 {
return
}
time.Sleep(100 * time.Millisecond)
}
panic("There are mgo sessions still alive.")
}
// Wipe drops all created databases and their data.
//
// The MongoDB server remains running if it was previously running,
// or stopped if it was previously stopped.
//
// All database sessions must be closed before or while the Wipe method
// is running. Otherwise Wipe will panic after a timeout informing that
// there is a session leak.
func (dbs *DBServer) Wipe() {
if dbs.server == nil || dbs.session == nil {
return
}
dbs.checkSessions()
sessionUnset := dbs.session == nil
session := dbs.Session()
defer session.Close()
if sessionUnset {
dbs.session.Close()
dbs.session = nil
}
names, err := session.DatabaseNames()
if err != nil {
panic(err)
}
for _, name := range names {
switch name {
case "admin", "local", "config":
default:
err = session.DB(name).DropDatabase()
if err != nil {
panic(err)
}
}
}
}

108
vendor/gopkg.in/mgo.v2/dbtest/dbserver_test.go generated vendored Normal file

@@ -0,0 +1,108 @@
package dbtest_test
import (
"os"
"testing"
"time"
. "gopkg.in/check.v1"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/dbtest"
)
type M map[string]interface{}
func TestAll(t *testing.T) {
TestingT(t)
}
type S struct {
oldCheckSessions string
}
var _ = Suite(&S{})
func (s *S) SetUpTest(c *C) {
s.oldCheckSessions = os.Getenv("CHECK_SESSIONS")
os.Setenv("CHECK_SESSIONS", "")
}
func (s *S) TearDownTest(c *C) {
os.Setenv("CHECK_SESSIONS", s.oldCheckSessions)
}
func (s *S) TestWipeData(c *C) {
var server dbtest.DBServer
server.SetPath(c.MkDir())
defer server.Stop()
session := server.Session()
err := session.DB("mydb").C("mycoll").Insert(M{"a": 1})
session.Close()
c.Assert(err, IsNil)
server.Wipe()
session = server.Session()
names, err := session.DatabaseNames()
session.Close()
c.Assert(err, IsNil)
for _, name := range names {
if name != "local" && name != "admin" {
c.Fatalf("Wipe should have removed this database: %s", name)
}
}
}
func (s *S) TestStop(c *C) {
var server dbtest.DBServer
server.SetPath(c.MkDir())
defer server.Stop()
// Server should not be running.
process := server.ProcessTest()
c.Assert(process, IsNil)
session := server.Session()
addr := session.LiveServers()[0]
session.Close()
// Server should be running now.
process = server.ProcessTest()
p, err := os.FindProcess(process.Pid)
c.Assert(err, IsNil)
p.Release()
server.Stop()
// Server should not be running anymore.
session, err = mgo.DialWithTimeout(addr, 500*time.Millisecond)
if session != nil {
session.Close()
c.Fatalf("Stop did not stop the server")
}
}
func (s *S) TestCheckSessions(c *C) {
var server dbtest.DBServer
server.SetPath(c.MkDir())
defer server.Stop()
session := server.Session()
defer session.Close()
c.Assert(server.Wipe, PanicMatches, "There are mgo sessions still alive.")
}
func (s *S) TestCheckSessionsDisabled(c *C) {
var server dbtest.DBServer
server.SetPath(c.MkDir())
defer server.Stop()
os.Setenv("CHECK_SESSIONS", "0")
// Should not panic, although it looks to Wipe like this session will leak.
session := server.Session()
defer session.Close()
server.Wipe()
}

12
vendor/gopkg.in/mgo.v2/dbtest/export_test.go generated vendored Normal file
View File

@@ -0,0 +1,12 @@
package dbtest
import (
"os"
)
func (dbs *DBServer) ProcessTest() *os.Process {
if dbs.server == nil {
return nil
}
return dbs.server.Process
}

31
vendor/gopkg.in/mgo.v2/doc.go generated vendored Normal file
View File

@@ -0,0 +1,31 @@
// Package mgo offers a rich MongoDB driver for Go.
//
// Details about the mgo project (pronounced as "mango") are found
// in its web page:
//
// http://labix.org/mgo
//
// Usage of the driver revolves around the concept of sessions. To
// get started, obtain a session using the Dial function:
//
// session, err := mgo.Dial(url)
//
// This will establish one or more connections with the cluster of
// servers defined by the url parameter. From then on, the cluster
// may be queried with multiple consistency rules (see SetMode) and
// documents retrieved with statements such as:
//
// c := session.DB(database).C(collection)
// err := c.Find(query).One(&result)
//
// New sessions are typically created by calling session.Copy on the
// initial session obtained at dial time. These new sessions will share
// the same cluster information and connection pool, and may be easily
// handed into other methods and functions for organizing logic.
// Every session created must have its Close method called at the end
// of its life time, so its resources may be put back in the pool or
// collected, depending on the case.
//
// For more details, see the documentation for the types and methods.
//
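// A minimal end-to-end sketch of the pattern above (an editor's example,
// not part of the original file; the URL, database, collection, and result
// type are assumptions):
//
//     session, err := mgo.Dial("localhost:27017")
//     if err != nil {
//         panic(err)
//     }
//     defer session.Close() // every session must eventually be closed
//
//     var result struct{ Name string }
//     err = session.DB("test").C("people").Find(bson.M{"name": "Ada"}).One(&result)
//     if err != nil {
//         panic(err)
//     }
//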
package mgo

33
vendor/gopkg.in/mgo.v2/export_test.go generated vendored Normal file
View File

@@ -0,0 +1,33 @@
package mgo
import (
"time"
)
func HackPingDelay(newDelay time.Duration) (restore func()) {
globalMutex.Lock()
defer globalMutex.Unlock()
oldDelay := pingDelay
restore = func() {
globalMutex.Lock()
pingDelay = oldDelay
globalMutex.Unlock()
}
pingDelay = newDelay
return
}
func HackSyncSocketTimeout(newTimeout time.Duration) (restore func()) {
globalMutex.Lock()
defer globalMutex.Unlock()
oldTimeout := syncSocketTimeout
restore = func() {
globalMutex.Lock()
syncSocketTimeout = oldTimeout
globalMutex.Unlock()
}
syncSocketTimeout = newTimeout
return
}

761
vendor/gopkg.in/mgo.v2/gridfs.go generated vendored Normal file
View File

@@ -0,0 +1,761 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"crypto/md5"
"encoding/hex"
"errors"
"hash"
"io"
"os"
"sync"
"time"
"gopkg.in/mgo.v2/bson"
)
type GridFS struct {
Files *Collection
Chunks *Collection
}
type gfsFileMode int
const (
gfsClosed gfsFileMode = 0
gfsReading gfsFileMode = 1
gfsWriting gfsFileMode = 2
)
type GridFile struct {
m sync.Mutex
c sync.Cond
gfs *GridFS
mode gfsFileMode
err error
chunk int
offset int64
wpending int
wbuf []byte
wsum hash.Hash
rbuf []byte
rcache *gfsCachedChunk
doc gfsFile
}
type gfsFile struct {
Id interface{} "_id"
ChunkSize int "chunkSize"
UploadDate time.Time "uploadDate"
Length int64 ",minsize"
MD5 string
Filename string ",omitempty"
ContentType string "contentType,omitempty"
Metadata *bson.Raw ",omitempty"
}
type gfsChunk struct {
Id interface{} "_id"
FilesId interface{} "files_id"
N int
Data []byte
}
type gfsCachedChunk struct {
wait sync.Mutex
n int
data []byte
err error
}
func newGridFS(db *Database, prefix string) *GridFS {
return &GridFS{db.C(prefix + ".files"), db.C(prefix + ".chunks")}
}
func (gfs *GridFS) newFile() *GridFile {
file := &GridFile{gfs: gfs}
file.c.L = &file.m
//runtime.SetFinalizer(file, finalizeFile)
return file
}
func finalizeFile(file *GridFile) {
file.Close()
}
// Create creates a new file with the provided name in the GridFS. If the file
// name already exists, a new version will be inserted with an up-to-date
// uploadDate that will cause it to be atomically visible to the Open and
// OpenId methods. If the file name is not important, an empty name may be
// provided and the file Id used instead.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
//
// A simple example inserting a new file:
//
// func check(err error) {
// if err != nil {
// panic(err)
// }
// }
// file, err := db.GridFS("fs").Create("myfile.txt")
// check(err)
// n, err := file.Write([]byte("Hello world!"))
// check(err)
// err = file.Close()
// check(err)
// fmt.Printf("%d bytes written\n", n)
//
// The io.Writer interface is implemented by *GridFile and may be used to
// help on the file creation. For example:
//
// file, err := db.GridFS("fs").Create("myfile.txt")
// check(err)
// messages, err := os.Open("/var/log/messages")
// check(err)
// defer messages.Close()
// _, err = io.Copy(file, messages)
// check(err)
// err = file.Close()
// check(err)
//
func (gfs *GridFS) Create(name string) (file *GridFile, err error) {
file = gfs.newFile()
file.mode = gfsWriting
file.wsum = md5.New()
file.doc = gfsFile{Id: bson.NewObjectId(), ChunkSize: 255 * 1024, Filename: name}
return
}
// OpenId returns the file with the provided id, for reading.
// If the file isn't found, err will be set to mgo.ErrNotFound.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
//
// The following example will print the first 8192 bytes from the file:
//
// func check(err error) {
// if err != nil {
// panic(err)
// }
// }
// file, err := db.GridFS("fs").OpenId(objid)
// check(err)
// b := make([]byte, 8192)
// n, err := file.Read(b)
// check(err)
// fmt.Println(string(b))
// check(err)
// err = file.Close()
// check(err)
// fmt.Printf("%d bytes read\n", n)
//
// The io.Reader interface is implemented by *GridFile and may be used to
// deal with it. As an example, the following snippet will dump the whole
// file into the standard output:
//
// file, err := db.GridFS("fs").OpenId(objid)
// check(err)
// _, err = io.Copy(os.Stdout, file)
// check(err)
// err = file.Close()
// check(err)
//
func (gfs *GridFS) OpenId(id interface{}) (file *GridFile, err error) {
var doc gfsFile
err = gfs.Files.Find(bson.M{"_id": id}).One(&doc)
if err != nil {
return
}
file = gfs.newFile()
file.mode = gfsReading
file.doc = doc
return
}
// Open returns the most recently uploaded file with the provided
// name, for reading. If the file isn't found, err will be set
// to mgo.ErrNotFound.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
//
// The following example will print the first 8192 bytes from the file:
//
// file, err := db.GridFS("fs").Open("myfile.txt")
// check(err)
// b := make([]byte, 8192)
// n, err := file.Read(b)
// check(err)
// fmt.Println(string(b))
// check(err)
// err = file.Close()
// check(err)
// fmt.Printf("%d bytes read\n", n)
//
// The io.Reader interface is implemented by *GridFile and may be used to
// deal with it. As an example, the following snippet will dump the whole
// file into the standard output:
//
// file, err := db.GridFS("fs").Open("myfile.txt")
// check(err)
// _, err = io.Copy(os.Stdout, file)
// check(err)
// err = file.Close()
// check(err)
//
func (gfs *GridFS) Open(name string) (file *GridFile, err error) {
var doc gfsFile
err = gfs.Files.Find(bson.M{"filename": name}).Sort("-uploadDate").One(&doc)
if err != nil {
return
}
file = gfs.newFile()
file.mode = gfsReading
file.doc = doc
return
}
// OpenNext opens the next file from iter for reading, sets *file to it,
// and returns true on the success case. If no more documents are available
// on iter or an error occurred, *file is set to nil and the result is false.
// Errors will be available via iter.Err().
//
// The iter parameter must be an iterator on the GridFS files collection.
// Using the GridFS.Find method is an easy way to obtain such an iterator,
// but any iterator on the collection will work.
//
// If the provided *file is non-nil, OpenNext will close it before attempting
// to iterate to the next element. This means that in a loop one only
// has to worry about closing files when breaking out of the loop early
// (break, return, or panic).
//
// For example:
//
// gfs := db.GridFS("fs")
// query := gfs.Find(nil).Sort("filename")
// iter := query.Iter()
// var f *mgo.GridFile
// for gfs.OpenNext(iter, &f) {
// fmt.Printf("Filename: %s\n", f.Name())
// }
// if iter.Close() != nil {
// panic(iter.Close())
// }
//
func (gfs *GridFS) OpenNext(iter *Iter, file **GridFile) bool {
if *file != nil {
// Ignoring the error here shouldn't be a big deal
// as we're reading the file and the loop iteration
// for this file is finished.
_ = (*file).Close()
}
var doc gfsFile
if !iter.Next(&doc) {
*file = nil
return false
}
f := gfs.newFile()
f.mode = gfsReading
f.doc = doc
*file = f
return true
}
// Find runs query on GridFS's files collection and returns
// the resulting Query.
//
// This logic:
//
// gfs := db.GridFS("fs")
// iter := gfs.Find(nil).Iter()
//
// Is equivalent to:
//
// files := db.C("fs" + ".files")
// iter := files.Find(nil).Iter()
//
func (gfs *GridFS) Find(query interface{}) *Query {
return gfs.Files.Find(query)
}
// RemoveId deletes the file with the provided id from the GridFS.
func (gfs *GridFS) RemoveId(id interface{}) error {
err := gfs.Files.Remove(bson.M{"_id": id})
if err != nil {
return err
}
_, err = gfs.Chunks.RemoveAll(bson.D{{"files_id", id}})
return err
}
type gfsDocId struct {
Id interface{} "_id"
}
// Remove deletes all files with the provided name from the GridFS.
func (gfs *GridFS) Remove(name string) (err error) {
iter := gfs.Files.Find(bson.M{"filename": name}).Select(bson.M{"_id": 1}).Iter()
var doc gfsDocId
for iter.Next(&doc) {
if e := gfs.RemoveId(doc.Id); e != nil {
err = e
}
}
if err == nil {
err = iter.Close()
}
return err
}
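// For instance, deleting every stored version of a file by name (an
// editor's sketch; the "fs" prefix and file name are assumptions):
//
//     gfs := db.GridFS("fs")
//     if err := gfs.Remove("myfile.txt"); err != nil {
//         panic(err)
//     }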
func (file *GridFile) assertMode(mode gfsFileMode) {
switch file.mode {
case mode:
return
case gfsWriting:
panic("GridFile is open for writing")
case gfsReading:
panic("GridFile is open for reading")
case gfsClosed:
panic("GridFile is closed")
default:
panic("internal error: missing GridFile mode")
}
}
// SetChunkSize sets the size of saved chunks. Once the file is written to, it
// will be split into blocks of that size and each block saved into an
// independent chunk document. The default chunk size is 255 KB.
//
// It is a runtime error to call this function once the file has started
// being written to.
func (file *GridFile) SetChunkSize(bytes int) {
file.assertMode(gfsWriting)
debugf("GridFile %p: setting chunk size to %d", file, bytes)
file.m.Lock()
file.doc.ChunkSize = bytes
file.m.Unlock()
}
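// For example, forcing tiny chunks in a test (an editor's sketch based on
// the gridfs tests; SetChunkSize must run before the first Write):
//
//     file, err := db.GridFS("fs").Create("myfile.txt")
//     check(err)
//     file.SetChunkSize(5) // each stored chunk now holds at most 5 bytes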
// Id returns the current file Id.
func (file *GridFile) Id() interface{} {
return file.doc.Id
}
// SetId changes the current file Id.
//
// It is a runtime error to call this function once the file has started
// being written to, or when the file is not open for writing.
func (file *GridFile) SetId(id interface{}) {
file.assertMode(gfsWriting)
file.m.Lock()
file.doc.Id = id
file.m.Unlock()
}
// Name returns the optional file name. An empty string will be returned
// in case it is unset.
func (file *GridFile) Name() string {
return file.doc.Filename
}
// SetName changes the optional file name. An empty string may be used to
// unset it.
//
// It is a runtime error to call this function when the file is not open
// for writing.
func (file *GridFile) SetName(name string) {
file.assertMode(gfsWriting)
file.m.Lock()
file.doc.Filename = name
file.m.Unlock()
}
// ContentType returns the optional file content type. An empty string will be
// returned in case it is unset.
func (file *GridFile) ContentType() string {
return file.doc.ContentType
}
// SetContentType changes the optional file content type. An empty string may be
// used to unset it.
//
// It is a runtime error to call this function when the file is not open
// for writing.
func (file *GridFile) SetContentType(ctype string) {
file.assertMode(gfsWriting)
file.m.Lock()
file.doc.ContentType = ctype
file.m.Unlock()
}
// GetMeta unmarshals the optional "metadata" field associated with the
// file into the result parameter. The meaning of keys under that field
// is user-defined. For example:
//
// result := struct{ INode int }{}
// err = file.GetMeta(&result)
// if err != nil {
// panic(err.String())
// }
// fmt.Printf("inode: %d\n", result.INode)
//
func (file *GridFile) GetMeta(result interface{}) (err error) {
file.m.Lock()
if file.doc.Metadata != nil {
err = bson.Unmarshal(file.doc.Metadata.Data, result)
}
file.m.Unlock()
return
}
// SetMeta changes the optional "metadata" field associated with the
// file. The meaning of keys under that field is user-defined.
// For example:
//
// file.SetMeta(bson.M{"inode": inode})
//
// It is a runtime error to call this function when the file is not open
// for writing.
func (file *GridFile) SetMeta(metadata interface{}) {
file.assertMode(gfsWriting)
data, err := bson.Marshal(metadata)
file.m.Lock()
if err != nil && file.err == nil {
file.err = err
} else {
file.doc.Metadata = &bson.Raw{Data: data}
}
file.m.Unlock()
}
// Size returns the file size in bytes.
func (file *GridFile) Size() (bytes int64) {
file.m.Lock()
bytes = file.doc.Length
file.m.Unlock()
return
}
// MD5 returns the file MD5 as a hex-encoded string.
func (file *GridFile) MD5() (md5 string) {
return file.doc.MD5
}
// UploadDate returns the file upload time.
func (file *GridFile) UploadDate() time.Time {
return file.doc.UploadDate
}
// SetUploadDate changes the file upload time.
//
// It is a runtime error to call this function when the file is not open
// for writing.
func (file *GridFile) SetUploadDate(t time.Time) {
file.assertMode(gfsWriting)
file.m.Lock()
file.doc.UploadDate = t
file.m.Unlock()
}
// Close flushes any pending changes in case the file is being written
// to, waits for any background operations to finish, and closes the file.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
func (file *GridFile) Close() (err error) {
file.m.Lock()
defer file.m.Unlock()
if file.mode == gfsWriting {
if len(file.wbuf) > 0 && file.err == nil {
file.insertChunk(file.wbuf)
file.wbuf = file.wbuf[0:0]
}
file.completeWrite()
} else if file.mode == gfsReading && file.rcache != nil {
file.rcache.wait.Lock()
file.rcache = nil
}
file.mode = gfsClosed
debugf("GridFile %p: closed", file)
return file.err
}
func (file *GridFile) completeWrite() {
for file.wpending > 0 {
debugf("GridFile %p: waiting for %d pending chunks to complete file write", file, file.wpending)
file.c.Wait()
}
if file.err == nil {
hexsum := hex.EncodeToString(file.wsum.Sum(nil))
if file.doc.UploadDate.IsZero() {
file.doc.UploadDate = bson.Now()
}
file.doc.MD5 = hexsum
file.err = file.gfs.Files.Insert(file.doc)
}
if file.err != nil {
file.gfs.Chunks.RemoveAll(bson.D{{"files_id", file.doc.Id}})
}
if file.err == nil {
index := Index{
Key: []string{"files_id", "n"},
Unique: true,
}
file.err = file.gfs.Chunks.EnsureIndex(index)
}
}
// Abort cancels an in-progress write, preventing the file from being
// automatically created and ensuring previously written chunks are
// removed when the file is closed.
//
// It is a runtime error to call Abort when the file was not opened
// for writing.
func (file *GridFile) Abort() {
if file.mode != gfsWriting {
panic("file.Abort must be called on file opened for writing")
}
file.err = errors.New("write aborted")
}
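// For example, discarding a half-written file (an editor's sketch mirroring
// the gridfs tests; gfs and data are assumptions):
//
//     file, err := gfs.Create("partial.txt")
//     check(err)
//     _, err = file.Write(data)
//     check(err)
//     file.Abort()
//     err = file.Close() // returns "write aborted"; chunks are removed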
// Write writes the provided data to the file and returns the
// number of bytes written and an error in case something
// went wrong.
//
// The file will internally cache the data so that all but the last
// chunk sent to the database have the size defined by SetChunkSize.
// This also means that errors may be deferred until a future call
// to Write or Close.
//
// The parameters and behavior of this function turn the file
// into an io.Writer.
func (file *GridFile) Write(data []byte) (n int, err error) {
file.assertMode(gfsWriting)
file.m.Lock()
debugf("GridFile %p: writing %d bytes", file, len(data))
defer file.m.Unlock()
if file.err != nil {
return 0, file.err
}
n = len(data)
file.doc.Length += int64(n)
chunkSize := file.doc.ChunkSize
if len(file.wbuf)+len(data) < chunkSize {
file.wbuf = append(file.wbuf, data...)
return
}
// First, flush file.wbuf complementing with data.
if len(file.wbuf) > 0 {
missing := chunkSize - len(file.wbuf)
if missing > len(data) {
missing = len(data)
}
file.wbuf = append(file.wbuf, data[:missing]...)
data = data[missing:]
file.insertChunk(file.wbuf)
file.wbuf = file.wbuf[0:0]
}
// Then, flush all chunks from data without copying.
for len(data) > chunkSize {
size := chunkSize
if size > len(data) {
size = len(data)
}
file.insertChunk(data[:size])
data = data[size:]
}
// And append the rest for a future call.
file.wbuf = append(file.wbuf, data...)
return n, file.err
}
func (file *GridFile) insertChunk(data []byte) {
n := file.chunk
file.chunk++
debugf("GridFile %p: adding to checksum: %q", file, string(data))
file.wsum.Write(data)
for file.doc.ChunkSize*file.wpending >= 1024*1024 {
// Hold on.. we got a MB pending.
file.c.Wait()
if file.err != nil {
return
}
}
file.wpending++
debugf("GridFile %p: inserting chunk %d with %d bytes", file, n, len(data))
// We may not own the memory of data, so rather than
// simply copying it, we'll marshal the document ahead of time.
data, err := bson.Marshal(gfsChunk{bson.NewObjectId(), file.doc.Id, n, data})
if err != nil {
file.err = err
return
}
go func() {
err := file.gfs.Chunks.Insert(bson.Raw{Data: data})
file.m.Lock()
file.wpending--
if err != nil && file.err == nil {
file.err = err
}
file.c.Broadcast()
file.m.Unlock()
}()
}
// Seek sets the offset for the next Read or Write on file to
// offset, interpreted according to whence: 0 means relative to
// the origin of the file, 1 means relative to the current offset,
// and 2 means relative to the end. It returns the new offset and
// an error, if any.
func (file *GridFile) Seek(offset int64, whence int) (pos int64, err error) {
file.m.Lock()
debugf("GridFile %p: seeking for %s (whence=%d)", file, offset, whence)
defer file.m.Unlock()
switch whence {
case os.SEEK_SET:
case os.SEEK_CUR:
offset += file.offset
case os.SEEK_END:
offset += file.doc.Length
default:
panic("unsupported whence value")
}
if offset > file.doc.Length {
return file.offset, errors.New("seek past end of file")
}
if offset == file.doc.Length {
// If we're seeking to the end of the file,
// no need to read anything. This enables
// a client to find the size of the file using only the
// io.ReadSeeker interface with low overhead.
file.offset = offset
return file.offset, nil
}
chunk := int(offset / int64(file.doc.ChunkSize))
if chunk+1 == file.chunk && offset >= file.offset {
file.rbuf = file.rbuf[int(offset-file.offset):]
file.offset = offset
return file.offset, nil
}
file.offset = offset
file.chunk = chunk
file.rbuf = nil
file.rbuf, err = file.getChunk()
if err == nil {
file.rbuf = file.rbuf[int(file.offset-int64(chunk)*int64(file.doc.ChunkSize)):]
}
return file.offset, err
}
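// For example, the size of an open file may be found with a single seek and
// no chunk fetches, per the comment above (an editor's sketch):
//
//     size, err := file.Seek(0, os.SEEK_END)
//     check(err)
//     _, err = file.Seek(0, os.SEEK_SET) // rewind before reading
//     check(err)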
// Read reads into b the next available data from the file and
// returns the number of bytes read and an error in case
// something went wrong. At the end of the file, n will
// be zero and err will be set to io.EOF.
//
// The parameters and behavior of this function turn the file
// into an io.Reader.
func (file *GridFile) Read(b []byte) (n int, err error) {
file.assertMode(gfsReading)
file.m.Lock()
debugf("GridFile %p: reading at offset %d into buffer of length %d", file, file.offset, len(b))
defer file.m.Unlock()
if file.offset == file.doc.Length {
return 0, io.EOF
}
for err == nil {
i := copy(b, file.rbuf)
n += i
file.offset += int64(i)
file.rbuf = file.rbuf[i:]
if i == len(b) || file.offset == file.doc.Length {
break
}
b = b[i:]
file.rbuf, err = file.getChunk()
}
return n, err
}
func (file *GridFile) getChunk() (data []byte, err error) {
cache := file.rcache
file.rcache = nil
if cache != nil && cache.n == file.chunk {
debugf("GridFile %p: Getting chunk %d from cache", file, file.chunk)
cache.wait.Lock()
data, err = cache.data, cache.err
} else {
debugf("GridFile %p: Fetching chunk %d", file, file.chunk)
var doc gfsChunk
err = file.gfs.Chunks.Find(bson.D{{"files_id", file.doc.Id}, {"n", file.chunk}}).One(&doc)
data = doc.Data
}
file.chunk++
if int64(file.chunk)*int64(file.doc.ChunkSize) < file.doc.Length {
// Read the next one in background.
cache = &gfsCachedChunk{n: file.chunk}
cache.wait.Lock()
debugf("GridFile %p: Scheduling chunk %d for background caching", file, file.chunk)
// Clone the session to avoid having it closed in between.
chunks := file.gfs.Chunks
session := chunks.Database.Session.Clone()
go func(id interface{}, n int) {
defer session.Close()
chunks = chunks.With(session)
var doc gfsChunk
cache.err = chunks.Find(bson.D{{"files_id", id}, {"n", n}}).One(&doc)
cache.data = doc.Data
cache.wait.Unlock()
}(file.doc.Id, file.chunk)
file.rcache = cache
}
debugf("Returning err: %#v", err)
return
}

708
vendor/gopkg.in/mgo.v2/gridfs_test.go generated vendored Normal file
View File

@@ -0,0 +1,708 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo_test
import (
"io"
"os"
"time"
. "gopkg.in/check.v1"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
func (s *S) TestGridFSCreate(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
before := bson.Now()
gfs := db.GridFS("fs")
file, err := gfs.Create("")
c.Assert(err, IsNil)
n, err := file.Write([]byte("some data"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 9)
err = file.Close()
c.Assert(err, IsNil)
after := bson.Now()
// Check the file information.
result := M{}
err = db.C("fs.files").Find(nil).One(result)
c.Assert(err, IsNil)
fileId, ok := result["_id"].(bson.ObjectId)
c.Assert(ok, Equals, true)
c.Assert(fileId.Valid(), Equals, true)
result["_id"] = "<id>"
ud, ok := result["uploadDate"].(time.Time)
c.Assert(ok, Equals, true)
c.Assert(ud.After(before) && ud.Before(after), Equals, true)
result["uploadDate"] = "<timestamp>"
expected := M{
"_id": "<id>",
"length": 9,
"chunkSize": 255 * 1024,
"uploadDate": "<timestamp>",
"md5": "1e50210a0202497fb79bc38b6ade6c34",
}
c.Assert(result, DeepEquals, expected)
// Check the chunk.
result = M{}
err = db.C("fs.chunks").Find(nil).One(result)
c.Assert(err, IsNil)
chunkId, ok := result["_id"].(bson.ObjectId)
c.Assert(ok, Equals, true)
c.Assert(chunkId.Valid(), Equals, true)
result["_id"] = "<id>"
expected = M{
"_id": "<id>",
"files_id": fileId,
"n": 0,
"data": []byte("some data"),
}
c.Assert(result, DeepEquals, expected)
// Check that an index was created.
indexes, err := db.C("fs.chunks").Indexes()
c.Assert(err, IsNil)
c.Assert(len(indexes), Equals, 2)
c.Assert(indexes[1].Key, DeepEquals, []string{"files_id", "n"})
}
func (s *S) TestGridFSFileDetails(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("myfile1.txt")
c.Assert(err, IsNil)
n, err := file.Write([]byte("some"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 4)
c.Assert(file.Size(), Equals, int64(4))
n, err = file.Write([]byte(" data"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 5)
c.Assert(file.Size(), Equals, int64(9))
id, _ := file.Id().(bson.ObjectId)
c.Assert(id.Valid(), Equals, true)
c.Assert(file.Name(), Equals, "myfile1.txt")
c.Assert(file.ContentType(), Equals, "")
var info interface{}
err = file.GetMeta(&info)
c.Assert(err, IsNil)
c.Assert(info, IsNil)
file.SetId("myid")
file.SetName("myfile2.txt")
file.SetContentType("text/plain")
file.SetMeta(M{"any": "thing"})
c.Assert(file.Id(), Equals, "myid")
c.Assert(file.Name(), Equals, "myfile2.txt")
c.Assert(file.ContentType(), Equals, "text/plain")
err = file.GetMeta(&info)
c.Assert(err, IsNil)
c.Assert(info, DeepEquals, bson.M{"any": "thing"})
err = file.Close()
c.Assert(err, IsNil)
c.Assert(file.MD5(), Equals, "1e50210a0202497fb79bc38b6ade6c34")
ud := file.UploadDate()
now := time.Now()
c.Assert(ud.Before(now), Equals, true)
c.Assert(ud.After(now.Add(-3*time.Second)), Equals, true)
result := M{}
err = db.C("fs.files").Find(nil).One(result)
c.Assert(err, IsNil)
result["uploadDate"] = "<timestamp>"
expected := M{
"_id": "myid",
"length": 9,
"chunkSize": 255 * 1024,
"uploadDate": "<timestamp>",
"md5": "1e50210a0202497fb79bc38b6ade6c34",
"filename": "myfile2.txt",
"contentType": "text/plain",
"metadata": M{"any": "thing"},
}
c.Assert(result, DeepEquals, expected)
}
func (s *S) TestGridFSSetUploadDate(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("")
c.Assert(err, IsNil)
t := time.Date(2014, 1, 1, 1, 1, 1, 0, time.Local)
file.SetUploadDate(t)
err = file.Close()
c.Assert(err, IsNil)
// Check the file information.
result := M{}
err = db.C("fs.files").Find(nil).One(result)
c.Assert(err, IsNil)
ud := result["uploadDate"].(time.Time)
if !ud.Equal(t) {
c.Fatalf("want upload date %s, got %s", t, ud)
}
}
func (s *S) TestGridFSCreateWithChunking(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("")
c.Assert(err, IsNil)
file.SetChunkSize(5)
// Smaller than the chunk size.
n, err := file.Write([]byte("abc"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 3)
// Boundary in the middle.
n, err = file.Write([]byte("defg"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 4)
// Boundary at the end.
n, err = file.Write([]byte("hij"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 3)
// Larger than the chunk size, with 3 chunks.
n, err = file.Write([]byte("klmnopqrstuv"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 12)
err = file.Close()
c.Assert(err, IsNil)
// Check the file information.
result := M{}
err = db.C("fs.files").Find(nil).One(result)
c.Assert(err, IsNil)
fileId, _ := result["_id"].(bson.ObjectId)
c.Assert(fileId.Valid(), Equals, true)
result["_id"] = "<id>"
result["uploadDate"] = "<timestamp>"
expected := M{
"_id": "<id>",
"length": 22,
"chunkSize": 5,
"uploadDate": "<timestamp>",
"md5": "44a66044834cbe55040089cabfc102d5",
}
c.Assert(result, DeepEquals, expected)
// Check the chunks.
iter := db.C("fs.chunks").Find(nil).Sort("n").Iter()
dataChunks := []string{"abcde", "fghij", "klmno", "pqrst", "uv"}
for i := 0; ; i++ {
result = M{}
if !iter.Next(result) {
if i != 5 {
c.Fatalf("Expected 5 chunks, got %d", i)
}
break
}
result["_id"] = "<id>"
expected = M{
"_id": "<id>",
"files_id": fileId,
"n": i,
"data": []byte(dataChunks[i]),
}
c.Assert(result, DeepEquals, expected)
}
c.Assert(iter.Close(), IsNil)
}
func (s *S) TestGridFSAbort(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("")
c.Assert(err, IsNil)
file.SetChunkSize(5)
n, err := file.Write([]byte("some data"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 9)
var count int
for i := 0; i < 10; i++ {
count, err = db.C("fs.chunks").Count()
if count > 0 || err != nil {
break
}
}
c.Assert(err, IsNil)
c.Assert(count, Equals, 1)
file.Abort()
err = file.Close()
c.Assert(err, ErrorMatches, "write aborted")
count, err = db.C("fs.chunks").Count()
c.Assert(err, IsNil)
c.Assert(count, Equals, 0)
}
func (s *S) TestGridFSCloseConflict(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
db.C("fs.files").EnsureIndex(mgo.Index{Key: []string{"filename"}, Unique: true})
// For a closing-time conflict
err = db.C("fs.files").Insert(M{"filename": "foo.txt"})
c.Assert(err, IsNil)
gfs := db.GridFS("fs")
file, err := gfs.Create("foo.txt")
c.Assert(err, IsNil)
_, err = file.Write([]byte("some data"))
c.Assert(err, IsNil)
err = file.Close()
c.Assert(mgo.IsDup(err), Equals, true)
count, err := db.C("fs.chunks").Count()
c.Assert(err, IsNil)
c.Assert(count, Equals, 0)
}
func (s *S) TestGridFSOpenNotFound(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.OpenId("non-existent")
c.Assert(err == mgo.ErrNotFound, Equals, true)
c.Assert(file, IsNil)
file, err = gfs.Open("non-existent")
c.Assert(err == mgo.ErrNotFound, Equals, true)
c.Assert(file, IsNil)
}
func (s *S) TestGridFSReadAll(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("")
c.Assert(err, IsNil)
id := file.Id()
file.SetChunkSize(5)
n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 22)
err = file.Close()
c.Assert(err, IsNil)
file, err = gfs.OpenId(id)
c.Assert(err, IsNil)
b := make([]byte, 30)
n, err = file.Read(b)
c.Assert(n, Equals, 22)
c.Assert(err, IsNil)
n, err = file.Read(b)
c.Assert(n, Equals, 0)
c.Assert(err == io.EOF, Equals, true)
err = file.Close()
c.Assert(err, IsNil)
}
func (s *S) TestGridFSReadChunking(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("")
c.Assert(err, IsNil)
id := file.Id()
file.SetChunkSize(5)
n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 22)
err = file.Close()
c.Assert(err, IsNil)
file, err = gfs.OpenId(id)
c.Assert(err, IsNil)
b := make([]byte, 30)
// Smaller than the chunk size.
n, err = file.Read(b[:3])
c.Assert(err, IsNil)
c.Assert(n, Equals, 3)
c.Assert(b[:3], DeepEquals, []byte("abc"))
// Boundary in the middle.
n, err = file.Read(b[:4])
c.Assert(err, IsNil)
c.Assert(n, Equals, 4)
c.Assert(b[:4], DeepEquals, []byte("defg"))
// Boundary at the end.
n, err = file.Read(b[:3])
c.Assert(err, IsNil)
c.Assert(n, Equals, 3)
c.Assert(b[:3], DeepEquals, []byte("hij"))
// Larger than the chunk size, with 3 chunks.
n, err = file.Read(b)
c.Assert(err, IsNil)
c.Assert(n, Equals, 12)
c.Assert(b[:12], DeepEquals, []byte("klmnopqrstuv"))
n, err = file.Read(b)
c.Assert(n, Equals, 0)
c.Assert(err == io.EOF, Equals, true)
err = file.Close()
c.Assert(err, IsNil)
}
func (s *S) TestGridFSOpen(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("myfile.txt")
c.Assert(err, IsNil)
file.Write([]byte{'1'})
file.Close()
file, err = gfs.Create("myfile.txt")
c.Assert(err, IsNil)
file.Write([]byte{'2'})
file.Close()
file, err = gfs.Open("myfile.txt")
c.Assert(err, IsNil)
defer file.Close()
var b [1]byte
_, err = file.Read(b[:])
c.Assert(err, IsNil)
c.Assert(string(b[:]), Equals, "2")
}
func (s *S) TestGridFSSeek(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("")
c.Assert(err, IsNil)
id := file.Id()
file.SetChunkSize(5)
n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 22)
err = file.Close()
c.Assert(err, IsNil)
b := make([]byte, 5)
file, err = gfs.OpenId(id)
c.Assert(err, IsNil)
o, err := file.Seek(3, os.SEEK_SET)
c.Assert(err, IsNil)
c.Assert(o, Equals, int64(3))
_, err = file.Read(b)
c.Assert(err, IsNil)
c.Assert(b, DeepEquals, []byte("defgh"))
o, err = file.Seek(5, os.SEEK_CUR)
c.Assert(err, IsNil)
c.Assert(o, Equals, int64(13))
_, err = file.Read(b)
c.Assert(err, IsNil)
c.Assert(b, DeepEquals, []byte("nopqr"))
o, err = file.Seek(0, os.SEEK_END)
c.Assert(err, IsNil)
c.Assert(o, Equals, int64(22))
n, err = file.Read(b)
c.Assert(err, Equals, io.EOF)
c.Assert(n, Equals, 0)
o, err = file.Seek(-10, os.SEEK_END)
c.Assert(err, IsNil)
c.Assert(o, Equals, int64(12))
_, err = file.Read(b)
c.Assert(err, IsNil)
c.Assert(b, DeepEquals, []byte("mnopq"))
o, err = file.Seek(8, os.SEEK_SET)
c.Assert(err, IsNil)
c.Assert(o, Equals, int64(8))
_, err = file.Read(b)
c.Assert(err, IsNil)
c.Assert(b, DeepEquals, []byte("ijklm"))
// Trivial seek forward within same chunk. Already
// got the data, shouldn't touch the database.
sent := mgo.GetStats().SentOps
o, err = file.Seek(1, os.SEEK_CUR)
c.Assert(err, IsNil)
c.Assert(o, Equals, int64(14))
c.Assert(mgo.GetStats().SentOps, Equals, sent)
_, err = file.Read(b)
c.Assert(err, IsNil)
c.Assert(b, DeepEquals, []byte("opqrs"))
// Try seeking past end of file.
file.Seek(3, os.SEEK_SET)
o, err = file.Seek(23, os.SEEK_SET)
c.Assert(err, ErrorMatches, "seek past end of file")
c.Assert(o, Equals, int64(3))
}
func (s *S) TestGridFSRemoveId(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("myfile.txt")
c.Assert(err, IsNil)
file.Write([]byte{'1'})
file.Close()
file, err = gfs.Create("myfile.txt")
c.Assert(err, IsNil)
file.Write([]byte{'2'})
id := file.Id()
file.Close()
err = gfs.RemoveId(id)
c.Assert(err, IsNil)
file, err = gfs.Open("myfile.txt")
c.Assert(err, IsNil)
defer file.Close()
var b [1]byte
_, err = file.Read(b[:])
c.Assert(err, IsNil)
c.Assert(string(b[:]), Equals, "1")
n, err := db.C("fs.chunks").Find(M{"files_id": id}).Count()
c.Assert(err, IsNil)
c.Assert(n, Equals, 0)
}
func (s *S) TestGridFSRemove(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("myfile.txt")
c.Assert(err, IsNil)
file.Write([]byte{'1'})
file.Close()
file, err = gfs.Create("myfile.txt")
c.Assert(err, IsNil)
file.Write([]byte{'2'})
file.Close()
err = gfs.Remove("myfile.txt")
c.Assert(err, IsNil)
_, err = gfs.Open("myfile.txt")
c.Assert(err == mgo.ErrNotFound, Equals, true)
n, err := db.C("fs.chunks").Find(nil).Count()
c.Assert(err, IsNil)
c.Assert(n, Equals, 0)
}
func (s *S) TestGridFSOpenNext(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("myfile1.txt")
c.Assert(err, IsNil)
file.Write([]byte{'1'})
file.Close()
file, err = gfs.Create("myfile2.txt")
c.Assert(err, IsNil)
file.Write([]byte{'2'})
file.Close()
var f *mgo.GridFile
var b [1]byte
iter := gfs.Find(nil).Sort("-filename").Iter()
ok := gfs.OpenNext(iter, &f)
c.Assert(ok, Equals, true)
c.Check(f.Name(), Equals, "myfile2.txt")
_, err = f.Read(b[:])
c.Assert(err, IsNil)
c.Assert(string(b[:]), Equals, "2")
ok = gfs.OpenNext(iter, &f)
c.Assert(ok, Equals, true)
c.Check(f.Name(), Equals, "myfile1.txt")
_, err = f.Read(b[:])
c.Assert(err, IsNil)
c.Assert(string(b[:]), Equals, "1")
ok = gfs.OpenNext(iter, &f)
c.Assert(ok, Equals, false)
c.Assert(iter.Close(), IsNil)
c.Assert(f, IsNil)
// Do it again with a more restrictive query to make sure
// it's actually taken into account.
iter = gfs.Find(bson.M{"filename": "myfile1.txt"}).Iter()
ok = gfs.OpenNext(iter, &f)
c.Assert(ok, Equals, true)
c.Check(f.Name(), Equals, "myfile1.txt")
ok = gfs.OpenNext(iter, &f)
c.Assert(ok, Equals, false)
c.Assert(iter.Close(), IsNil)
c.Assert(f, IsNil)
}

20
vendor/gopkg.in/mgo.v2/harness/certs/client.crt generated vendored Normal file
View File

@@ -0,0 +1,20 @@
-----BEGIN CERTIFICATE-----
MIIDLjCCAhYCAQcwDQYJKoZIhvcNAQELBQAwXDELMAkGA1UEBhMCR08xDDAKBgNV
BAgMA01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNl
cnZlcjESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTE1MDkyOTA4NDAzMFoYDzIxMTUw
OTA1MDg0MDMwWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECAwDTUdPMQwwCgYDVQQH
DANNR08xDDAKBgNVBAoMA01HTzEPMA0GA1UECwwGQ2xpZW50MRIwEAYDVQQDDAls
b2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0UiQhmT+H
4IIqrn8SMESDzvcl3rwImwUoRIHlmXkovCIZCbvBCJ1nAu6X5zIN89EPPOjfNrgZ
616wPgVV/YEQXp+D7+jTAsE5s8JepRXFdecResmvh/+0i2DSuI4QFsuyVAPM1O0I
AQ5EKgr0weZZmsX6lhPD4uYehV4DxDE0i/8aTAlDoNgRCAJrYFMharRTDdY7bQzd
7ZYab/pK/3DSmOKxl/AFJ8Enmcj9w1bsvy0fgAgoGEBnBru80PRFpFiqk72TJkXO
Hx7zcYFpegtKPbAreTCModaCnjP//fskCp4XJrkfH5+01NeeX/r1OfEbjgE/wzzx
l8NaWnPCmxNfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAFwYpje3dCLDOIHYjd+5
CpFOEb+bJsS4ryqm/NblTjIhCLo58hNpMsBqdJHRbHAFRCOE8fvY8yiWtdHeFZcW
DgVRAXfHONLtN7faZaZQnhy/YzOhLfC/8dUMB0gQA8KXhBCPZqQmexE28AfkEO47
PwICAxIWINfjm5VnFMkA3b7bDNLHon/pev2m7HqVQ3pRUJQNK3XgFOdDgRrnuXpR
OKAfHORHVGTh1gf1DVwc0oM+0gnkSiJ1VG0n5pE3zhZ24fmZxu6JQ6X515W7APQI
/nKVH+f1Fo+ustyTNLt8Bwxi1XmwT7IXwnkVSE9Ff6VejppXRF01V0aaWsa3kU3r
z3A=
-----END CERTIFICATE-----

27
vendor/gopkg.in/mgo.v2/harness/certs/client.key generated vendored Normal file
View File

@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAtFIkIZk/h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7
wQidZwLul+cyDfPRDzzo3za4GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJ
r4f/tItg0riOEBbLslQDzNTtCAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJ
Q6DYEQgCa2BTIWq0Uw3WO20M3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AI
KBhAZwa7vND0RaRYqpO9kyZFzh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5
Hx+ftNTXnl/69TnxG44BP8M88ZfDWlpzwpsTXwIDAQABAoIBADzCjOAxZkHfuZyu
La0wTHXpkEfXdJ6ltagq5WY7P6MlOYwcRoK152vlhgXzZl9jL6ely4YjRwec0swq
KdwezpV4fOGVPmuTuw45bx47HEnr/49ZQ4p9FgF9EYQPofbz53FQc/NaMACJcogv
bn+osniw+VMFrOVNmGLiZ5p3Smk8zfXE7GRHO8CL5hpWLWO/aK236yytbfWOjM2f
Pr76ICb26TPRNzYaYUEThU6DtgdLU8pLnJ6QKKaDsjn+zqQzRa+Nvc0c0K8gvWwA
Afq7t0325+uMSwfpLgCOFldcaZQ5uvteJ0CAVRq1MvStnSHBmMzPlgS+NzsDm6lp
QH5+rIkCgYEA5j3jrWsv7TueTNbk8Hr/Zwywc+fA2Ex0pBURBHlHyc6ahSXWSCqo
DtvRGX0GDoK1lCfaIf1qb/DLlGaoHpkEeqcNhXQ+hHs+bZAxfbfBY9+ikit5ZTtl
QN1tIlhaiyLDnwhkpi/hMw1tiouxJUf84Io61z0sCL4hyZSPCpjn0H0CgYEAyH6F
Mwl+bCD3VDL/Dr5WSoOr2B/M3bF5SfvdStwy2IPcDJ716je1Ud/2qFCnKGgqvWhJ
+HU15c7CjAWo7/pXq2/pEMD8fDKTYww4Hr4p6duEA7DpbOGkwcUX8u3eknxUWT9F
jOSbTCvAxuDOC1K3AElyMxVVTNUrFFe8M84R9gsCgYBXmb6RkdG3WlKde7m5gaLB
K4PLZabq5RQQBe/mmtpkfxYtiLrh1FEC7kG9h+MRDExX5V3KRugDVUOv3+shUSjy
HbM4ToUm1NloyE78PTj4bfMl2CKlEJcyucy3H5S7kWuKi5/31wnA6d/+sa2huKUP
Lai7kgu5+9VRJBPUfV7d5QKBgCnhk/13TDtWH5QtGu5/gBMMskbxTaA5xHZZ8H4E
xXJJCRxx0Dje7jduK145itF8AQGT2W/XPC0HJciOHh4TE2EyfWMMjTF8dyFHmimB
28uIGWmT+Q7Pi9UWUMxkOAwtgIksGGE4F+CvexOQPjpLSwL6VKqrGCh2lwsm0J+Z
ulLFAoGAKlC93c6XEj1A31c1+usdEhUe9BrmTqtSYLYpDNpeMLdZ3VctrAZuOQPZ
4A4gkkQkqqwZGBYYSEqwqiLU6MsBdHPPZ9u3JXLLOQuh1xGeaKylvHj7qx6iT0Xo
I+FkJ6/3JeMgOina/+wlzD4oyQpqR4Mnh+TuLkDfQTgY+Lg0WPk=
-----END RSA PRIVATE KEY-----

17
vendor/gopkg.in/mgo.v2/harness/certs/client.req generated vendored Normal file
View File

@@ -0,0 +1,17 @@
-----BEGIN CERTIFICATE REQUEST-----
MIICoTCCAYkCAQAwXDELMAkGA1UEBhMCR08xDDAKBgNVBAgMA01HTzEMMAoGA1UE
BwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBkNsaWVudDESMBAGA1UEAwwJ
bG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtFIkIZk/
h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7wQidZwLul+cyDfPRDzzo3za4
GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJr4f/tItg0riOEBbLslQDzNTt
CAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJQ6DYEQgCa2BTIWq0Uw3WO20M
3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AIKBhAZwa7vND0RaRYqpO9kyZF
zh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5Hx+ftNTXnl/69TnxG44BP8M8
8ZfDWlpzwpsTXwIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAKbOFblIscxlXalV
sEGNm2oz380RN2QoLhN6nKtAiv0jWm6iKhdAhOIQIeaRPhUP3cyi8bcBvLdMeQ3d
ZYIByB55/R0VSP1vs4qkXJCQegHcpMpyuIzsMV8p3Q4lxzGKyKtPA6Bb5c49p8Sk
ncD+LL4ymrMEia4cBPsHL9hhFOm4gqDacbU8+ETLTpuoSvUZiw7OwngqhE2r+kMv
KDweq5TOPeb+ftKzQKrrfB+XVdBoTKYw6CwARpogbc0/7mvottVcJ/0yAgC1fBbM
vupkohkXwKfjxKl6nKNL3R2GkzHQOh91hglAx5zyybKQn2YMM328Vk4X6csBg+pg
tb1s0MA=
-----END CERTIFICATE REQUEST-----

22
vendor/gopkg.in/mgo.v2/harness/certs/server.crt generated vendored Normal file
View File

@@ -0,0 +1,22 @@
-----BEGIN CERTIFICATE-----
MIIDjTCCAnWgAwIBAgIJAMW+wDfcdzC+MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
BAYTAkdPMQwwCgYDVQQIDANNR08xDDAKBgNVBAcMA01HTzEMMAoGA1UECgwDTUdP
MQ8wDQYDVQQLDAZTZXJ2ZXIxEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0xNTA5Mjkw
ODM0MTBaGA8yMTE1MDkwNTA4MzQxMFowXDELMAkGA1UEBhMCR08xDDAKBgNVBAgM
A01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNlcnZl
cjESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEA/T5W1vTsAF+2gTXP1JKygjM7T/2BXHiJc6DRKVjlshTtPYuC3rpTddDm
6d86d17LWEo+T2bCT4MzZJhSGAun9peFvehdElRMr57xs7j5V1QYjwadMTBkLQuK
IAg6cISN1KPUzpUTUKsWIsbx97sA0t0wiEPifROb7nfSMIVQsdz/c9LlY2UNYI+5
GiU88iDGg2wrdsa3U+l2G2KSx/9uE3c5iFki6bdequLiWmBZ6rxfoaLe4gk1INji
fKssNsn2i3uJ4i4Tmr3PUc4kxx0mMKuWK3HdlQsMqtpq++HQmHSvsPrbgcjl9HyP
JiHDsoJ+4O5bbtcE51oQbLh1bZAhYwIDAQABo1AwTjAdBgNVHQ4EFgQUhku/u9Kd
OAc1L0OR649vCCuQT+0wHwYDVR0jBBgwFoAUhku/u9KdOAc1L0OR649vCCuQT+0w
DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAw7Bgw3hlWXWSZjLhnSOu
2mW/UJ2Sj31unHngmgtXwW/04cyzoULb+qmzPe/Z06QMgGIsku1jFBcu0JabQtUG
TyalpfW77tfnvz238CYdImYwE9ZcIGuZGfhs6ySFN9XpW43B8YM7R8wTNPvOcSPw
nfjqU6kueN4TTspQg9cKhDss5DcMTIdgJgLbITXhIsrCu6GlKOgtX3HrdMGpQX7s
UoMXtZVG8pK32vxKWGTZ6DPqESeKjjq74NbYnB3H5U/kDU2dt7LF90C/Umdr9y+C
W2OJb1WBrf6RTcbt8D6d7P9kOfLPOtyn/cbaA/pfXBMQMHqr7XNXzjnaNU+jB7hL
yQ==
-----END CERTIFICATE-----

28
vendor/gopkg.in/mgo.v2/harness/certs/server.key generated vendored Normal file
View File

@@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD9PlbW9OwAX7aB
Nc/UkrKCMztP/YFceIlzoNEpWOWyFO09i4LeulN10Obp3zp3XstYSj5PZsJPgzNk
mFIYC6f2l4W96F0SVEyvnvGzuPlXVBiPBp0xMGQtC4ogCDpwhI3Uo9TOlRNQqxYi
xvH3uwDS3TCIQ+J9E5vud9IwhVCx3P9z0uVjZQ1gj7kaJTzyIMaDbCt2xrdT6XYb
YpLH/24TdzmIWSLpt16q4uJaYFnqvF+hot7iCTUg2OJ8qyw2yfaLe4niLhOavc9R
ziTHHSYwq5Yrcd2VCwyq2mr74dCYdK+w+tuByOX0fI8mIcOygn7g7ltu1wTnWhBs
uHVtkCFjAgMBAAECggEASRAfRc1L+Z+jrAu2doIMdnwJdL6S//bW0UFolyFKw+I9
wC/sBg6D3c3zkS4SVDZJPKPO7mGbVg1oWnGH3eAfCYoV0ACmOY+QwGp/GXcYmRVu
MHWcDIEFpelaZHt7QNM9iEfsMd3YwMFblZUIYozVZADk66uKQMPTjS2Muur7qRSi
wuVfSmsVZ5afH3B1Tr96BbmPsHrXLjvNpjO44k2wrnnSPQjUL7+YiZPvtnNW8Fby
yuo2uoAyjg3+68PYZftOvvNneMsv1uyGlUs6Bk+DVWaqofIztWFdFZyXbHnK2PTk
eGQt5EsL+RwIck5eoqd5vSE+KyzhhydL0zcpngVQoQKBgQD/Yelvholbz5NQtSy3
ZoiW1y7hL1BKzvVNHuAMKJ5WOnj5szhjhKxt/wZ+hk0qcAmlV9WAPbf4izbEwPRC
tnMBQzf1uBxqqbLL6WZ4YAyGrcX3UrT7GXsGfVT4zJjz7oYSw8aPircecw5V4exB
xa4NF+ki8IycXSkHwvW2R56fRwKBgQD92xpxXtte/rUnmENbQmr0aKg7JEfMoih6
MdX+f6mfgjMmqj+L4jPTI8/ql8HEy13SQS1534aDSHO+nBqBK5aHUCRMIgSLnTP9
Xyx9Ngg03SZIkPfykqxQmnZgWkTPMhYS+K1Ao9FGVs8W5jVi7veyAdhHptAcxhP3
IuxvrxVTBQKBgQCluMPiu0snaOwP04HRAZhhSgIB3tIbuXE1OnPpb/JPwmH+p25Q
Jig+uN9d+4jXoRyhTv4c2fAoOS6xPwVCxWKbzyLhMTg/fx+ncy4rryhxvRJaDDGl
QEO1Ul9xlFMs9/vI8YJIY5uxBrimwpStmbn4hSukoLSeQ1X802bfglpMwQKBgD8z
GTY4Y20XBIrDAaHquy32EEwJEEcF6AXj+l7N8bDgfVOW9xMgUb6zH8RL29Xeu5Do
4SWCXL66fvZpbr/R1jwB28eIgJExpgvicfUKSqi+lhVi4hfmJDg8/FOopZDf61b1
ykxZfHSCkDQnRAtJaylKBEpyYUWImtfgPfTgJfLxAoGAc8A/Tl2h/DsdTA+cA5d7
1e0l64m13ObruSWRczyru4hy8Yq6E/K2rOFw8cYCcFpy24NqNlk+2iXPLRpWm2zt
9R497zAPvhK/bfPXjvm0j/VjB44lvRTC9hby/RRMHy9UJk4o/UQaD+1IodxZovvk
SruEA1+5bfBRMW0P+h7Qfe4=
-----END PRIVATE KEY-----

View File

Binary file not shown.

0
vendor/gopkg.in/mgo.v2/harness/daemons/cfg1/db/mongod.lock generated vendored Executable file
View File

3
vendor/gopkg.in/mgo.v2/harness/daemons/cfg1/log/run generated vendored Executable file
View File

@@ -0,0 +1,3 @@
#!/bin/sh
exec cat - > log.txt

8
vendor/gopkg.in/mgo.v2/harness/daemons/cfg1/run generated vendored Executable file
View File

@@ -0,0 +1,8 @@
#!/bin/sh
. ../.env
exec mongod $COMMONCOPTS \
--port 40101 \
--configsvr

3
vendor/gopkg.in/mgo.v2/harness/daemons/cfg2/log/run generated vendored Executable file
View File

@@ -0,0 +1,3 @@
#!/bin/sh
exec cat - > log.txt

8
vendor/gopkg.in/mgo.v2/harness/daemons/cfg2/run generated vendored Executable file
View File

@@ -0,0 +1,8 @@
#!/bin/sh
. ../.env
exec mongod $COMMONCOPTS \
--port 40102 \
--configsvr

3
vendor/gopkg.in/mgo.v2/harness/daemons/cfg3/log/run generated vendored Executable file
View File

@@ -0,0 +1,3 @@
#!/bin/sh
exec cat - > log.txt

9
vendor/gopkg.in/mgo.v2/harness/daemons/cfg3/run generated vendored Executable file
View File

@@ -0,0 +1,9 @@
#!/bin/sh
. ../.env
exec mongod $COMMONCOPTS \
--port 40103 \
--configsvr \
--auth \
--keyFile=../../certs/keyfile

0
vendor/gopkg.in/mgo.v2/harness/daemons/db1/db/.empty generated vendored Normal file
View File

3
vendor/gopkg.in/mgo.v2/harness/daemons/db1/log/run generated vendored Executable file
View File

@@ -0,0 +1,3 @@
#!/bin/sh
exec cat - > log.txt

15
vendor/gopkg.in/mgo.v2/harness/daemons/db1/run generated vendored Executable file
View File

@@ -0,0 +1,15 @@
#!/bin/sh
. ../.env
if [ x$NOIPV6 = x1 ]; then
BINDIP="127.0.0.1"
else
BINDIP="127.0.0.1,::1"
fi
exec mongod $COMMONDOPTSNOIP \
--shardsvr \
--bind_ip=$BINDIP \
--port 40001 \
--ipv6

0
vendor/gopkg.in/mgo.v2/harness/daemons/db2/db/.empty generated vendored Normal file
View File

3
vendor/gopkg.in/mgo.v2/harness/daemons/db2/log/run generated vendored Executable file
View File

@@ -0,0 +1,3 @@
#!/bin/sh
exec cat - > log.txt

8
vendor/gopkg.in/mgo.v2/harness/daemons/db2/run generated vendored Executable file
View File

@@ -0,0 +1,8 @@
#!/bin/sh
. ../.env
exec mongod $COMMONDOPTS \
--shardsvr \
--port 40002 \
--auth

0
vendor/gopkg.in/mgo.v2/harness/daemons/db3/db/.empty generated vendored Normal file
View File

3
vendor/gopkg.in/mgo.v2/harness/daemons/db3/log/run generated vendored Executable file
View File

@@ -0,0 +1,3 @@
#!/bin/sh
exec cat - > log.txt

12
vendor/gopkg.in/mgo.v2/harness/daemons/db3/run generated vendored Executable file
View File

@@ -0,0 +1,12 @@
#!/bin/sh
. ../.env
exec mongod $COMMONDOPTS \
--shardsvr \
--port 40003 \
--auth \
--sslMode preferSSL \
--sslCAFile ../../certs/server.pem \
--sslPEMKeyFile ../../certs/server.pem

3
vendor/gopkg.in/mgo.v2/harness/daemons/rs1a/log/run generated vendored Executable file
View File

@@ -0,0 +1,3 @@
#!/bin/sh
exec cat - > log.txt

8
vendor/gopkg.in/mgo.v2/harness/daemons/rs1a/run generated vendored Executable file
View File

@@ -0,0 +1,8 @@
#!/bin/sh
. ../.env
exec mongod $COMMONDOPTS \
--shardsvr \
--replSet rs1 \
--port 40011

3
vendor/gopkg.in/mgo.v2/harness/daemons/rs1b/log/run generated vendored Executable file
View File

@@ -0,0 +1,3 @@
#!/bin/sh
exec cat - > log.txt

8
vendor/gopkg.in/mgo.v2/harness/daemons/rs1b/run generated vendored Executable file
View File

@@ -0,0 +1,8 @@
#!/bin/sh
. ../.env
exec mongod $COMMONDOPTS \
--shardsvr \
--replSet rs1 \
--port 40012

3
vendor/gopkg.in/mgo.v2/harness/daemons/rs1c/log/run generated vendored Executable file
View File

@@ -0,0 +1,3 @@
#!/bin/sh
exec cat - > log.txt

8
vendor/gopkg.in/mgo.v2/harness/daemons/rs1c/run generated vendored Executable file
View File

@@ -0,0 +1,8 @@
#!/bin/sh
. ../.env
exec mongod $COMMONDOPTS \
--shardsvr \
--replSet rs1 \
--port 40013

3
vendor/gopkg.in/mgo.v2/harness/daemons/rs2a/log/run generated vendored Executable file
View File

@@ -0,0 +1,3 @@
#!/bin/sh
exec cat - > log.txt

8
vendor/gopkg.in/mgo.v2/harness/daemons/rs2a/run generated vendored Executable file
View File

@@ -0,0 +1,8 @@
#!/bin/sh
. ../.env
exec mongod $COMMONDOPTS \
--shardsvr \
--replSet rs2 \
--port 40021

3
vendor/gopkg.in/mgo.v2/harness/daemons/rs2b/log/run generated vendored Executable file
View File

@@ -0,0 +1,3 @@
#!/bin/sh
exec cat - > log.txt

8
vendor/gopkg.in/mgo.v2/harness/daemons/rs2b/run generated vendored Executable file
View File

@@ -0,0 +1,8 @@
#!/bin/sh
. ../.env
exec mongod $COMMONDOPTS \
--shardsvr \
--replSet rs2 \
--port 40022

3
vendor/gopkg.in/mgo.v2/harness/daemons/rs2c/log/run generated vendored Executable file
View File

@@ -0,0 +1,3 @@
#!/bin/sh
exec cat - > log.txt

8
vendor/gopkg.in/mgo.v2/harness/daemons/rs2c/run generated vendored Executable file
View File

@@ -0,0 +1,8 @@
#!/bin/sh
. ../.env
exec mongod $COMMONDOPTS \
--shardsvr \
--replSet rs2 \
--port 40023

3
vendor/gopkg.in/mgo.v2/harness/daemons/rs3a/log/run generated vendored Executable file
View File

@@ -0,0 +1,3 @@
#!/bin/sh
exec cat - > log.txt

9
vendor/gopkg.in/mgo.v2/harness/daemons/rs3a/run generated vendored Executable file
View File

@@ -0,0 +1,9 @@
#!/bin/sh
. ../.env
exec mongod $COMMONDOPTS \
--shardsvr \
--replSet rs3 \
--port 40031 \
--keyFile=../../certs/keyfile

3
vendor/gopkg.in/mgo.v2/harness/daemons/rs3b/log/run generated vendored Executable file
View File

@@ -0,0 +1,3 @@
#!/bin/sh
exec cat - > log.txt

9
vendor/gopkg.in/mgo.v2/harness/daemons/rs3b/run generated vendored Executable file
View File

@@ -0,0 +1,9 @@
#!/bin/sh
. ../.env
exec mongod $COMMONDOPTS \
--shardsvr \
--replSet rs3 \
--port 40032 \
--keyFile=../../certs/keyfile

3
vendor/gopkg.in/mgo.v2/harness/daemons/rs3c/log/run generated vendored Executable file
View File

@@ -0,0 +1,3 @@
#!/bin/sh
exec cat - > log.txt

9
vendor/gopkg.in/mgo.v2/harness/daemons/rs3c/run generated vendored Executable file
View File

@@ -0,0 +1,9 @@
#!/bin/sh
. ../.env
exec mongod $COMMONDOPTS \
--shardsvr \
--replSet rs3 \
--port 40033 \
--keyFile=../../certs/keyfile

3
vendor/gopkg.in/mgo.v2/harness/daemons/rs4a/log/run generated vendored Executable file
View File

@@ -0,0 +1,3 @@
#!/bin/sh
exec cat - > log.txt

8
vendor/gopkg.in/mgo.v2/harness/daemons/rs4a/run generated vendored Executable file
View File

@@ -0,0 +1,8 @@
#!/bin/sh
. ../.env
exec mongod $COMMONDOPTS \
--shardsvr \
--replSet rs4 \
--port 40041

3
vendor/gopkg.in/mgo.v2/harness/daemons/s1/log/run generated vendored Executable file
View File

@@ -0,0 +1,3 @@
#!/bin/sh
exec cat - > log.txt

Some files were not shown because too many files have changed in this diff