mirror of https://github.com/fnproject/fn.git
Merge branch 'master' into app-yaml
@@ -173,7 +173,7 @@ func (a *agent) Submit(callI Call) error {
default:
}

a.stats.Enqueue()
a.stats.Enqueue(callI.Model().Path)

call := callI.(*call)
ctx := call.req.Context()
@@ -188,6 +188,7 @@ func (a *agent) Submit(callI Call) error {

slot, err := a.getSlot(ctx, call) // find ram available / running
if err != nil {
a.stats.Dequeue(callI.Model().Path)
return err
}
// TODO if the call times out & container is created, we need
@@ -197,16 +198,17 @@ func (a *agent) Submit(callI Call) error {
// TODO Start is checking the timer now, we could do it here, too.
err = call.Start(ctx)
if err != nil {
a.stats.Dequeue(callI.Model().Path)
return err
}

a.stats.Start()
a.stats.Start(callI.Model().Path)

err = slot.exec(ctx, call)
// pass this error (nil or otherwise) to end directly, to store status, etc
// End may rewrite the error or elect to return it

a.stats.Complete()
a.stats.Complete(callI.Model().Path)

// TODO: we need to allocate more time to store the call + logs in case the call timed out,
// but this could put us over the timeout if the call did not reply yet (need better policy).
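To make the new per-path accounting easier to follow, here is a minimal sketch of the order of stats calls Submit now makes. The statsRecorder interface and submitOrder function are illustrative stand-ins for this sketch only, not code from this commit.

package sketch

// statsRecorder stands in for the agent's *stats receiver; it exists only
// for this sketch.
type statsRecorder interface {
	Enqueue(path string)
	Dequeue(path string)
	Start(path string)
	Complete(path string)
}

// submitOrder mirrors the bookkeeping order in Submit above: Enqueue when a
// call is accepted, Dequeue if it fails before it runs, otherwise Start once
// execution begins and Complete when it returns.
func submitOrder(st statsRecorder, path string, getSlot, start, exec func() error) error {
	st.Enqueue(path)
	if err := getSlot(); err != nil {
		st.Dequeue(path)
		return err
	}
	if err := start(); err != nil {
		st.Dequeue(path)
		return err
	}
	st.Start(path)
	err := exec()
	st.Complete(path)
	return err
}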
@@ -6,36 +6,83 @@ import "sync"
// * hot containers active
// * memory used / available

// global statistics
type stats struct {
mu sync.Mutex
mu sync.Mutex
// statistics for all functions combined
queue uint64
running uint64
complete uint64
// statistics for individual functions, keyed by function path
functionStatsMap map[string]*functionStats
}

// statistics for an individual function
type functionStats struct {
queue uint64
running uint64
complete uint64
}

type Stats struct {
// statistics for all functions combined
Queue uint64
Running uint64
Complete uint64
// statistics for individual functions, keyed by function path
FunctionStatsMap map[string]*FunctionStats
}

// statistics for an individual function
type FunctionStats struct {
Queue uint64
Running uint64
Complete uint64
}

func (s *stats) Enqueue() {
func (s *stats) getStatsForFunction(path string) *functionStats {
if s.functionStatsMap == nil {
s.functionStatsMap = make(map[string]*functionStats)
}
thisFunctionStats, found := s.functionStatsMap[path]
if !found {
thisFunctionStats = &functionStats{}
s.functionStatsMap[path] = thisFunctionStats
}

return thisFunctionStats
}

func (s *stats) Enqueue(path string) {
s.mu.Lock()
s.queue++
s.getStatsForFunction(path).queue++
s.mu.Unlock()
}

func (s *stats) Start() {
// Call when a function has been queued but cannot be started because of an error
func (s *stats) Dequeue(path string) {
s.mu.Lock()
s.queue--
s.running++
s.getStatsForFunction(path).queue--
s.mu.Unlock()
}

func (s *stats) Complete() {
func (s *stats) Start(path string) {
s.mu.Lock()
s.queue--
s.getStatsForFunction(path).queue--
s.running++
s.getStatsForFunction(path).running++
s.mu.Unlock()
}

func (s *stats) Complete(path string) {
s.mu.Lock()
s.running--
s.getStatsForFunction(path).running--
s.complete++
s.getStatsForFunction(path).complete++
s.mu.Unlock()
}

@@ -45,6 +92,11 @@ func (s *stats) Stats() Stats {
stats.Running = s.running
stats.Complete = s.complete
stats.Queue = s.queue
stats.FunctionStatsMap = make(map[string]*FunctionStats)
for key, value := range s.functionStatsMap {
thisFunctionStats := &FunctionStats{Queue: value.queue, Running: value.running, Complete: value.complete}
stats.FunctionStatsMap[key] = thisFunctionStats
}
s.mu.Unlock()
return stats
}
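The new Stats() body copies the per-function map while holding the lock, so callers get a stable snapshot rather than a reference to the live map. A self-contained sketch of that copy-under-lock pattern, using illustrative names rather than the agent's real types:

package main

import (
	"fmt"
	"sync"
)

// counters mimics the stats/functionStats pairing above in miniature:
// one mutex guards both the total and the per-path map.
type counters struct {
	mu    sync.Mutex
	total uint64
	byKey map[string]uint64
}

func (c *counters) inc(path string) {
	c.mu.Lock()
	if c.byKey == nil {
		c.byKey = make(map[string]uint64) // lazily created, like functionStatsMap
	}
	c.total++
	c.byKey[path]++
	c.mu.Unlock()
}

// snapshot copies the map under the lock, as Stats() does, so the caller
// never observes it mid-update.
func (c *counters) snapshot() (uint64, map[string]uint64) {
	c.mu.Lock()
	defer c.mu.Unlock()
	out := make(map[string]uint64, len(c.byKey))
	for k, v := range c.byKey {
		out[k] = v
	}
	return c.total, out
}

func main() {
	var c counters
	c.inc("/myapp/hello")
	c.inc("/myapp/hello")
	c.inc("/myapp/bye")
	total, perPath := c.snapshot()
	fmt.Println(total, perPath) // 3 map[/myapp/bye:1 /myapp/hello:2]
}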
@@ -50,7 +50,7 @@ type Server struct {
}

// NewFromEnv creates a new Functions server based on env vars.
func NewFromEnv(ctx context.Context) *Server {
func NewFromEnv(ctx context.Context, opts ...ServerOption) *Server {
ds, err := datastore.New(viper.GetString(EnvDBURL))
if err != nil {
logrus.WithError(err).Fatalln("Error initializing datastore.")
@@ -69,7 +69,7 @@ func NewFromEnv(ctx context.Context) *Server {
}
}

return New(ctx, ds, mq, logDB)
return New(ctx, ds, mq, logDB, opts...)
}

// New creates a new Functions server with the passed in datastore, message queue and API URL
||||
@@ -1,6 +1,12 @@
package server

import "context"
import (
"context"
"fmt"
"net/http"

"github.com/gin-gonic/gin"
)

type ServerOption func(*Server)

@@ -9,3 +15,38 @@ func EnableShutdownEndpoint(halt context.CancelFunc) ServerOption {
s.Router.GET("/shutdown", s.handleShutdown(halt))
}
}

func LimitRequestBody(max int64) ServerOption {
return func(s *Server) {
s.Router.Use(limitRequestBody(max))
}
}

func limitRequestBody(max int64) func(c *gin.Context) {
return func(c *gin.Context) {
cl := int64(c.Request.ContentLength)
if cl > max {
// try to deny this quickly, instead of just letting it get lopped off

handleErrorResponse(c, errTooBig{cl, max})
c.Abort()
return
}

// if no Content-Length specified, limit how many bytes we read and error
// if we hit the max (intercontinental anti-air missile defense system).
// read http.MaxBytesReader for gritty details..
c.Request.Body = http.MaxBytesReader(c.Writer, c.Request.Body, max)
c.Next()
}
}

// models.APIError
type errTooBig struct {
n, max int64
}

func (e errTooBig) Code() int { return http.StatusRequestEntityTooLarge }
func (e errTooBig) Error() string {
return fmt.Sprintf("Content-Length too large for this server, %d > max %d", e.n, e.max)
}
||||
@@ -1,4 +1,4 @@
package version

// Version of Functions
var Version = "0.3.106"
var Version = "0.3.109"
||||
@@ -17,12 +17,12 @@ import (
"github.com/sirupsen/logrus"
)

const VERSION = "0.0.72"
const VERSION = "0.0.75"

func main() {
// XXX (reed): normalize
fnodes := flag.String("nodes", "", "comma separated list of functions nodes")
minAPIVersion := flag.String("min-api-version", "0.0.39", "minimal node API to accept")
minAPIVersion := flag.String("min-api-version", "0.0.42", "minimal node API to accept")

var conf lb.Config
flag.StringVar(&conf.DBurl, "db", "sqlite3://:memory:", "backend to store nodes, default to in memory")
76
glide.lock
generated
@@ -1,16 +1,16 @@
|
||||
hash: 60c3aa7f40235c70cfcc42335f9795bd1af326dc46f817aabbe7098fdd9f91a1
|
||||
updated: 2017-09-05T11:30:26.153448972-07:00
|
||||
hash: 9c04b00c52a7378e748a93c062aacd32f39fece29a79b50fddad6aa81e2cbab0
|
||||
updated: 2017-09-19T14:13:44.550343214-07:00
|
||||
imports:
|
||||
- name: github.com/amir/raidman
|
||||
version: 1ccc43bfb9c93cb401a4025e49c64ba71e5e668b
|
||||
- name: github.com/apache/thrift
|
||||
version: 9235bec082127e84bf1b0353a0764c9060aca6d2
|
||||
version: 4c30c15924bfbc7c9e6bfc0e82630e97980e556e
|
||||
subpackages:
|
||||
- lib/go/thrift
|
||||
- name: github.com/asaskevich/govalidator
|
||||
version: 73945b6115bfbbcc57d89b7316e28109364124e1
|
||||
version: 15028e809df8c71964e8efa6c11e81d5c0262302
|
||||
- name: github.com/Azure/go-ansiterm
|
||||
version: fa152c58bc15761d0200cb75fe958b89a9d4888e
|
||||
version: 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
|
||||
subpackages:
|
||||
- winterm
|
||||
- name: github.com/beorn7/perks
|
||||
@@ -18,15 +18,15 @@ imports:
|
||||
subpackages:
|
||||
- quantile
|
||||
- name: github.com/boltdb/bolt
|
||||
version: 2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8
|
||||
version: fa5367d20c994db73282594be0146ab221657943
|
||||
- name: github.com/cactus/go-statsd-client
|
||||
version: ce77ca9ecdee1c3ffd097e32f9bb832825ccb203
|
||||
subpackages:
|
||||
- statsd
|
||||
- name: github.com/cenkalti/backoff
|
||||
version: 61153c768f31ee5f130071d08fc82b85208528de
|
||||
version: 80e08cb804a3eb3e576876c777e957e874609a9a
|
||||
- name: github.com/cloudflare/cfssl
|
||||
version: 42549e19d448b683fa35bcce1aea3bf193ee8037
|
||||
version: 7d88da830aad9d533c2fb8532da23f6a75331b52
|
||||
subpackages:
|
||||
- api
|
||||
- auth
|
||||
@@ -44,11 +44,11 @@ imports:
|
||||
- signer
|
||||
- signer/local
|
||||
- name: github.com/coreos/etcd
|
||||
version: 589a7a19ac469afa687ab1f7487dd5d4c2a6ee6a
|
||||
version: 5bb9f9591f01d0a3c61d2eb3a3bb281726005b2b
|
||||
subpackages:
|
||||
- raft/raftpb
|
||||
- name: github.com/coreos/go-semver
|
||||
version: 1817cd4bea52af76542157eeabd74b057d1a199e
|
||||
version: 8ab6407b697782a06568d4b7f1db25550ec2e4c6
|
||||
subpackages:
|
||||
- semver
|
||||
- name: github.com/davecgh/go-spew
|
||||
@@ -58,7 +58,7 @@ imports:
|
||||
- name: github.com/dchest/siphash
|
||||
version: 4ebf1de738443ea7f45f02dc394c4df1942a126d
|
||||
- name: github.com/dghubble/go-twitter
|
||||
version: f74be7f0f20b142558537ca43852457f7c52e051
|
||||
version: c4115fa44a928413e0b857e0eb47376ffde3a61a
|
||||
subpackages:
|
||||
- twitter
|
||||
- name: github.com/dghubble/oauth1
|
||||
@@ -68,7 +68,7 @@ imports:
|
||||
- name: github.com/dgrijalva/jwt-go
|
||||
version: a539ee1a749a2b895533f979515ac7e6e0f5b650
|
||||
- name: github.com/docker/cli
|
||||
version: f5a192bcc4c2794e44eb9dd7d91c2be95c5c6342
|
||||
version: 139fcd3ee95f37f3ac17b1200fb0a63908cb6781
|
||||
subpackages:
|
||||
- cli/config/configfile
|
||||
- name: github.com/docker/distribution
|
||||
@@ -132,7 +132,7 @@ imports:
|
||||
subpackages:
|
||||
- store
|
||||
- name: github.com/docker/libnetwork
|
||||
version: ba46b928444931e6865d8618dc03622cac79aa6f
|
||||
version: 6d098467ec58038b68620a3c2c418936661efa64
|
||||
subpackages:
|
||||
- datastore
|
||||
- discoverapi
|
||||
@@ -140,7 +140,7 @@ imports:
|
||||
- name: github.com/docker/libtrust
|
||||
version: aabc10ec26b754e797f9028f4589c5b7bd90dc20
|
||||
- name: github.com/docker/swarmkit
|
||||
version: 0554c9bc9a485025e89b8e5c2c1f0d75961906a2
|
||||
version: bd7bafb8a61de1f5f23c8215ce7b9ecbcb30ff21
|
||||
subpackages:
|
||||
- api
|
||||
- api/deepcopy
|
||||
@@ -167,12 +167,8 @@ imports:
|
||||
version: bb955e01b9346ac19dc29eb16586c90ded99a98c
|
||||
- name: github.com/eapache/queue
|
||||
version: 44cc805cf13205b55f69e14bcb69867d1ae92f98
|
||||
- name: github.com/fsnotify/fsnotify
|
||||
version: 4da3e2cfbabc9f751898f250b49f2439785783a1
|
||||
- name: github.com/fsouza/go-dockerclient
|
||||
version: 75772940379e725b5aae213e570f9dcd751951cb
|
||||
- name: github.com/fnproject/fn_go
|
||||
version: e046aa4ca1f1028a04fc51395297ff07515cb0b6
|
||||
version: 418dcd8e37593d86604e89a48d7ee2e109a1d3bf
|
||||
subpackages:
|
||||
- client
|
||||
- client/apps
|
||||
@@ -180,15 +176,19 @@ imports:
|
||||
- client/operations
|
||||
- client/routes
|
||||
- models
|
||||
- name: github.com/fsnotify/fsnotify
|
||||
version: 4da3e2cfbabc9f751898f250b49f2439785783a1
|
||||
- name: github.com/fsouza/go-dockerclient
|
||||
version: 98edf3edfae6a6500fecc69d2bcccf1302544004
|
||||
- name: github.com/garyburd/redigo
|
||||
version: b925df3cc15d8646e9b5b333ebaf3011385aba11
|
||||
version: 70e1b1943d4fc9c56791abaa6f4d1e727b9ab925
|
||||
subpackages:
|
||||
- internal
|
||||
- redis
|
||||
- name: github.com/gin-contrib/sse
|
||||
version: 22d885f9ecc78bf4ee5d72b937e4bbcdc58e8cae
|
||||
- name: github.com/gin-gonic/gin
|
||||
version: 848fa41ca016fa3a3d385af710c4219c1cb477a4
|
||||
version: 5afc5b19730118c9b8324fe9dd995d44ec65c81a
|
||||
subpackages:
|
||||
- binding
|
||||
- json
|
||||
@@ -208,7 +208,7 @@ imports:
|
||||
subpackages:
|
||||
- fmts
|
||||
- name: github.com/go-openapi/runtime
|
||||
version: bf2ff8f7150788b1c7256abb0805ba0410cbbabb
|
||||
version: d6605b7c17ac3b1033ca794886e6142a4141f5b0
|
||||
subpackages:
|
||||
- client
|
||||
- name: github.com/go-openapi/spec
|
||||
@@ -220,7 +220,7 @@ imports:
|
||||
- name: github.com/go-openapi/validate
|
||||
version: 8a82927c942c94794a5cd8b8b50ce2f48a955c0c
|
||||
- name: github.com/go-sql-driver/mysql
|
||||
version: 26471af196a17ee75a22e6481b5a5897fb16b081
|
||||
version: 21d7e97c9f760ca685a01ecea202e1c84276daa1
|
||||
- name: github.com/gogo/protobuf
|
||||
version: 100ba4e885062801d56799d78530b73b178a78f3
|
||||
subpackages:
|
||||
@@ -267,7 +267,7 @@ imports:
|
||||
subpackages:
|
||||
- simplelru
|
||||
- name: github.com/hashicorp/hcl
|
||||
version: 8f6b1344a92ff8877cf24a5de9177bf7d0a2a187
|
||||
version: 68e816d1c783414e79bc65b3994d9ab6b0a722ab
|
||||
subpackages:
|
||||
- hcl/ast
|
||||
- hcl/parser
|
||||
@@ -278,7 +278,7 @@ imports:
|
||||
- json/scanner
|
||||
- json/token
|
||||
- name: github.com/iron-io/iron_go3
|
||||
version: 830335d420db87fc84cbff7f0d1348a46b499946
|
||||
version: ded317cb147d3b52b593da08495bc7d53efa17d8
|
||||
subpackages:
|
||||
- api
|
||||
- config
|
||||
@@ -290,17 +290,17 @@ imports:
|
||||
subpackages:
|
||||
- reflectx
|
||||
- name: github.com/json-iterator/go
|
||||
version: 8c7fc7584a2a4dad472a39a85889dabb3091dfb1
|
||||
version: fdfe0b9a69118ff692d6e1005e9de7e0cffb7d6b
|
||||
- name: github.com/kr/logfmt
|
||||
version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0
|
||||
- name: github.com/lib/pq
|
||||
version: e42267488fe361b9dc034be7a6bffef5b195bceb
|
||||
version: 23da1db4f16d9658a86ae9b717c245fc078f10f1
|
||||
subpackages:
|
||||
- oid
|
||||
- name: github.com/magiconair/properties
|
||||
version: 8d7837e64d3c1ee4e54a880c5a920ab4316fc90a
|
||||
- name: github.com/mailru/easyjson
|
||||
version: 2a92e673c9a6302dd05c3a691ae1f24aef46457d
|
||||
version: 2f5df55504ebc322e4d52d34df6a1f5b503bf26d
|
||||
subpackages:
|
||||
- buffer
|
||||
- jlexer
|
||||
@@ -322,7 +322,7 @@ imports:
|
||||
- name: github.com/opencontainers/go-digest
|
||||
version: 279bed98673dd5bef374d3b6e4b09e2af76183bf
|
||||
- name: github.com/opencontainers/image-spec
|
||||
version: 7653c236dd968a4f18c94d588591d98dea106323
|
||||
version: ebd93fd0782379ca3d821f0fa74f0651a9347a3e
|
||||
subpackages:
|
||||
- specs-go
|
||||
- specs-go/v1
|
||||
@@ -339,7 +339,7 @@ imports:
|
||||
- ext
|
||||
- log
|
||||
- name: github.com/openzipkin/zipkin-go-opentracing
|
||||
version: 37e942825de0f846d15acc3bc9d027c9134a9b25
|
||||
version: 9c88fa03bfdfaa5fec7cd1b40f3d10ec15c15fc6
|
||||
subpackages:
|
||||
- flag
|
||||
- thrift/gen-go/scribe
|
||||
@@ -357,7 +357,7 @@ imports:
|
||||
subpackages:
|
||||
- xxHash32
|
||||
- name: github.com/pkg/errors
|
||||
version: c605e284fe17294bda444b34710735b29d1a9d90
|
||||
version: 2b3a18b5f0fb6b4f9190549597d3f962c02bc5eb
|
||||
- name: github.com/prometheus/client_golang
|
||||
version: c5b7fccd204277076155f10851dad72b76a49317
|
||||
subpackages:
|
||||
@@ -367,7 +367,7 @@ imports:
|
||||
subpackages:
|
||||
- go
|
||||
- name: github.com/prometheus/common
|
||||
version: 49fee292b27bfff7f354ee0f64e1bc4850462edf
|
||||
version: 2f17f4a9d485bf34b4bfaccc273805040e4f86c8
|
||||
subpackages:
|
||||
- expfmt
|
||||
- internal/bitbucket.org/ww/goautoneg
|
||||
@@ -377,13 +377,13 @@ imports:
|
||||
subpackages:
|
||||
- xfs
|
||||
- name: github.com/PuerkitoBio/purell
|
||||
version: f619812e3caf603a8df60a7ec6f2654b703189ef
|
||||
version: 7cf257f0a33260797b0febf39f95fccd86aab2a3
|
||||
- name: github.com/PuerkitoBio/urlesc
|
||||
version: de5bf2ad457846296e2031421a34e2568e304e35
|
||||
- name: github.com/rcrowley/go-metrics
|
||||
version: 1f30fe9094a513ce4c700b9a54458bbb0c96996c
|
||||
- name: github.com/Shopify/sarama
|
||||
version: 15174039fd207656a0f97f52bc78ec7793deeada
|
||||
version: 4704a3a8c95920361c47e9a2adec13c3d757c757
|
||||
- name: github.com/sirupsen/logrus
|
||||
version: 89742aefa4b206dcf400792f3bd35b542998eb3b
|
||||
subpackages:
|
||||
@@ -401,11 +401,11 @@ imports:
|
||||
- name: github.com/spf13/viper
|
||||
version: 25b30aa063fc18e48662b86996252eabdcf2f0c7
|
||||
- name: github.com/ugorji/go
|
||||
version: 8c0409fcbb70099c748d71f714529204975f6c3f
|
||||
version: 54210f4e076c57f351166f0ed60e67d3fca57a36
|
||||
subpackages:
|
||||
- codec
|
||||
- name: golang.org/x/crypto
|
||||
version: 81e90905daefcd6fd217b62423c0908922eadb30
|
||||
version: 7d9177d70076375b9a59c8fde23d52d9c4a7ecd5
|
||||
subpackages:
|
||||
- bcrypt
|
||||
- blowfish
|
||||
@@ -414,7 +414,7 @@ imports:
|
||||
- pkcs12/internal/rc2
|
||||
- ssh/terminal
|
||||
- name: golang.org/x/net
|
||||
version: c8c74377599bd978aee1cf3b9b63a8634051cec2
|
||||
version: 66aacef3dd8a676686c7ae3716979581e8b03c47
|
||||
subpackages:
|
||||
- context
|
||||
- context/ctxhttp
|
||||
@@ -425,7 +425,7 @@ imports:
|
||||
- lex/httplex
|
||||
- trace
|
||||
- name: golang.org/x/sys
|
||||
version: 7ddbeae9ae08c6a06a59597f0c9edbc5ff2444ce
|
||||
version: 07c182904dbd53199946ba614a412c61d3c548f5
|
||||
subpackages:
|
||||
- unix
|
||||
- windows
|
||||
|
||||
@@ -22,7 +22,7 @@ import:
|
||||
- package: github.com/dghubble/oauth1
|
||||
- package: github.com/dgrijalva/jwt-go
|
||||
- package: github.com/docker/cli
|
||||
version: f5a192bcc4c2794e44eb9dd7d91c2be95c5c6342
|
||||
version: 139fcd3ee95f37f3ac17b1200fb0a63908cb6781
|
||||
subpackages:
|
||||
- cli/config/configfile
|
||||
- package: github.com/docker/distribution
|
||||
@@ -44,6 +44,7 @@ import:
|
||||
- package: github.com/go-openapi/swag
|
||||
- package: github.com/go-openapi/validate
|
||||
- package: github.com/go-sql-driver/mysql
|
||||
version: 21d7e97c9f760ca685a01ecea202e1c84276daa1
|
||||
- package: github.com/google/btree
|
||||
- package: github.com/iron-io/iron_go3
|
||||
subpackages:
|
||||
@@ -67,7 +68,9 @@ import:
|
||||
- package: github.com/opencontainers/runc
|
||||
version: ae2948042b08ad3d6d13cd09f40a50ffff4fc688
|
||||
- package: github.com/Azure/go-ansiterm
|
||||
version: fa152c58bc15761d0200cb75fe958b89a9d4888e
|
||||
version: 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
|
||||
- package: github.com/prometheus/common
|
||||
version: 2f17f4a9d485bf34b4bfaccc273805040e4f86c8
|
||||
testImport:
|
||||
- package: github.com/patrickmn/go-cache
|
||||
branch: master
|
||||
|
||||
2
vendor/github.com/Azure/go-ansiterm/parser.go
generated
vendored
@@ -5,7 +5,7 @@ import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var logger *logrus.Logger
|
||||
|
||||
2
vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
generated
vendored
@@ -9,7 +9,7 @@ import (
|
||||
"strconv"
|
||||
|
||||
"github.com/Azure/go-ansiterm"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var logger *logrus.Logger
|
||||
|
||||
2
vendor/github.com/PuerkitoBio/purell/purell.go
generated
vendored
@@ -299,7 +299,7 @@ func sortQuery(u *url.URL) {
|
||||
if len(q) > 0 {
|
||||
arKeys := make([]string, len(q))
|
||||
i := 0
|
||||
for k := range q {
|
||||
for k, _ := range q {
|
||||
arKeys[i] = k
|
||||
i++
|
||||
}
|
||||
|
||||
192
vendor/github.com/PuerkitoBio/purell/purell_test.go
generated
vendored
@@ -16,672 +16,672 @@ type testCase struct {
|
||||
|
||||
var (
|
||||
cases = [...]*testCase{
|
||||
{
|
||||
&testCase{
|
||||
"LowerScheme",
|
||||
"HTTP://www.SRC.ca",
|
||||
FlagLowercaseScheme,
|
||||
"http://www.SRC.ca",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"LowerScheme2",
|
||||
"http://www.SRC.ca",
|
||||
FlagLowercaseScheme,
|
||||
"http://www.SRC.ca",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"LowerHost",
|
||||
"HTTP://www.SRC.ca/",
|
||||
FlagLowercaseHost,
|
||||
"http://www.src.ca/", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"UpperEscapes",
|
||||
`http://www.whatever.com/Some%aa%20Special%8Ecases/`,
|
||||
FlagUppercaseEscapes,
|
||||
"http://www.whatever.com/Some%AA%20Special%8Ecases/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"UnnecessaryEscapes",
|
||||
`http://www.toto.com/%41%42%2E%44/%32%33%52%2D/%5f%7E`,
|
||||
FlagDecodeUnnecessaryEscapes,
|
||||
"http://www.toto.com/AB.D/23R-/_~",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"RemoveDefaultPort",
|
||||
"HTTP://www.SRC.ca:80/",
|
||||
FlagRemoveDefaultPort,
|
||||
"http://www.SRC.ca/", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"RemoveDefaultPort2",
|
||||
"HTTP://www.SRC.ca:80",
|
||||
FlagRemoveDefaultPort,
|
||||
"http://www.SRC.ca", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"RemoveDefaultPort3",
|
||||
"HTTP://www.SRC.ca:8080",
|
||||
FlagRemoveDefaultPort,
|
||||
"http://www.SRC.ca:8080", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Safe",
|
||||
"HTTP://www.SRC.ca:80/to%1ato%8b%ee/OKnow%41%42%43%7e",
|
||||
FlagsSafe,
|
||||
"http://www.src.ca/to%1Ato%8B%EE/OKnowABC~",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"BothLower",
|
||||
"HTTP://www.SRC.ca:80/to%1ato%8b%ee/OKnow%41%42%43%7e",
|
||||
FlagLowercaseHost | FlagLowercaseScheme,
|
||||
"http://www.src.ca:80/to%1Ato%8B%EE/OKnowABC~",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"RemoveTrailingSlash",
|
||||
"HTTP://www.SRC.ca:80/",
|
||||
FlagRemoveTrailingSlash,
|
||||
"http://www.SRC.ca:80", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"RemoveTrailingSlash2",
|
||||
"HTTP://www.SRC.ca:80/toto/titi/",
|
||||
FlagRemoveTrailingSlash,
|
||||
"http://www.SRC.ca:80/toto/titi", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"RemoveTrailingSlash3",
|
||||
"HTTP://www.SRC.ca:80/toto/titi/fin/?a=1",
|
||||
FlagRemoveTrailingSlash,
|
||||
"http://www.SRC.ca:80/toto/titi/fin?a=1", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"AddTrailingSlash",
|
||||
"HTTP://www.SRC.ca:80",
|
||||
FlagAddTrailingSlash,
|
||||
"http://www.SRC.ca:80/", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"AddTrailingSlash2",
|
||||
"HTTP://www.SRC.ca:80/toto/titi.html",
|
||||
FlagAddTrailingSlash,
|
||||
"http://www.SRC.ca:80/toto/titi.html/", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"AddTrailingSlash3",
|
||||
"HTTP://www.SRC.ca:80/toto/titi/fin?a=1",
|
||||
FlagAddTrailingSlash,
|
||||
"http://www.SRC.ca:80/toto/titi/fin/?a=1", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"RemoveDotSegments",
|
||||
"HTTP://root/a/b/./../../c/",
|
||||
FlagRemoveDotSegments,
|
||||
"http://root/c/", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"RemoveDotSegments2",
|
||||
"HTTP://root/../a/b/./../c/../d",
|
||||
FlagRemoveDotSegments,
|
||||
"http://root/a/d", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"UsuallySafe",
|
||||
"HTTP://www.SRC.ca:80/to%1ato%8b%ee/./c/d/../OKnow%41%42%43%7e/?a=b#test",
|
||||
FlagsUsuallySafeGreedy,
|
||||
"http://www.src.ca/to%1Ato%8B%EE/c/OKnowABC~?a=b#test",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"RemoveDirectoryIndex",
|
||||
"HTTP://root/a/b/c/default.aspx",
|
||||
FlagRemoveDirectoryIndex,
|
||||
"http://root/a/b/c/", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"RemoveDirectoryIndex2",
|
||||
"HTTP://root/a/b/c/default#a=b",
|
||||
FlagRemoveDirectoryIndex,
|
||||
"http://root/a/b/c/default#a=b", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"RemoveFragment",
|
||||
"HTTP://root/a/b/c/default#toto=tata",
|
||||
FlagRemoveFragment,
|
||||
"http://root/a/b/c/default", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"ForceHTTP",
|
||||
"https://root/a/b/c/default#toto=tata",
|
||||
FlagForceHTTP,
|
||||
"http://root/a/b/c/default#toto=tata",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"RemoveDuplicateSlashes",
|
||||
"https://root/a//b///c////default#toto=tata",
|
||||
FlagRemoveDuplicateSlashes,
|
||||
"https://root/a/b/c/default#toto=tata",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"RemoveDuplicateSlashes2",
|
||||
"https://root//a//b///c////default#toto=tata",
|
||||
FlagRemoveDuplicateSlashes,
|
||||
"https://root/a/b/c/default#toto=tata",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"RemoveWWW",
|
||||
"https://www.root/a/b/c/",
|
||||
FlagRemoveWWW,
|
||||
"https://root/a/b/c/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"RemoveWWW2",
|
||||
"https://WwW.Root/a/b/c/",
|
||||
FlagRemoveWWW,
|
||||
"https://Root/a/b/c/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"AddWWW",
|
||||
"https://Root/a/b/c/",
|
||||
FlagAddWWW,
|
||||
"https://www.Root/a/b/c/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"SortQuery",
|
||||
"http://root/toto/?b=4&a=1&c=3&b=2&a=5",
|
||||
FlagSortQuery,
|
||||
"http://root/toto/?a=1&a=5&b=2&b=4&c=3",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"RemoveEmptyQuerySeparator",
|
||||
"http://root/toto/?",
|
||||
FlagRemoveEmptyQuerySeparator,
|
||||
"http://root/toto/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Unsafe",
|
||||
"HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid",
|
||||
FlagsUnsafeGreedy,
|
||||
"http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Safe2",
|
||||
"HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid",
|
||||
FlagsSafe,
|
||||
"https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"UsuallySafe2",
|
||||
"HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid",
|
||||
FlagsUsuallySafeGreedy,
|
||||
"https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"AddTrailingSlashBug",
|
||||
"http://src.ca/",
|
||||
FlagsAllNonGreedy,
|
||||
"http://www.src.ca/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"SourceModified",
|
||||
"HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid",
|
||||
FlagsUnsafeGreedy,
|
||||
"http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3",
|
||||
true,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"IPv6-1",
|
||||
"http://[2001:db8:1f70::999:de8:7648:6e8]/test",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://[2001:db8:1f70::999:de8:7648:6e8]/test",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"IPv6-2",
|
||||
"http://[::ffff:192.168.1.1]/test",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://[::ffff:192.168.1.1]/test",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"IPv6-3",
|
||||
"http://[::ffff:192.168.1.1]:80/test",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://[::ffff:192.168.1.1]/test",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"IPv6-4",
|
||||
"htTps://[::fFff:192.168.1.1]:443/test",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"https://[::ffff:192.168.1.1]/test",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"FTP",
|
||||
"ftp://user:pass@ftp.foo.net/foo/bar",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"ftp://user:pass@ftp.foo.net/foo/bar",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Standard-1",
|
||||
"http://www.foo.com:80/foo",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://www.foo.com/foo",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Standard-2",
|
||||
"http://www.foo.com:8000/foo",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://www.foo.com:8000/foo",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Standard-3",
|
||||
"http://www.foo.com/%7ebar",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://www.foo.com/~bar",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Standard-4",
|
||||
"http://www.foo.com/%7Ebar",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://www.foo.com/~bar",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Standard-5",
|
||||
"http://USER:pass@www.Example.COM/foo/bar",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://USER:pass@www.example.com/foo/bar",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Standard-6",
|
||||
"http://test.example/?a=%26&b=1",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://test.example/?a=%26&b=1",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Standard-7",
|
||||
"http://test.example/%25/?p=%20val%20%25",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://test.example/%25/?p=%20val%20%25",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Standard-8",
|
||||
"http://test.example/path/with a%20space+/",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://test.example/path/with%20a%20space+/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Standard-9",
|
||||
"http://test.example/?",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://test.example/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Standard-10",
|
||||
"http://a.COM/path/?b&a",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://a.com/path/?b&a",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"StandardCasesAddTrailingSlash",
|
||||
"http://test.example?",
|
||||
FlagsSafe | FlagAddTrailingSlash,
|
||||
"http://test.example/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"OctalIP-1",
|
||||
"http://0123.011.0.4/",
|
||||
FlagsSafe | FlagDecodeOctalHost,
|
||||
"http://0123.011.0.4/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"OctalIP-2",
|
||||
"http://0102.0146.07.0223/",
|
||||
FlagsSafe | FlagDecodeOctalHost,
|
||||
"http://66.102.7.147/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"OctalIP-3",
|
||||
"http://0102.0146.07.0223.:23/",
|
||||
FlagsSafe | FlagDecodeOctalHost,
|
||||
"http://66.102.7.147.:23/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"OctalIP-4",
|
||||
"http://USER:pass@0102.0146.07.0223../",
|
||||
FlagsSafe | FlagDecodeOctalHost,
|
||||
"http://USER:pass@66.102.7.147../",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"DWORDIP-1",
|
||||
"http://123.1113982867/",
|
||||
FlagsSafe | FlagDecodeDWORDHost,
|
||||
"http://123.1113982867/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"DWORDIP-2",
|
||||
"http://1113982867/",
|
||||
FlagsSafe | FlagDecodeDWORDHost,
|
||||
"http://66.102.7.147/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"DWORDIP-3",
|
||||
"http://1113982867.:23/",
|
||||
FlagsSafe | FlagDecodeDWORDHost,
|
||||
"http://66.102.7.147.:23/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"DWORDIP-4",
|
||||
"http://USER:pass@1113982867../",
|
||||
FlagsSafe | FlagDecodeDWORDHost,
|
||||
"http://USER:pass@66.102.7.147../",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"HexIP-1",
|
||||
"http://0x123.1113982867/",
|
||||
FlagsSafe | FlagDecodeHexHost,
|
||||
"http://0x123.1113982867/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"HexIP-2",
|
||||
"http://0x42660793/",
|
||||
FlagsSafe | FlagDecodeHexHost,
|
||||
"http://66.102.7.147/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"HexIP-3",
|
||||
"http://0x42660793.:23/",
|
||||
FlagsSafe | FlagDecodeHexHost,
|
||||
"http://66.102.7.147.:23/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"HexIP-4",
|
||||
"http://USER:pass@0x42660793../",
|
||||
FlagsSafe | FlagDecodeHexHost,
|
||||
"http://USER:pass@66.102.7.147../",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"UnnecessaryHostDots-1",
|
||||
"http://.www.foo.com../foo/bar.html",
|
||||
FlagsSafe | FlagRemoveUnnecessaryHostDots,
|
||||
"http://www.foo.com/foo/bar.html",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"UnnecessaryHostDots-2",
|
||||
"http://www.foo.com./foo/bar.html",
|
||||
FlagsSafe | FlagRemoveUnnecessaryHostDots,
|
||||
"http://www.foo.com/foo/bar.html",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"UnnecessaryHostDots-3",
|
||||
"http://www.foo.com.:81/foo",
|
||||
FlagsSafe | FlagRemoveUnnecessaryHostDots,
|
||||
"http://www.foo.com:81/foo",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"UnnecessaryHostDots-4",
|
||||
"http://www.example.com./",
|
||||
FlagsSafe | FlagRemoveUnnecessaryHostDots,
|
||||
"http://www.example.com/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"EmptyPort-1",
|
||||
"http://www.thedraymin.co.uk:/main/?p=308",
|
||||
FlagsSafe | FlagRemoveEmptyPortSeparator,
|
||||
"http://www.thedraymin.co.uk/main/?p=308",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"EmptyPort-2",
|
||||
"http://www.src.ca:",
|
||||
FlagsSafe | FlagRemoveEmptyPortSeparator,
|
||||
"http://www.src.ca",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Slashes-1",
|
||||
"http://test.example/foo/bar/.",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo/bar/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Slashes-2",
|
||||
"http://test.example/foo/bar/./",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo/bar/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Slashes-3",
|
||||
"http://test.example/foo/bar/..",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Slashes-4",
|
||||
"http://test.example/foo/bar/../",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Slashes-5",
|
||||
"http://test.example/foo/bar/../baz",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo/baz",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Slashes-6",
|
||||
"http://test.example/foo/bar/../..",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Slashes-7",
|
||||
"http://test.example/foo/bar/../../",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Slashes-8",
|
||||
"http://test.example/foo/bar/../../baz",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/baz",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Slashes-9",
|
||||
"http://test.example/foo/bar/../../../baz",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/baz",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Slashes-10",
|
||||
"http://test.example/foo/bar/../../../../baz",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/baz",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Slashes-11",
|
||||
"http://test.example/./foo",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Slashes-12",
|
||||
"http://test.example/../foo",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Slashes-13",
|
||||
"http://test.example/foo.",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo.",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Slashes-14",
|
||||
"http://test.example/.foo",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/.foo",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Slashes-15",
|
||||
"http://test.example/foo..",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo..",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Slashes-16",
|
||||
"http://test.example/..foo",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/..foo",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Slashes-17",
|
||||
"http://test.example/./../foo",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Slashes-18",
|
||||
"http://test.example/./foo/.",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Slashes-19",
|
||||
"http://test.example/foo/./bar",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo/bar",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Slashes-20",
|
||||
"http://test.example/foo/../bar",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/bar",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Slashes-21",
|
||||
"http://test.example/foo//",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Slashes-22",
|
||||
"http://test.example/foo///bar//",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo/bar/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Relative",
|
||||
"foo/bar",
|
||||
FlagsAllGreedy,
|
||||
"foo/bar",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Relative-1",
|
||||
"./../foo",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"foo",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Relative-2",
|
||||
"./foo/bar/../baz/../bang/..",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"foo/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Relative-3",
|
||||
"foo///bar//",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"foo/bar/",
|
||||
false,
|
||||
},
|
||||
{
|
||||
&testCase{
|
||||
"Relative-4",
|
||||
"www.youtube.com",
|
||||
FlagsUsuallySafeGreedy,
|
||||
|
||||
3
vendor/github.com/Shopify/sarama/client.go
generated
vendored
@@ -612,6 +612,9 @@ func (client *client) backgroundMetadataUpdater() {
|
||||
if specificTopics, err := client.Topics(); err != nil {
|
||||
Logger.Println("Client background metadata topic load:", err)
|
||||
break
|
||||
} else if len(specificTopics) == 0 {
|
||||
Logger.Println("Client background metadata update: no specific topics to update")
|
||||
break
|
||||
} else {
|
||||
topics = specificTopics
|
||||
}
|
||||
|
||||
18
vendor/github.com/Shopify/sarama/config.go
generated
vendored
@@ -196,11 +196,23 @@ type Config struct {
|
||||
// Equivalent to the JVM's `fetch.wait.max.ms`.
|
||||
MaxWaitTime time.Duration
|
||||
|
||||
// The maximum amount of time the consumer expects a message takes to process
|
||||
// for the user. If writing to the Messages channel takes longer than this,
|
||||
// that partition will stop fetching more messages until it can proceed again.
|
||||
// The maximum amount of time the consumer expects a message takes to
|
||||
// process for the user. If writing to the Messages channel takes longer
|
||||
// than this, that partition will stop fetching more messages until it
|
||||
// can proceed again.
|
||||
// Note that, since the Messages channel is buffered, the actual grace time is
|
||||
// (MaxProcessingTime * ChanneBufferSize). Defaults to 100ms.
|
||||
// If a message is not written to the Messages channel between two ticks
|
||||
// of the expiryTicker then a timeout is detected.
|
||||
// Using a ticker instead of a timer to detect timeouts should typically
|
||||
// result in many fewer calls to Timer functions which may result in a
|
||||
// significant performance improvement if many messages are being sent
|
||||
// and timeouts are infrequent.
|
||||
// The disadvantage of using a ticker instead of a timer is that
|
||||
// timeouts will be less accurate. That is, the effective timeout could
|
||||
// be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
|
||||
// example, if `MaxProcessingTime` is 100ms then a delay of 180ms
|
||||
// between two messages being sent may not be recognized as a timeout.
|
||||
MaxProcessingTime time.Duration
|
||||
|
||||
// Return specifies what channels will be populated. If they are set to true,
|
||||
|
||||
36
vendor/github.com/Shopify/sarama/consumer.go
generated
vendored
@@ -440,35 +440,37 @@ func (child *partitionConsumer) HighWaterMarkOffset() int64 {
|
||||
|
||||
func (child *partitionConsumer) responseFeeder() {
|
||||
var msgs []*ConsumerMessage
|
||||
expiryTimer := time.NewTimer(child.conf.Consumer.MaxProcessingTime)
|
||||
expireTimedOut := false
|
||||
msgSent := false
|
||||
|
||||
feederLoop:
|
||||
for response := range child.feeder {
|
||||
msgs, child.responseResult = child.parseResponse(response)
|
||||
expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
|
||||
|
||||
for i, msg := range msgs {
|
||||
if !expiryTimer.Stop() && !expireTimedOut {
|
||||
// expiryTimer was expired; clear out the waiting msg
|
||||
<-expiryTimer.C
|
||||
}
|
||||
expiryTimer.Reset(child.conf.Consumer.MaxProcessingTime)
|
||||
expireTimedOut = false
|
||||
|
||||
messageSelect:
|
||||
select {
|
||||
case child.messages <- msg:
|
||||
case <-expiryTimer.C:
|
||||
expireTimedOut = true
|
||||
child.responseResult = errTimedOut
|
||||
child.broker.acks.Done()
|
||||
for _, msg = range msgs[i:] {
|
||||
child.messages <- msg
|
||||
msgSent = true
|
||||
case <-expiryTicker.C:
|
||||
if !msgSent {
|
||||
child.responseResult = errTimedOut
|
||||
child.broker.acks.Done()
|
||||
for _, msg = range msgs[i:] {
|
||||
child.messages <- msg
|
||||
}
|
||||
child.broker.input <- child
|
||||
continue feederLoop
|
||||
} else {
|
||||
// current message has not been sent, return to select
|
||||
// statement
|
||||
msgSent = false
|
||||
goto messageSelect
|
||||
}
|
||||
child.broker.input <- child
|
||||
continue feederLoop
|
||||
}
|
||||
}
|
||||
|
||||
expiryTicker.Stop()
|
||||
child.broker.acks.Done()
|
||||
}
|
||||
|
||||
|
||||
42
vendor/github.com/Shopify/sarama/consumer_test.go
generated
vendored
@@ -803,6 +803,48 @@ func TestConsumerOffsetOutOfRange(t *testing.T) {
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
func TestConsumerExpiryTicker(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 0)
|
||||
fetchResponse1 := &FetchResponse{}
|
||||
for i := 1; i <= 8; i++ {
|
||||
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, int64(i))
|
||||
}
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1234).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 1),
|
||||
"FetchRequest": NewMockSequence(fetchResponse1),
|
||||
})
|
||||
|
||||
config := NewConfig()
|
||||
config.ChannelBufferSize = 0
|
||||
config.Consumer.MaxProcessingTime = 10 * time.Millisecond
|
||||
master, err := NewConsumer([]string{broker0.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// When
|
||||
consumer, err := master.ConsumePartition("my_topic", 0, 1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Then: messages with offsets 1 through 8 are read
|
||||
for i := 1; i <= 8; i++ {
|
||||
assertMessageOffset(t, <-consumer.Messages(), int64(i))
|
||||
time.Sleep(2 * time.Millisecond)
|
||||
}
|
||||
|
||||
safeClose(t, consumer)
|
||||
safeClose(t, master)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
func assertMessageOffset(t *testing.T, msg *ConsumerMessage, expectedOffset int64) {
|
||||
if msg.Offset != expectedOffset {
|
||||
t.Errorf("Incorrect message offset: expected=%d, actual=%d", expectedOffset, msg.Offset)
|
||||
|
||||
3
vendor/github.com/Shopify/sarama/message_test.go
generated
vendored
@@ -2,6 +2,7 @@ package sarama
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
@@ -106,7 +107,7 @@ func TestMessageEncoding(t *testing.T) {
|
||||
|
||||
message.Value = []byte{}
|
||||
message.Codec = CompressionGZIP
|
||||
if runtime.Version() == "go1.8" {
|
||||
if runtime.Version() == "go1.8" || strings.HasPrefix(runtime.Version(), "go1.8.") {
|
||||
testEncodable(t, "empty gzip", &message, emptyGzipMessage18)
|
||||
} else {
|
||||
testEncodable(t, "empty gzip", &message, emptyGzipMessage)
|
||||
|
||||
18
vendor/github.com/Shopify/sarama/offset_manager.go
generated
vendored
@@ -151,6 +151,13 @@ type PartitionOffsetManager interface {
|
||||
// message twice, and your processing should ideally be idempotent.
|
||||
MarkOffset(offset int64, metadata string)
|
||||
|
||||
// ResetOffset resets to the provided offset, alongside a metadata string that
|
||||
// represents the state of the partition consumer at that point in time. Reset
|
||||
// acts as a counterpart to MarkOffset, the difference being that it allows to
|
||||
// reset an offset to an earlier or smaller value, where MarkOffset only
|
||||
// allows incrementing the offset. cf MarkOffset for more details.
|
||||
ResetOffset(offset int64, metadata string)
|
||||
|
||||
// Errors returns a read channel of errors that occur during offset management, if
|
||||
// enabled. By default, errors are logged and not returned over this channel. If
|
||||
// you want to implement any custom error handling, set your config's
|
||||
@@ -329,6 +336,17 @@ func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
|
||||
}
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) {
|
||||
pom.lock.Lock()
|
||||
defer pom.lock.Unlock()
|
||||
|
||||
if offset < pom.offset {
|
||||
pom.offset = offset
|
||||
pom.metadata = metadata
|
||||
pom.dirty = true
|
||||
}
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
|
||||
pom.lock.Lock()
|
||||
defer pom.lock.Unlock()
|
||||
|
||||
64
vendor/github.com/Shopify/sarama/offset_manager_test.go
generated
vendored
@@ -204,6 +204,70 @@ func TestPartitionOffsetManagerNextOffset(t *testing.T) {
|
||||
safeClose(t, testClient)
|
||||
}
|
||||
|
||||
func TestPartitionOffsetManagerResetOffset(t *testing.T) {
|
||||
om, testClient, broker, coordinator := initOffsetManager(t)
|
||||
pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
|
||||
|
||||
ocResponse := new(OffsetCommitResponse)
|
||||
ocResponse.AddError("my_topic", 0, ErrNoError)
|
||||
coordinator.Returns(ocResponse)
|
||||
|
||||
expected := int64(1)
|
||||
pom.ResetOffset(expected, "modified_meta")
|
||||
actual, meta := pom.NextOffset()
|
||||
|
||||
if actual != expected {
|
||||
t.Errorf("Expected offset %v. Actual: %v", expected, actual)
|
||||
}
|
||||
if meta != "modified_meta" {
|
||||
t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
|
||||
}
|
||||
|
||||
safeClose(t, pom)
|
||||
safeClose(t, om)
|
||||
safeClose(t, testClient)
|
||||
broker.Close()
|
||||
coordinator.Close()
|
||||
}
|
||||
|
||||
func TestPartitionOffsetManagerResetOffsetWithRetention(t *testing.T) {
|
||||
om, testClient, broker, coordinator := initOffsetManager(t)
|
||||
testClient.Config().Consumer.Offsets.Retention = time.Hour
|
||||
|
||||
pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
|
||||
|
||||
ocResponse := new(OffsetCommitResponse)
|
||||
ocResponse.AddError("my_topic", 0, ErrNoError)
|
||||
handler := func(req *request) (res encoder) {
|
||||
if req.body.version() != 2 {
|
||||
t.Errorf("Expected to be using version 2. Actual: %v", req.body.version())
|
||||
}
|
||||
offsetCommitRequest := req.body.(*OffsetCommitRequest)
|
||||
if offsetCommitRequest.RetentionTime != (60 * 60 * 1000) {
|
||||
t.Errorf("Expected an hour retention time. Actual: %v", offsetCommitRequest.RetentionTime)
|
||||
}
|
||||
return ocResponse
|
||||
}
|
||||
coordinator.setHandler(handler)
|
||||
|
||||
expected := int64(1)
|
||||
pom.ResetOffset(expected, "modified_meta")
|
||||
actual, meta := pom.NextOffset()
|
||||
|
||||
if actual != expected {
|
||||
t.Errorf("Expected offset %v. Actual: %v", expected, actual)
|
||||
}
|
||||
if meta != "modified_meta" {
|
||||
t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
|
||||
}
|
||||
|
||||
safeClose(t, pom)
|
||||
safeClose(t, om)
|
||||
safeClose(t, testClient)
|
||||
broker.Close()
|
||||
coordinator.Close()
|
||||
}
|
||||
|
||||
func TestPartitionOffsetManagerMarkOffset(t *testing.T) {
|
||||
om, testClient, broker, coordinator := initOffsetManager(t)
|
||||
pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
|
||||
|
||||
1
vendor/github.com/Sirupsen/logrus/.gitignore
generated
vendored
@@ -1 +0,0 @@
|
||||
logrus
|
||||
15
vendor/github.com/Sirupsen/logrus/.travis.yml
generated
vendored
@@ -1,15 +0,0 @@
|
||||
language: go
|
||||
go:
|
||||
- 1.6.x
|
||||
- 1.7.x
|
||||
- 1.8.x
|
||||
- tip
|
||||
env:
|
||||
- GOMAXPROCS=4 GORACE=halt_on_error=1
|
||||
install:
|
||||
- go get github.com/stretchr/testify/assert
|
||||
- go get gopkg.in/gemnasium/logrus-airbrake-hook.v2
|
||||
- go get golang.org/x/sys/unix
|
||||
- go get golang.org/x/sys/windows
|
||||
script:
|
||||
- go test -race -v ./...
|
||||
113
vendor/github.com/Sirupsen/logrus/CHANGELOG.md
generated
vendored
@@ -1,113 +0,0 @@
|
||||
# 1.0.3
|
||||
|
||||
* Replace example files with testable examples
|
||||
|
||||
# 1.0.2
|
||||
|
||||
* bug: quote non-string values in text formatter (#583)
|
||||
* Make (*Logger) SetLevel a public method
|
||||
|
||||
# 1.0.1
|
||||
|
||||
* bug: fix escaping in text formatter (#575)
|
||||
|
||||
# 1.0.0
|
||||
|
||||
* Officially changed name to lower-case
|
||||
* bug: colors on Windows 10 (#541)
|
||||
* bug: fix race in accessing level (#512)
|
||||
|
||||
# 0.11.5
|
||||
|
||||
* feature: add writer and writerlevel to entry (#372)
|
||||
|
||||
# 0.11.4
|
||||
|
||||
* bug: fix undefined variable on solaris (#493)
|
||||
|
||||
# 0.11.3
|
||||
|
||||
* formatter: configure quoting of empty values (#484)
|
||||
* formatter: configure quoting character (default is `"`) (#484)
|
||||
* bug: fix not importing io correctly in non-linux environments (#481)
|
||||
|
||||
# 0.11.2
|
||||
|
||||
* bug: fix windows terminal detection (#476)
|
||||
|
||||
# 0.11.1
|
||||
|
||||
* bug: fix tty detection with custom out (#471)
|
||||
|
||||
# 0.11.0
|
||||
|
||||
* performance: Use bufferpool to allocate (#370)
|
||||
* terminal: terminal detection for app-engine (#343)
|
||||
* feature: exit handler (#375)
|
||||
|
||||
# 0.10.0
|
||||
|
||||
* feature: Add a test hook (#180)
|
||||
* feature: `ParseLevel` is now case-insensitive (#326)
|
||||
* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
|
||||
* performance: avoid re-allocations on `WithFields` (#335)
|
||||
|
||||
# 0.9.0
|
||||
|
||||
* logrus/text_formatter: don't emit empty msg
|
||||
* logrus/hooks/airbrake: move out of main repository
|
||||
* logrus/hooks/sentry: move out of main repository
|
||||
* logrus/hooks/papertrail: move out of main repository
|
||||
* logrus/hooks/bugsnag: move out of main repository
|
||||
* logrus/core: run tests with `-race`
|
||||
* logrus/core: detect TTY based on `stderr`
|
||||
* logrus/core: support `WithError` on logger
|
||||
* logrus/core: Solaris support
|
||||
|
||||
# 0.8.7
|
||||
|
||||
* logrus/core: fix possible race (#216)
|
||||
* logrus/doc: small typo fixes and doc improvements
|
||||
|
||||
|
||||
# 0.8.6
|
||||
|
||||
* hooks/raven: allow passing an initialized client
|
||||
|
||||
# 0.8.5
|
||||
|
||||
* logrus/core: revert #208
|
||||
|
||||
# 0.8.4
|
||||
|
||||
* formatter/text: fix data race (#218)
|
||||
|
||||
# 0.8.3
|
||||
|
||||
* logrus/core: fix entry log level (#208)
|
||||
* logrus/core: improve performance of text formatter by 40%
|
||||
* logrus/core: expose `LevelHooks` type
|
||||
* logrus/core: add support for DragonflyBSD and NetBSD
|
||||
* formatter/text: print structs more verbosely
|
||||
|
||||
# 0.8.2
|
||||
|
||||
* logrus: fix more Fatal family functions
|
||||
|
||||
# 0.8.1
|
||||
|
||||
* logrus: fix not exiting on `Fatalf` and `Fatalln`
|
||||
|
||||
# 0.8.0
|
||||
|
||||
* logrus: defaults to stderr instead of stdout
|
||||
* hooks/sentry: add special field for `*http.Request`
|
||||
* formatter/text: ignore Windows for colors
|
||||
|
||||
# 0.7.3
|
||||
|
||||
* formatter/\*: allow configuration of timestamp layout
|
||||
|
||||
# 0.7.2
|
||||
|
||||
* formatter/text: Add configuration option for time format (#158)
|
||||
21
vendor/github.com/Sirupsen/logrus/LICENSE
generated
vendored
@@ -1,21 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Simon Eskildsen
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
507 vendor/github.com/Sirupsen/logrus/README.md generated vendored
@@ -1,507 +0,0 @@
|
||||
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [](https://travis-ci.org/sirupsen/logrus) [](https://godoc.org/github.com/sirupsen/logrus)
|
||||
|
||||
Logrus is a structured logger for Go (golang), completely API compatible with
|
||||
the standard library logger.
|
||||
|
||||
**Seeing weird case-sensitive problems?** In the past it has been possible to
|
||||
import Logrus as both upper- and lower-case. Due to the Go package environment,
|
||||
this caused issues in the community and we needed a standard. Some environments
|
||||
experienced problems with the upper-case variant, so the lower-case was decided.
|
||||
Everything using `logrus` will need to use the lower-case:
|
||||
`github.com/sirupsen/logrus`. Any package that isn't, should be changed.
|
||||
|
||||
To fix Glide, see [these
|
||||
comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
|
||||
For an in-depth explanation of the casing issue, see [this
|
||||
comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
|
||||
|
||||
**Are you interested in assisting in maintaining Logrus?** Currently I have a
|
||||
lot of obligations, and I am unable to provide Logrus with the maintainership it
|
||||
needs. If you'd like to help, please reach out to me at `simon at author's
|
||||
username dot com`.
|
||||
|
||||
Nicely color-coded in development (when a TTY is attached, otherwise just
|
||||
plain text):
|
||||
|
||||

|
||||
|
||||
With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
|
||||
or Splunk:
|
||||
|
||||
```json
|
||||
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
|
||||
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
|
||||
|
||||
{"level":"warning","msg":"The group's number increased tremendously!",
|
||||
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
|
||||
|
||||
{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
|
||||
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
|
||||
|
||||
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
|
||||
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
|
||||
|
||||
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
|
||||
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
|
||||
```
|
||||
|
||||
With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
|
||||
attached, the output is compatible with the
|
||||
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
|
||||
|
||||
```text
|
||||
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
|
||||
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
|
||||
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
|
||||
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
|
||||
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
|
||||
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
|
||||
exit status 1
|
||||
```
|
||||
|
||||
#### Case-sensitivity
|
||||
|
||||
The organization's name was changed to lower-case, and this will not be changed
|
||||
back. If you are getting import conflicts due to case sensitivity, please use
|
||||
the lower-case import: `github.com/sirupsen/logrus`.
|
||||
|
||||
#### Example
|
||||
|
||||
The simplest way to use Logrus is simply the package-level exported logger:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.WithFields(log.Fields{
|
||||
"animal": "walrus",
|
||||
}).Info("A walrus appears")
|
||||
}
|
||||
```
|
||||
|
||||
Note that it's completely API-compatible with the stdlib logger, so you can
|
||||
replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
|
||||
and you'll now have the flexibility of Logrus. You can customize it all you
|
||||
want:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Log as JSON instead of the default ASCII formatter.
|
||||
log.SetFormatter(&log.JSONFormatter{})
|
||||
|
||||
// Output to stdout instead of the default stderr
|
||||
// Can be any io.Writer, see below for File example
|
||||
log.SetOutput(os.Stdout)
|
||||
|
||||
// Only log the warning severity or above.
|
||||
log.SetLevel(log.WarnLevel)
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.WithFields(log.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"omg": true,
|
||||
"number": 122,
|
||||
}).Warn("The group's number increased tremendously!")
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"omg": true,
|
||||
"number": 100,
|
||||
}).Fatal("The ice breaks!")
|
||||
|
||||
// A common pattern is to re-use fields between logging statements by re-using
|
||||
// the logrus.Entry returned from WithFields()
|
||||
contextLogger := log.WithFields(log.Fields{
|
||||
"common": "this is a common field",
|
||||
"other": "I also should be logged always",
|
||||
})
|
||||
|
||||
contextLogger.Info("I'll be logged with common and other field")
|
||||
contextLogger.Info("Me too")
|
||||
}
|
||||
```
|
||||
|
||||
For more advanced usage such as logging to multiple locations from the same
|
||||
application, you can also create an instance of the `logrus` Logger:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Create a new instance of the logger. You can have any number of instances.
|
||||
var log = logrus.New()
|
||||
|
||||
func main() {
|
||||
// The API for setting attributes is a little different than the package level
|
||||
// exported logger. See Godoc.
|
||||
log.Out = os.Stdout
|
||||
|
||||
// You could set this to any `io.Writer` such as a file
|
||||
// file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
|
||||
// if err == nil {
|
||||
// log.Out = file
|
||||
// } else {
|
||||
// log.Info("Failed to log to file, using default stderr")
|
||||
// }
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
}
|
||||
```
|
||||
|
||||
#### Fields
|
||||
|
||||
Logrus encourages careful, structured logging through logging fields instead of
|
||||
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
|
||||
to send event %s to topic %s with key %d")`, you should log the much more
|
||||
discoverable:
|
||||
|
||||
```go
|
||||
log.WithFields(log.Fields{
|
||||
"event": event,
|
||||
"topic": topic,
|
||||
"key": key,
|
||||
}).Fatal("Failed to send event")
|
||||
```
|
||||
|
||||
We've found this API forces you to think about logging in a way that produces
|
||||
much more useful logging messages. We've been in countless situations where just
|
||||
a single added field to a log statement that was already there would've saved us
|
||||
hours. The `WithFields` call is optional.
|
||||
|
||||
In general, with Logrus using any of the `printf`-family functions should be
|
||||
seen as a hint you should add a field, however, you can still use the
|
||||
`printf`-family functions with Logrus.
|
||||
|
||||
#### Default Fields
|
||||
|
||||
Often it's helpful to have fields _always_ attached to log statements in an
|
||||
application or parts of one. For example, you may want to always log the
|
||||
`request_id` and `user_ip` in the context of a request. Instead of writing
|
||||
`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
|
||||
every line, you can create a `logrus.Entry` to pass around instead:
|
||||
|
||||
```go
|
||||
requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
|
||||
requestLogger.Info("something happened on that request") // will log request_id and user_ip
|
||||
requestLogger.Warn("something not great happened")
|
||||
```
|
||||
|
||||
#### Hooks
|
||||
|
||||
You can add hooks for logging levels. For example to send errors to an exception
|
||||
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
|
||||
multiple places simultaneously, e.g. syslog.
|
||||
|
||||
Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
|
||||
`init`:
|
||||
|
||||
```go
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
|
||||
logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
|
||||
"log/syslog"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
||||
// Use the Airbrake hook to report errors that have Error severity or above to
|
||||
// an exception tracker. You can create custom hooks, see the Hooks section.
|
||||
log.AddHook(airbrake.NewHook(123, "xyz", "production"))
|
||||
|
||||
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
|
||||
if err != nil {
|
||||
log.Error("Unable to connect to local syslog daemon")
|
||||
} else {
|
||||
log.AddHook(hook)
|
||||
}
|
||||
}
|
||||
```
|
||||
Note: the Syslog hook also supports connecting to a local syslog socket (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
|
||||
|
||||
| Hook | Description |
|
||||
| ----- | ----------- |
|
||||
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
|
||||
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
|
||||
| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) |
|
||||
| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
|
||||
| [AzureTableHook](https://github.com/kpfaulkner/azuretablehook/) | Hook for logging to Azure Table Storage|
|
||||
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
|
||||
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
|
||||
| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) |
|
||||
| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
|
||||
| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/)
|
||||
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
|
||||
| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) |
|
||||
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
|
||||
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
|
||||
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
|
||||
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
|
||||
| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
|
||||
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
|
||||
| [KafkaLogrus](https://github.com/tracer0tong/kafkalogrus) | Hook for logging to Kafka |
|
||||
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
|
||||
| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) |
|
||||
| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
|
||||
| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
|
||||
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
|
||||
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
|
||||
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
|
||||
| [Mattermost](https://github.com/shuLhan/mattermost-integration/tree/master/hooks/logrus) | Hook for logging to [Mattermost](https://mattermost.com/) |
|
||||
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
|
||||
| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) |
|
||||
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
|
||||
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
|
||||
| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
|
||||
| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
|
||||
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
|
||||
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
|
||||
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
|
||||
| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
|
||||
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
|
||||
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
|
||||
| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
|
||||
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
|
||||
| [Syslog](https://github.com/sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
|
||||
| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. |
|
||||
| [Telegram](https://github.com/rossmcdonald/telegram_hook) | Hook for logging errors to [Telegram](https://telegram.org/) |
|
||||
| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
|
||||
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
|
||||
| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
|
||||
| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) |
|
||||
|
||||
#### Level logging
|
||||
|
||||
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
|
||||
|
||||
```go
|
||||
log.Debug("Useful debugging information.")
|
||||
log.Info("Something noteworthy happened!")
|
||||
log.Warn("You should probably take a look at this.")
|
||||
log.Error("Something failed but I'm not quitting.")
|
||||
// Calls os.Exit(1) after logging
|
||||
log.Fatal("Bye.")
|
||||
// Calls panic() after logging
|
||||
log.Panic("I'm bailing.")
|
||||
```
|
||||
|
||||
You can set the logging level on a `Logger`; it will then only log entries with
|
||||
that severity or anything above it:
|
||||
|
||||
```go
|
||||
// Will log anything that is info or above (warn, error, fatal, panic). Default.
|
||||
log.SetLevel(log.InfoLevel)
|
||||
```
|
||||
|
||||
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
|
||||
environment if your application has that.
|
||||
|
||||
#### Entries
|
||||
|
||||
Besides the fields added with `WithField` or `WithFields` some fields are
|
||||
automatically added to all logging events:
|
||||
|
||||
1. `time`. The timestamp when the entry was created.
|
||||
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
|
||||
the `WithFields` call. E.g. `Failed to send event.`
|
||||
3. `level`. The logging level. E.g. `info`.
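A minimal sketch of how those automatic fields show up with the default `TextFormatter` (the example output line follows the format shown in the package docs; the timestamp will of course differ):

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	// `animal` is set explicitly; `time`, `level` and `msg` are added automatically.
	log.WithField("animal", "walrus").Info("A walrus appears")

	// Example output (default TextFormatter, no TTY attached):
	// time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus
}
```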
|
||||
|
||||
#### Environments
|
||||
|
||||
Logrus has no notion of environment.
|
||||
|
||||
If you wish for hooks and formatters to only be used in specific environments,
|
||||
you should handle that yourself. For example, if your application has a global
|
||||
variable `Environment`, which is a string representation of the environment you
|
||||
could do:
|
||||
|
||||
```go
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// do something here to set environment depending on an environment variable
|
||||
// or command-line flag
|
||||
if Environment == "production" {
|
||||
log.SetFormatter(&log.JSONFormatter{})
|
||||
} else {
|
||||
// The TextFormatter is default, you don't actually have to do this.
|
||||
log.SetFormatter(&log.TextFormatter{})
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This configuration is how `logrus` was intended to be used, but JSON in
|
||||
production is mostly only useful if you do log aggregation with tools like
|
||||
Splunk or Logstash.
|
||||
|
||||
#### Formatters
|
||||
|
||||
The built-in logging formatters are:
|
||||
|
||||
* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
|
||||
without colors.
|
||||
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
|
||||
field to `true`. To force no colored output even if there is a TTY set the
|
||||
`DisableColors` field to `true`. For Windows, see
|
||||
[github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
|
||||
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
|
||||
* `logrus.JSONFormatter`. Logs fields as JSON.
|
||||
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
|
||||
|
||||
Third party logging formatters:
|
||||
|
||||
* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
|
||||
* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
|
||||
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
|
||||
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
|
||||
|
||||
You can define your formatter by implementing the `Formatter` interface,
|
||||
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
|
||||
`Fields` type (`map[string]interface{}`) with all your fields as well as the
|
||||
default ones (see Entries section above):
|
||||
|
||||
```go
|
||||
type MyJSONFormatter struct {
|
||||
}
|
||||
|
||||
log.SetFormatter(new(MyJSONFormatter))
|
||||
|
||||
func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
|
||||
// Note this doesn't include Time, Level and Message which are available on
|
||||
// the Entry. Consult `godoc` on information about those fields or read the
|
||||
// source of the official loggers.
|
||||
serialized, err := json.Marshal(entry.Data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
||||
}
|
||||
return append(serialized, '\n'), nil
|
||||
}
|
||||
```
|
||||
|
||||
#### Logger as an `io.Writer`
|
||||
|
||||
Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
|
||||
|
||||
```go
|
||||
w := logger.Writer()
|
||||
defer w.Close()
|
||||
|
||||
srv := http.Server{
|
||||
// create a stdlib log.Logger that writes to
|
||||
// logrus.Logger.
|
||||
ErrorLog: log.New(w, "", 0),
|
||||
}
|
||||
```
|
||||
|
||||
Each line written to that writer will be printed the usual way, using formatters
|
||||
and hooks. The level for those entries is `info`.
|
||||
|
||||
This means that we can override the standard library logger easily:
|
||||
|
||||
```go
|
||||
logger := logrus.New()
|
||||
logger.Formatter = &logrus.JSONFormatter{}
|
||||
|
||||
// Use logrus for standard log output
|
||||
// Note that `log` here references stdlib's log
|
||||
// Not logrus imported under the name `log`.
|
||||
log.SetOutput(logger.Writer())
|
||||
```
|
||||
|
||||
#### Rotation
|
||||
|
||||
Log rotation is not provided with Logrus. Log rotation should be done by an
|
||||
external program (like `logrotate(8)`) that can compress and delete old log
|
||||
entries. It should not be a feature of the application-level logger.
|
||||
|
||||
#### Tools
|
||||
|
||||
| Tool | Description |
|
||||
| ---- | ----------- |
|
||||
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus that manages loggers: you can set a logger's level, hooks and formatter from a config file, so the logger is generated with a different configuration for each environment.|
|
||||
|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps spf13/Viper to load configuration with fangs! It also simplifies Logrus configuration by reusing some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
|
||||
|
||||
#### Testing
|
||||
|
||||
Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
|
||||
|
||||
* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
|
||||
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
|
||||
|
||||
```go
|
||||
import(
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus/hooks/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestSomething(t *testing.T) {
|
||||
logger, hook := test.NewNullLogger()
|
||||
logger.Error("Helloerror")
|
||||
|
||||
assert.Equal(t, 1, len(hook.Entries))
|
||||
assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
|
||||
assert.Equal(t, "Helloerror", hook.LastEntry().Message)
|
||||
|
||||
hook.Reset()
|
||||
assert.Nil(t, hook.LastEntry())
|
||||
}
|
||||
```
|
||||
|
||||
#### Fatal handlers
|
||||
|
||||
Logrus can register one or more functions that will be called when any `fatal`
|
||||
level message is logged. The registered handlers will be executed before
|
||||
logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
|
||||
to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
|
||||
|
||||
```
|
||||
...
|
||||
handler := func() {
|
||||
// gracefully shutdown something...
|
||||
}
|
||||
logrus.RegisterExitHandler(handler)
|
||||
...
|
||||
```
|
||||
|
||||
#### Thread safety
|
||||
|
||||
By default, the Logger is protected by a mutex for concurrent writes; this mutex is used when calling hooks and writing logs.
|
||||
If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
|
||||
|
||||
Situations when locking is not needed include:
|
||||
|
||||
* You have no hooks registered, or hook calling is already thread-safe.
|
||||
|
||||
* Writing to logger.Out is already thread-safe, for example:
|
||||
|
||||
1) logger.Out is protected by locks.
|
||||
|
||||
2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
|
||||
|
||||
(Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
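A minimal sketch of that second situation, assuming no hooks are registered so skipping the internal mutex is safe (`app.log` is just a hypothetical file name):

```go
package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	// Small writes to an O_APPEND file are effectively atomic, so with no
	// hooks registered the logger's own mutex can be skipped.
	file, err := os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
	if err != nil {
		logger.Fatal("Failed to open log file")
	}
	logger.Out = file
	logger.SetNoLock()

	logger.WithField("animal", "walrus").Info("A walrus appears")
}
```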
64 vendor/github.com/Sirupsen/logrus/alt_exit.go generated vendored
@@ -1,64 +0,0 @@
|
||||
package logrus
|
||||
|
||||
// The following code was sourced and modified from the
|
||||
// https://github.com/tebeka/atexit package governed by the following license:
|
||||
//
|
||||
// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
// this software and associated documentation files (the "Software"), to deal in
|
||||
// the Software without restriction, including without limitation the rights to
|
||||
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
// subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in all
|
||||
// copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
var handlers = []func(){}
|
||||
|
||||
func runHandler(handler func()) {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
|
||||
}
|
||||
}()
|
||||
|
||||
handler()
|
||||
}
|
||||
|
||||
func runHandlers() {
|
||||
for _, handler := range handlers {
|
||||
runHandler(handler)
|
||||
}
|
||||
}
|
||||
|
||||
// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
|
||||
func Exit(code int) {
|
||||
runHandlers()
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
|
||||
// all handlers. The handlers will also be invoked when any Fatal log entry is
|
||||
// made.
|
||||
//
|
||||
// This method is useful when a caller wishes to use logrus to log a fatal
|
||||
// message but also needs to gracefully shutdown. An example usecase could be
|
||||
// closing database connections, or sending a alert that the application is
|
||||
// closing.
|
||||
func RegisterExitHandler(handler func()) {
|
||||
handlers = append(handlers, handler)
|
||||
}
|
||||
83 vendor/github.com/Sirupsen/logrus/alt_exit_test.go generated vendored
@@ -1,83 +0,0 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestRegister(t *testing.T) {
|
||||
current := len(handlers)
|
||||
RegisterExitHandler(func() {})
|
||||
if len(handlers) != current+1 {
|
||||
t.Fatalf("expected %d handlers, got %d", current+1, len(handlers))
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandler(t *testing.T) {
|
||||
tempDir, err := ioutil.TempDir("", "test_handler")
|
||||
if err != nil {
|
||||
log.Fatalf("can't create temp dir. %q", err)
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
|
||||
gofile := filepath.Join(tempDir, "gofile.go")
|
||||
if err := ioutil.WriteFile(gofile, testprog, 0666); err != nil {
|
||||
t.Fatalf("can't create go file. %q", err)
|
||||
}
|
||||
|
||||
outfile := filepath.Join(tempDir, "outfile.out")
|
||||
arg := time.Now().UTC().String()
|
||||
err = exec.Command("go", "run", gofile, outfile, arg).Run()
|
||||
if err == nil {
|
||||
t.Fatalf("completed normally, should have failed")
|
||||
}
|
||||
|
||||
data, err := ioutil.ReadFile(outfile)
|
||||
if err != nil {
|
||||
t.Fatalf("can't read output file %s. %q", outfile, err)
|
||||
}
|
||||
|
||||
if string(data) != arg {
|
||||
t.Fatalf("bad data. Expected %q, got %q", data, arg)
|
||||
}
|
||||
}
|
||||
|
||||
var testprog = []byte(`
|
||||
// Test program for atexit, gets output file and data as arguments and writes
|
||||
// data to output file in atexit handler.
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/sirupsen/logrus"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
)
|
||||
|
||||
var outfile = ""
|
||||
var data = ""
|
||||
|
||||
func handler() {
|
||||
ioutil.WriteFile(outfile, []byte(data), 0666)
|
||||
}
|
||||
|
||||
func badHandler() {
|
||||
n := 0
|
||||
fmt.Println(1/n)
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
outfile = flag.Arg(0)
|
||||
data = flag.Arg(1)
|
||||
|
||||
logrus.RegisterExitHandler(handler)
|
||||
logrus.RegisterExitHandler(badHandler)
|
||||
logrus.Fatal("Bye bye")
|
||||
}
|
||||
`)
|
||||
14 vendor/github.com/Sirupsen/logrus/appveyor.yml generated vendored
@@ -1,14 +0,0 @@
|
||||
version: "{build}"
|
||||
platform: x64
|
||||
clone_folder: c:\gopath\src\github.com\sirupsen\logrus
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
install:
|
||||
- set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
|
||||
- go version
|
||||
build_script:
|
||||
- go get -t
|
||||
- go test
|
||||
26 vendor/github.com/Sirupsen/logrus/doc.go generated vendored
@@ -1,26 +0,0 @@
|
||||
/*
|
||||
Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
|
||||
|
||||
|
||||
The simplest way to use Logrus is simply the package-level exported logger:
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.WithFields(log.Fields{
|
||||
"animal": "walrus",
|
||||
"number": 1,
|
||||
"size": 10,
|
||||
}).Info("A walrus appears")
|
||||
}
|
||||
|
||||
Output:
|
||||
time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
|
||||
|
||||
For a full guide visit https://github.com/sirupsen/logrus
|
||||
*/
|
||||
package logrus
|
||||
279 vendor/github.com/Sirupsen/logrus/entry.go generated vendored
@@ -1,279 +0,0 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var bufferPool *sync.Pool
|
||||
|
||||
func init() {
|
||||
bufferPool = &sync.Pool{
|
||||
New: func() interface{} {
|
||||
return new(bytes.Buffer)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Defines the key when adding errors using WithError.
|
||||
var ErrorKey = "error"
|
||||
|
||||
// An entry is the final or intermediate Logrus logging entry. It contains all
|
||||
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
|
||||
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
|
||||
// passed around as much as you wish to avoid field duplication.
|
||||
type Entry struct {
|
||||
Logger *Logger
|
||||
|
||||
// Contains all the fields set by the user.
|
||||
Data Fields
|
||||
|
||||
// Time at which the log entry was created
|
||||
Time time.Time
|
||||
|
||||
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
|
||||
// This field will be set on entry firing and the value will be equal to the one in Logger struct field.
|
||||
Level Level
|
||||
|
||||
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
|
||||
Message string
|
||||
|
||||
// When formatter is called in entry.log(), a Buffer may be set to the entry
|
||||
Buffer *bytes.Buffer
|
||||
}
|
||||
|
||||
func NewEntry(logger *Logger) *Entry {
|
||||
return &Entry{
|
||||
Logger: logger,
|
||||
// Default is three fields, give a little extra room
|
||||
Data: make(Fields, 5),
|
||||
}
|
||||
}
|
||||
|
||||
// Returns the string representation from the reader and ultimately the
|
||||
// formatter.
|
||||
func (entry *Entry) String() (string, error) {
|
||||
serialized, err := entry.Logger.Formatter.Format(entry)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
str := string(serialized)
|
||||
return str, nil
|
||||
}
|
||||
|
||||
// Add an error as single field (using the key defined in ErrorKey) to the Entry.
|
||||
func (entry *Entry) WithError(err error) *Entry {
|
||||
return entry.WithField(ErrorKey, err)
|
||||
}
|
||||
|
||||
// Add a single field to the Entry.
|
||||
func (entry *Entry) WithField(key string, value interface{}) *Entry {
|
||||
return entry.WithFields(Fields{key: value})
|
||||
}
|
||||
|
||||
// Add a map of fields to the Entry.
|
||||
func (entry *Entry) WithFields(fields Fields) *Entry {
|
||||
data := make(Fields, len(entry.Data)+len(fields))
|
||||
for k, v := range entry.Data {
|
||||
data[k] = v
|
||||
}
|
||||
for k, v := range fields {
|
||||
data[k] = v
|
||||
}
|
||||
return &Entry{Logger: entry.Logger, Data: data}
|
||||
}
|
||||
|
||||
// This function is not declared with a pointer value because otherwise
|
||||
// race conditions will occur when using multiple goroutines
|
||||
func (entry Entry) log(level Level, msg string) {
|
||||
var buffer *bytes.Buffer
|
||||
entry.Time = time.Now()
|
||||
entry.Level = level
|
||||
entry.Message = msg
|
||||
|
||||
entry.Logger.mu.Lock()
|
||||
err := entry.Logger.Hooks.Fire(level, &entry)
|
||||
entry.Logger.mu.Unlock()
|
||||
if err != nil {
|
||||
entry.Logger.mu.Lock()
|
||||
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
|
||||
entry.Logger.mu.Unlock()
|
||||
}
|
||||
buffer = bufferPool.Get().(*bytes.Buffer)
|
||||
buffer.Reset()
|
||||
defer bufferPool.Put(buffer)
|
||||
entry.Buffer = buffer
|
||||
serialized, err := entry.Logger.Formatter.Format(&entry)
|
||||
entry.Buffer = nil
|
||||
if err != nil {
|
||||
entry.Logger.mu.Lock()
|
||||
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
|
||||
entry.Logger.mu.Unlock()
|
||||
} else {
|
||||
entry.Logger.mu.Lock()
|
||||
_, err = entry.Logger.Out.Write(serialized)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
|
||||
}
|
||||
entry.Logger.mu.Unlock()
|
||||
}
|
||||
|
||||
// To avoid Entry#log() returning a value that only would make sense for
|
||||
// panic() to use in Entry#Panic(), we avoid the allocation by checking
|
||||
// directly here.
|
||||
if level <= PanicLevel {
|
||||
panic(&entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Debug(args ...interface{}) {
|
||||
if entry.Logger.level() >= DebugLevel {
|
||||
entry.log(DebugLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Print(args ...interface{}) {
|
||||
entry.Info(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Info(args ...interface{}) {
|
||||
if entry.Logger.level() >= InfoLevel {
|
||||
entry.log(InfoLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warn(args ...interface{}) {
|
||||
if entry.Logger.level() >= WarnLevel {
|
||||
entry.log(WarnLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warning(args ...interface{}) {
|
||||
entry.Warn(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Error(args ...interface{}) {
|
||||
if entry.Logger.level() >= ErrorLevel {
|
||||
entry.log(ErrorLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatal(args ...interface{}) {
|
||||
if entry.Logger.level() >= FatalLevel {
|
||||
entry.log(FatalLevel, fmt.Sprint(args...))
|
||||
}
|
||||
Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panic(args ...interface{}) {
|
||||
if entry.Logger.level() >= PanicLevel {
|
||||
entry.log(PanicLevel, fmt.Sprint(args...))
|
||||
}
|
||||
panic(fmt.Sprint(args...))
|
||||
}
|
||||
|
||||
// Entry Printf family functions
|
||||
|
||||
func (entry *Entry) Debugf(format string, args ...interface{}) {
|
||||
if entry.Logger.level() >= DebugLevel {
|
||||
entry.Debug(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Infof(format string, args ...interface{}) {
|
||||
if entry.Logger.level() >= InfoLevel {
|
||||
entry.Info(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Printf(format string, args ...interface{}) {
|
||||
entry.Infof(format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warnf(format string, args ...interface{}) {
|
||||
if entry.Logger.level() >= WarnLevel {
|
||||
entry.Warn(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warningf(format string, args ...interface{}) {
|
||||
entry.Warnf(format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Errorf(format string, args ...interface{}) {
|
||||
if entry.Logger.level() >= ErrorLevel {
|
||||
entry.Error(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatalf(format string, args ...interface{}) {
|
||||
if entry.Logger.level() >= FatalLevel {
|
||||
entry.Fatal(fmt.Sprintf(format, args...))
|
||||
}
|
||||
Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicf(format string, args ...interface{}) {
|
||||
if entry.Logger.level() >= PanicLevel {
|
||||
entry.Panic(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
// Entry Println family functions
|
||||
|
||||
func (entry *Entry) Debugln(args ...interface{}) {
|
||||
if entry.Logger.level() >= DebugLevel {
|
||||
entry.Debug(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Infoln(args ...interface{}) {
|
||||
if entry.Logger.level() >= InfoLevel {
|
||||
entry.Info(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Println(args ...interface{}) {
|
||||
entry.Infoln(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warnln(args ...interface{}) {
|
||||
if entry.Logger.level() >= WarnLevel {
|
||||
entry.Warn(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warningln(args ...interface{}) {
|
||||
entry.Warnln(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Errorln(args ...interface{}) {
|
||||
if entry.Logger.level() >= ErrorLevel {
|
||||
entry.Error(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatalln(args ...interface{}) {
|
||||
if entry.Logger.level() >= FatalLevel {
|
||||
entry.Fatal(entry.sprintlnn(args...))
|
||||
}
|
||||
Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicln(args ...interface{}) {
|
||||
if entry.Logger.level() >= PanicLevel {
|
||||
entry.Panic(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
// Sprintlnn => Sprint no newline. This is to get the behavior of how
|
||||
// fmt.Sprintln behaves, where spaces are always added between operands, regardless of
|
||||
// their type. Instead of vendoring the Sprintln implementation to spare a
|
||||
// string allocation, we do the simplest thing.
|
||||
func (entry *Entry) sprintlnn(args ...interface{}) string {
|
||||
msg := fmt.Sprintln(args...)
|
||||
return msg[:len(msg)-1]
|
||||
}
|
||||
77 vendor/github.com/Sirupsen/logrus/entry_test.go generated vendored
@@ -1,77 +0,0 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestEntryWithError(t *testing.T) {
|
||||
|
||||
assert := assert.New(t)
|
||||
|
||||
defer func() {
|
||||
ErrorKey = "error"
|
||||
}()
|
||||
|
||||
err := fmt.Errorf("kaboom at layer %d", 4711)
|
||||
|
||||
assert.Equal(err, WithError(err).Data["error"])
|
||||
|
||||
logger := New()
|
||||
logger.Out = &bytes.Buffer{}
|
||||
entry := NewEntry(logger)
|
||||
|
||||
assert.Equal(err, entry.WithError(err).Data["error"])
|
||||
|
||||
ErrorKey = "err"
|
||||
|
||||
assert.Equal(err, entry.WithError(err).Data["err"])
|
||||
|
||||
}
|
||||
|
||||
func TestEntryPanicln(t *testing.T) {
|
||||
errBoom := fmt.Errorf("boom time")
|
||||
|
||||
defer func() {
|
||||
p := recover()
|
||||
assert.NotNil(t, p)
|
||||
|
||||
switch pVal := p.(type) {
|
||||
case *Entry:
|
||||
assert.Equal(t, "kaboom", pVal.Message)
|
||||
assert.Equal(t, errBoom, pVal.Data["err"])
|
||||
default:
|
||||
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
|
||||
}
|
||||
}()
|
||||
|
||||
logger := New()
|
||||
logger.Out = &bytes.Buffer{}
|
||||
entry := NewEntry(logger)
|
||||
entry.WithField("err", errBoom).Panicln("kaboom")
|
||||
}
|
||||
|
||||
func TestEntryPanicf(t *testing.T) {
|
||||
errBoom := fmt.Errorf("boom again")
|
||||
|
||||
defer func() {
|
||||
p := recover()
|
||||
assert.NotNil(t, p)
|
||||
|
||||
switch pVal := p.(type) {
|
||||
case *Entry:
|
||||
assert.Equal(t, "kaboom true", pVal.Message)
|
||||
assert.Equal(t, errBoom, pVal.Data["err"])
|
||||
default:
|
||||
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
|
||||
}
|
||||
}()
|
||||
|
||||
logger := New()
|
||||
logger.Out = &bytes.Buffer{}
|
||||
entry := NewEntry(logger)
|
||||
entry.WithField("err", errBoom).Panicf("kaboom %v", true)
|
||||
}
|
||||
69 vendor/github.com/Sirupsen/logrus/example_basic_test.go generated vendored
@@ -1,69 +0,0 @@
|
||||
package logrus_test
|
||||
|
||||
import (
|
||||
"github.com/sirupsen/logrus"
|
||||
"os"
|
||||
)
|
||||
|
||||
func Example_basic() {
|
||||
var log = logrus.New()
|
||||
log.Formatter = new(logrus.JSONFormatter)
|
||||
log.Formatter = new(logrus.TextFormatter) //default
|
||||
log.Formatter.(*logrus.TextFormatter).DisableTimestamp = true // remove timestamp from test output
|
||||
log.Level = logrus.DebugLevel
|
||||
log.Out = os.Stdout
|
||||
|
||||
// file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
|
||||
// if err == nil {
|
||||
// log.Out = file
|
||||
// } else {
|
||||
// log.Info("Failed to log to file, using default stderr")
|
||||
// }
|
||||
|
||||
defer func() {
|
||||
err := recover()
|
||||
if err != nil {
|
||||
entry := err.(*logrus.Entry)
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"err_animal": entry.Data["animal"],
|
||||
"err_size": entry.Data["size"],
|
||||
"err_level": entry.Level,
|
||||
"err_message": entry.Message,
|
||||
"number": 100,
|
||||
}).Error("The ice breaks!") // or use Fatal() to force the process to exit with a nonzero code
|
||||
}
|
||||
}()
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"number": 8,
|
||||
}).Debug("Started observing beach")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"number": 122,
|
||||
}).Warn("The group's number increased tremendously!")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"temperature": -4,
|
||||
}).Debug("Temperature changes")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "orca",
|
||||
"size": 9009,
|
||||
}).Panic("It's over 9000!")
|
||||
|
||||
// Output:
|
||||
// level=debug msg="Started observing beach" animal=walrus number=8
|
||||
// level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
|
||||
// level=warning msg="The group's number increased tremendously!" number=122 omg=true
|
||||
// level=debug msg="Temperature changes" temperature=-4
|
||||
// level=panic msg="It's over 9000!" animal=orca size=9009
|
||||
// level=error msg="The ice breaks!" err_animal=orca err_level=panic err_message="It's over 9000!" err_size=9009 number=100 omg=true
|
||||
}
|
||||
35 vendor/github.com/Sirupsen/logrus/example_hook_test.go generated vendored
@@ -1,35 +0,0 @@
|
||||
package logrus_test
|
||||
|
||||
import (
|
||||
"github.com/sirupsen/logrus"
|
||||
"gopkg.in/gemnasium/logrus-airbrake-hook.v2"
|
||||
"os"
|
||||
)
|
||||
|
||||
func Example_hook() {
|
||||
var log = logrus.New()
|
||||
log.Formatter = new(logrus.TextFormatter) // default
|
||||
log.Formatter.(*logrus.TextFormatter).DisableTimestamp = true // remove timestamp from test output
|
||||
log.Hooks.Add(airbrake.NewHook(123, "xyz", "development"))
|
||||
log.Out = os.Stdout
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"number": 122,
|
||||
}).Warn("The group's number increased tremendously!")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"number": 100,
|
||||
}).Error("The ice breaks!")
|
||||
|
||||
// Output:
|
||||
// level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
|
||||
// level=warning msg="The group's number increased tremendously!" number=122 omg=true
|
||||
// level=error msg="The ice breaks!" number=100 omg=true
|
||||
}
|
||||
193 vendor/github.com/Sirupsen/logrus/exported.go generated vendored
@@ -1,193 +0,0 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
var (
|
||||
// std is the name of the standard logger in stdlib `log`
|
||||
std = New()
|
||||
)
|
||||
|
||||
func StandardLogger() *Logger {
|
||||
return std
|
||||
}
|
||||
|
||||
// SetOutput sets the standard logger output.
|
||||
func SetOutput(out io.Writer) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.Out = out
|
||||
}
|
||||
|
||||
// SetFormatter sets the standard logger formatter.
|
||||
func SetFormatter(formatter Formatter) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.Formatter = formatter
|
||||
}
|
||||
|
||||
// SetLevel sets the standard logger level.
|
||||
func SetLevel(level Level) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.SetLevel(level)
|
||||
}
|
||||
|
||||
// GetLevel returns the standard logger level.
|
||||
func GetLevel() Level {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
return std.level()
|
||||
}
|
||||
|
||||
// AddHook adds a hook to the standard logger hooks.
|
||||
func AddHook(hook Hook) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.Hooks.Add(hook)
|
||||
}
|
||||
|
||||
// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
|
||||
func WithError(err error) *Entry {
|
||||
return std.WithField(ErrorKey, err)
|
||||
}
|
||||
|
||||
// WithField creates an entry from the standard logger and adds a field to
|
||||
// it. If you want multiple fields, use `WithFields`.
|
||||
//
|
||||
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
|
||||
// or Panic on the Entry it returns.
|
||||
func WithField(key string, value interface{}) *Entry {
|
||||
return std.WithField(key, value)
|
||||
}
|
||||
|
||||
// WithFields creates an entry from the standard logger and adds multiple
|
||||
// fields to it. This is simply a helper for `WithField`, invoking it
|
||||
// once for each field.
|
||||
//
|
||||
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
|
||||
// or Panic on the Entry it returns.
|
||||
func WithFields(fields Fields) *Entry {
|
||||
return std.WithFields(fields)
|
||||
}
|
||||
|
||||
// Debug logs a message at level Debug on the standard logger.
|
||||
func Debug(args ...interface{}) {
|
||||
std.Debug(args...)
|
||||
}
|
||||
|
||||
// Print logs a message at level Info on the standard logger.
|
||||
func Print(args ...interface{}) {
|
||||
std.Print(args...)
|
||||
}
|
||||
|
||||
// Info logs a message at level Info on the standard logger.
|
||||
func Info(args ...interface{}) {
|
||||
std.Info(args...)
|
||||
}
|
||||
|
||||
// Warn logs a message at level Warn on the standard logger.
|
||||
func Warn(args ...interface{}) {
|
||||
std.Warn(args...)
|
||||
}
|
||||
|
||||
// Warning logs a message at level Warn on the standard logger.
|
||||
func Warning(args ...interface{}) {
|
||||
std.Warning(args...)
|
||||
}
|
||||
|
||||
// Error logs a message at level Error on the standard logger.
|
||||
func Error(args ...interface{}) {
|
||||
std.Error(args...)
|
||||
}
|
||||
|
||||
// Panic logs a message at level Panic on the standard logger.
|
||||
func Panic(args ...interface{}) {
|
||||
std.Panic(args...)
|
||||
}
|
||||
|
||||
// Fatal logs a message at level Fatal on the standard logger.
|
||||
func Fatal(args ...interface{}) {
|
||||
std.Fatal(args...)
|
||||
}
|
||||
|
||||
// Debugf logs a message at level Debug on the standard logger.
|
||||
func Debugf(format string, args ...interface{}) {
|
||||
std.Debugf(format, args...)
|
||||
}
|
||||
|
||||
// Printf logs a message at level Info on the standard logger.
|
||||
func Printf(format string, args ...interface{}) {
|
||||
std.Printf(format, args...)
|
||||
}
|
||||
|
||||
// Infof logs a message at level Info on the standard logger.
|
||||
func Infof(format string, args ...interface{}) {
|
||||
std.Infof(format, args...)
|
||||
}
|
||||
|
||||
// Warnf logs a message at level Warn on the standard logger.
|
||||
func Warnf(format string, args ...interface{}) {
|
||||
std.Warnf(format, args...)
|
||||
}
|
||||
|
||||
// Warningf logs a message at level Warn on the standard logger.
|
||||
func Warningf(format string, args ...interface{}) {
|
||||
std.Warningf(format, args...)
|
||||
}
|
||||
|
||||
// Errorf logs a message at level Error on the standard logger.
|
||||
func Errorf(format string, args ...interface{}) {
|
||||
std.Errorf(format, args...)
|
||||
}
|
||||
|
||||
// Panicf logs a message at level Panic on the standard logger.
|
||||
func Panicf(format string, args ...interface{}) {
|
||||
std.Panicf(format, args...)
|
||||
}
|
||||
|
||||
// Fatalf logs a message at level Fatal on the standard logger.
|
||||
func Fatalf(format string, args ...interface{}) {
|
||||
std.Fatalf(format, args...)
|
||||
}
|
||||
|
||||
// Debugln logs a message at level Debug on the standard logger.
|
||||
func Debugln(args ...interface{}) {
|
||||
std.Debugln(args...)
|
||||
}
|
||||
|
||||
// Println logs a message at level Info on the standard logger.
|
||||
func Println(args ...interface{}) {
|
||||
std.Println(args...)
|
||||
}
|
||||
|
||||
// Infoln logs a message at level Info on the standard logger.
|
||||
func Infoln(args ...interface{}) {
|
||||
std.Infoln(args...)
|
||||
}
|
||||
|
||||
// Warnln logs a message at level Warn on the standard logger.
|
||||
func Warnln(args ...interface{}) {
|
||||
std.Warnln(args...)
|
||||
}
|
||||
|
||||
// Warningln logs a message at level Warn on the standard logger.
|
||||
func Warningln(args ...interface{}) {
|
||||
std.Warningln(args...)
|
||||
}
|
||||
|
||||
// Errorln logs a message at level Error on the standard logger.
|
||||
func Errorln(args ...interface{}) {
|
||||
std.Errorln(args...)
|
||||
}
|
||||
|
||||
// Panicln logs a message at level Panic on the standard logger.
|
||||
func Panicln(args ...interface{}) {
|
||||
std.Panicln(args...)
|
||||
}
|
||||
|
||||
// Fatalln logs a message at level Fatal on the standard logger.
|
||||
func Fatalln(args ...interface{}) {
|
||||
std.Fatalln(args...)
|
||||
}
|
||||
45 vendor/github.com/Sirupsen/logrus/formatter.go generated vendored
@@ -1,45 +0,0 @@
|
||||
package logrus
|
||||
|
||||
import "time"
|
||||
|
||||
const defaultTimestampFormat = time.RFC3339
|
||||
|
||||
// The Formatter interface is used to implement a custom Formatter. It takes an
|
||||
// `Entry`. It exposes all the fields, including the default ones:
|
||||
//
|
||||
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
|
||||
// * `entry.Data["time"]`. The timestamp.
|
||||
// * `entry.Data["level"]. The level the entry was logged at.
|
||||
//
|
||||
// Any additional fields added with `WithField` or `WithFields` are also in
|
||||
// `entry.Data`. Format is expected to return an array of bytes which are then
|
||||
// logged to `logger.Out`.
|
||||
type Formatter interface {
|
||||
Format(*Entry) ([]byte, error)
|
||||
}
|
||||
|
||||
// This is to not silently overwrite `time`, `msg` and `level` fields when
|
||||
// dumping it. If this code wasn't there doing:
|
||||
//
|
||||
// logrus.WithField("level", 1).Info("hello")
|
||||
//
|
||||
// Would just silently drop the user provided level. Instead with this code
|
||||
// it'll be logged as:
|
||||
//
|
||||
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
|
||||
//
|
||||
// It's not exported because it's still using Data in an opinionated way. It's to
|
||||
// avoid code duplication between the two default formatters.
|
||||
func prefixFieldClashes(data Fields) {
|
||||
if t, ok := data["time"]; ok {
|
||||
data["fields.time"] = t
|
||||
}
|
||||
|
||||
if m, ok := data["msg"]; ok {
|
||||
data["fields.msg"] = m
|
||||
}
|
||||
|
||||
if l, ok := data["level"]; ok {
|
||||
data["fields.level"] = l
|
||||
}
|
||||
}
|
||||
101 vendor/github.com/Sirupsen/logrus/formatter_bench_test.go generated vendored
@@ -1,101 +0,0 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// smallFields is a small size data set for benchmarking
|
||||
var smallFields = Fields{
|
||||
"foo": "bar",
|
||||
"baz": "qux",
|
||||
"one": "two",
|
||||
"three": "four",
|
||||
}
|
||||
|
||||
// largeFields is a large size data set for benchmarking
|
||||
var largeFields = Fields{
|
||||
"foo": "bar",
|
||||
"baz": "qux",
|
||||
"one": "two",
|
||||
"three": "four",
|
||||
"five": "six",
|
||||
"seven": "eight",
|
||||
"nine": "ten",
|
||||
"eleven": "twelve",
|
||||
"thirteen": "fourteen",
|
||||
"fifteen": "sixteen",
|
||||
"seventeen": "eighteen",
|
||||
"nineteen": "twenty",
|
||||
"a": "b",
|
||||
"c": "d",
|
||||
"e": "f",
|
||||
"g": "h",
|
||||
"i": "j",
|
||||
"k": "l",
|
||||
"m": "n",
|
||||
"o": "p",
|
||||
"q": "r",
|
||||
"s": "t",
|
||||
"u": "v",
|
||||
"w": "x",
|
||||
"y": "z",
|
||||
"this": "will",
|
||||
"make": "thirty",
|
||||
"entries": "yeah",
|
||||
}
|
||||
|
||||
var errorFields = Fields{
|
||||
"foo": fmt.Errorf("bar"),
|
||||
"baz": fmt.Errorf("qux"),
|
||||
}
|
||||
|
||||
func BenchmarkErrorTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields)
|
||||
}
|
||||
|
||||
func BenchmarkSmallTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
|
||||
}
|
||||
|
||||
func BenchmarkLargeTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields)
|
||||
}
|
||||
|
||||
func BenchmarkSmallColoredTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields)
|
||||
}
|
||||
|
||||
func BenchmarkLargeColoredTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields)
|
||||
}
|
||||
|
||||
func BenchmarkSmallJSONFormatter(b *testing.B) {
|
||||
doBenchmark(b, &JSONFormatter{}, smallFields)
|
||||
}
|
||||
|
||||
func BenchmarkLargeJSONFormatter(b *testing.B) {
|
||||
doBenchmark(b, &JSONFormatter{}, largeFields)
|
||||
}
|
||||
|
||||
func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
|
||||
logger := New()
|
||||
|
||||
entry := &Entry{
|
||||
Time: time.Time{},
|
||||
Level: InfoLevel,
|
||||
Message: "message",
|
||||
Data: fields,
|
||||
Logger: logger,
|
||||
}
|
||||
var d []byte
|
||||
var err error
|
||||
for i := 0; i < b.N; i++ {
|
||||
d, err = formatter.Format(entry)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.SetBytes(int64(len(d)))
|
||||
}
|
||||
}
|
||||
144 vendor/github.com/Sirupsen/logrus/hook_test.go generated vendored
@@ -1,144 +0,0 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type TestHook struct {
|
||||
Fired bool
|
||||
}
|
||||
|
||||
func (hook *TestHook) Fire(entry *Entry) error {
|
||||
hook.Fired = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hook *TestHook) Levels() []Level {
|
||||
return []Level{
|
||||
DebugLevel,
|
||||
InfoLevel,
|
||||
WarnLevel,
|
||||
ErrorLevel,
|
||||
FatalLevel,
|
||||
PanicLevel,
|
||||
}
|
||||
}
|
||||
|
||||
func TestHookFires(t *testing.T) {
|
||||
hook := new(TestHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
assert.Equal(t, hook.Fired, false)
|
||||
|
||||
log.Print("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, hook.Fired, true)
|
||||
})
|
||||
}
|
||||
|
||||
type ModifyHook struct {
|
||||
}
|
||||
|
||||
func (hook *ModifyHook) Fire(entry *Entry) error {
|
||||
entry.Data["wow"] = "whale"
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hook *ModifyHook) Levels() []Level {
|
||||
return []Level{
|
||||
DebugLevel,
|
||||
InfoLevel,
|
||||
WarnLevel,
|
||||
ErrorLevel,
|
||||
FatalLevel,
|
||||
PanicLevel,
|
||||
}
|
||||
}
|
||||
|
||||
func TestHookCanModifyEntry(t *testing.T) {
|
||||
hook := new(ModifyHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
log.WithField("wow", "elephant").Print("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["wow"], "whale")
|
||||
})
|
||||
}
|
||||
|
||||
func TestCanFireMultipleHooks(t *testing.T) {
|
||||
hook1 := new(ModifyHook)
|
||||
hook2 := new(TestHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook1)
|
||||
log.Hooks.Add(hook2)
|
||||
|
||||
log.WithField("wow", "elephant").Print("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["wow"], "whale")
|
||||
assert.Equal(t, hook2.Fired, true)
|
||||
})
|
||||
}
|
||||
|
||||
type ErrorHook struct {
|
||||
Fired bool
|
||||
}
|
||||
|
||||
func (hook *ErrorHook) Fire(entry *Entry) error {
|
||||
hook.Fired = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hook *ErrorHook) Levels() []Level {
|
||||
return []Level{
|
||||
ErrorLevel,
|
||||
}
|
||||
}
|
||||
|
||||
func TestErrorHookShouldntFireOnInfo(t *testing.T) {
|
||||
hook := new(ErrorHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
log.Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, hook.Fired, false)
|
||||
})
|
||||
}
|
||||
|
||||
func TestErrorHookShouldFireOnError(t *testing.T) {
|
||||
hook := new(ErrorHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
log.Error("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, hook.Fired, true)
|
||||
})
|
||||
}
|
||||
|
||||
func TestAddHookRace(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2)
|
||||
hook := new(ErrorHook)
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
log.AddHook(hook)
|
||||
}()
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
log.Error("test")
|
||||
}()
|
||||
wg.Wait()
|
||||
}, func(fields Fields) {
|
||||
// the line may have been logged
|
||||
// before the hook was added, so we can't
|
||||
// actually assert on the hook
|
||||
})
|
||||
}
|
||||
34 vendor/github.com/Sirupsen/logrus/hooks.go generated vendored
@@ -1,34 +0,0 @@
package logrus

// A hook to be fired when logging on the logging levels returned from
// `Levels()` on your implementation of the interface. Note that this is not
// fired in a goroutine or a channel with workers, you should handle such
// functionality yourself if your call is non-blocking and you don't wish for
// the logging calls for levels returned from `Levels()` to block.
type Hook interface {
	Levels() []Level
	Fire(*Entry) error
}

// Internal type for storing the hooks on a logger instance.
type LevelHooks map[Level][]Hook

// Add a hook to an instance of logger. This is called with
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
func (hooks LevelHooks) Add(hook Hook) {
	for _, level := range hook.Levels() {
		hooks[level] = append(hooks[level], hook)
	}
}

// Fire all the hooks for the passed level. Used by `entry.log` to fire
// appropriate hooks for a log entry.
func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
	for _, hook := range hooks[level] {
		if err := hook.Fire(entry); err != nil {
			return err
		}
	}

	return nil
}
39 vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md generated vendored
@@ -1,39 +0,0 @@
# Syslog Hooks for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>

## Usage

```go
import (
  "log/syslog"
  "github.com/sirupsen/logrus"
  lSyslog "github.com/sirupsen/logrus/hooks/syslog"
)

func main() {
  log := logrus.New()
  hook, err := lSyslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")

  if err == nil {
    log.Hooks.Add(hook)
  }
}
```

If you want to connect to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). Just assign empty string to the first two parameters of `NewSyslogHook`. It should look like the following.

```go
import (
  "log/syslog"
  "github.com/sirupsen/logrus"
  lSyslog "github.com/sirupsen/logrus/hooks/syslog"
)

func main() {
  log := logrus.New()
  hook, err := lSyslog.NewSyslogHook("", "", syslog.LOG_INFO, "")

  if err == nil {
    log.Hooks.Add(hook)
  }
}
```
55 vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go generated vendored
@@ -1,55 +0,0 @@
// +build !windows,!nacl,!plan9

package syslog

import (
	"fmt"
	"log/syslog"
	"os"

	"github.com/sirupsen/logrus"
)

// SyslogHook to send logs via syslog.
type SyslogHook struct {
	Writer        *syslog.Writer
	SyslogNetwork string
	SyslogRaddr   string
}

// Creates a hook to be added to an instance of logger. This is called with
// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
// `if err == nil { log.Hooks.Add(hook) }`
func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) {
	w, err := syslog.Dial(network, raddr, priority, tag)
	return &SyslogHook{w, network, raddr}, err
}

func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
	line, err := entry.String()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
		return err
	}

	switch entry.Level {
	case logrus.PanicLevel:
		return hook.Writer.Crit(line)
	case logrus.FatalLevel:
		return hook.Writer.Crit(line)
	case logrus.ErrorLevel:
		return hook.Writer.Err(line)
	case logrus.WarnLevel:
		return hook.Writer.Warning(line)
	case logrus.InfoLevel:
		return hook.Writer.Info(line)
	case logrus.DebugLevel:
		return hook.Writer.Debug(line)
	default:
		return nil
	}
}

func (hook *SyslogHook) Levels() []logrus.Level {
	return logrus.AllLevels
}
27 vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go generated vendored
@@ -1,27 +0,0 @@
package syslog

import (
	"log/syslog"
	"testing"

	"github.com/sirupsen/logrus"
)

func TestLocalhostAddAndPrint(t *testing.T) {
	log := logrus.New()
	hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")

	if err != nil {
		t.Errorf("Unable to connect to local syslog.")
	}

	log.Hooks.Add(hook)

	for _, level := range hook.Levels() {
		if len(log.Hooks[level]) != 1 {
			t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level]))
		}
	}

	log.Info("Congratulations!")
}
95 vendor/github.com/Sirupsen/logrus/hooks/test/test.go generated vendored
@@ -1,95 +0,0 @@
|
||||
// The Test package is used for testing logrus. It is here for backwards
|
||||
// compatibility from when logrus' organization was upper-case. Please use
|
||||
// lower-case logrus and the `null` package instead of this one.
|
||||
package test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"sync"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Hook is a hook designed for dealing with logs in test scenarios.
|
||||
type Hook struct {
|
||||
// Entries is an array of all entries that have been received by this hook.
|
||||
// For safe access, use the AllEntries() method, rather than reading this
|
||||
// value directly.
|
||||
Entries []*logrus.Entry
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
// NewGlobal installs a test hook for the global logger.
|
||||
func NewGlobal() *Hook {
|
||||
|
||||
hook := new(Hook)
|
||||
logrus.AddHook(hook)
|
||||
|
||||
return hook
|
||||
|
||||
}
|
||||
|
||||
// NewLocal installs a test hook for a given local logger.
|
||||
func NewLocal(logger *logrus.Logger) *Hook {
|
||||
|
||||
hook := new(Hook)
|
||||
logger.Hooks.Add(hook)
|
||||
|
||||
return hook
|
||||
|
||||
}
|
||||
|
||||
// NewNullLogger creates a discarding logger and installs the test hook.
|
||||
func NewNullLogger() (*logrus.Logger, *Hook) {
|
||||
|
||||
logger := logrus.New()
|
||||
logger.Out = ioutil.Discard
|
||||
|
||||
return logger, NewLocal(logger)
|
||||
|
||||
}
|
||||
|
||||
func (t *Hook) Fire(e *logrus.Entry) error {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
t.Entries = append(t.Entries, e)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Hook) Levels() []logrus.Level {
|
||||
return logrus.AllLevels
|
||||
}
|
||||
|
||||
// LastEntry returns the last entry that was logged or nil.
|
||||
func (t *Hook) LastEntry() *logrus.Entry {
|
||||
t.mu.RLock()
|
||||
defer t.mu.RUnlock()
|
||||
i := len(t.Entries) - 1
|
||||
if i < 0 {
|
||||
return nil
|
||||
}
|
||||
// Make a copy, for safety
|
||||
e := *t.Entries[i]
|
||||
return &e
|
||||
}
|
||||
|
||||
// AllEntries returns all entries that were logged.
|
||||
func (t *Hook) AllEntries() []*logrus.Entry {
|
||||
t.mu.RLock()
|
||||
defer t.mu.RUnlock()
|
||||
// Make a copy so the returned value won't race with future log requests
|
||||
entries := make([]*logrus.Entry, len(t.Entries))
|
||||
for i, entry := range t.Entries {
|
||||
// Make a copy, for safety
|
||||
e := *entry
|
||||
entries[i] = &e
|
||||
}
|
||||
return entries
|
||||
}
|
||||
|
||||
// Reset removes all Entries from this test hook.
|
||||
func (t *Hook) Reset() {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
t.Entries = make([]*logrus.Entry, 0)
|
||||
}
|
||||
39 vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go generated vendored
@@ -1,39 +0,0 @@
|
||||
package test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestAllHooks(t *testing.T) {
|
||||
|
||||
assert := assert.New(t)
|
||||
|
||||
logger, hook := NewNullLogger()
|
||||
assert.Nil(hook.LastEntry())
|
||||
assert.Equal(0, len(hook.Entries))
|
||||
|
||||
logger.Error("Hello error")
|
||||
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
|
||||
assert.Equal("Hello error", hook.LastEntry().Message)
|
||||
assert.Equal(1, len(hook.Entries))
|
||||
|
||||
logger.Warn("Hello warning")
|
||||
assert.Equal(logrus.WarnLevel, hook.LastEntry().Level)
|
||||
assert.Equal("Hello warning", hook.LastEntry().Message)
|
||||
assert.Equal(2, len(hook.Entries))
|
||||
|
||||
hook.Reset()
|
||||
assert.Nil(hook.LastEntry())
|
||||
assert.Equal(0, len(hook.Entries))
|
||||
|
||||
hook = NewGlobal()
|
||||
|
||||
logrus.Error("Hello error")
|
||||
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
|
||||
assert.Equal("Hello error", hook.LastEntry().Message)
|
||||
assert.Equal(1, len(hook.Entries))
|
||||
|
||||
}
|
||||
79 vendor/github.com/Sirupsen/logrus/json_formatter.go generated vendored
@@ -1,79 +0,0 @@
package logrus

import (
	"encoding/json"
	"fmt"
)

type fieldKey string

// FieldMap allows customization of the key names for default fields.
type FieldMap map[fieldKey]string

// Default key names for the default fields
const (
	FieldKeyMsg   = "msg"
	FieldKeyLevel = "level"
	FieldKeyTime  = "time"
)

func (f FieldMap) resolve(key fieldKey) string {
	if k, ok := f[key]; ok {
		return k
	}

	return string(key)
}

// JSONFormatter formats logs into parsable json
type JSONFormatter struct {
	// TimestampFormat sets the format used for marshaling timestamps.
	TimestampFormat string

	// DisableTimestamp allows disabling automatic timestamps in output
	DisableTimestamp bool

	// FieldMap allows users to customize the names of keys for default fields.
	// As an example:
	// formatter := &JSONFormatter{
	//   FieldMap: FieldMap{
	//      FieldKeyTime: "@timestamp",
	//      FieldKeyLevel: "@level",
	//      FieldKeyMsg: "@message",
	//   },
	// }
	FieldMap FieldMap
}

// Format renders a single log entry
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
	data := make(Fields, len(entry.Data)+3)
	for k, v := range entry.Data {
		switch v := v.(type) {
		case error:
			// Otherwise errors are ignored by `encoding/json`
			// https://github.com/sirupsen/logrus/issues/137
			data[k] = v.Error()
		default:
			data[k] = v
		}
	}
	prefixFieldClashes(data)

	timestampFormat := f.TimestampFormat
	if timestampFormat == "" {
		timestampFormat = defaultTimestampFormat
	}

	if !f.DisableTimestamp {
		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
	}
	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()

	serialized, err := json.Marshal(data)
	if err != nil {
		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
	}
	return append(serialized, '\n'), nil
}
199 vendor/github.com/Sirupsen/logrus/json_formatter_test.go generated vendored
@@ -1,199 +0,0 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestErrorNotLost(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("error", errors.New("wild walrus")))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
entry := make(map[string]interface{})
|
||||
err = json.Unmarshal(b, &entry)
|
||||
if err != nil {
|
||||
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||
}
|
||||
|
||||
if entry["error"] != "wild walrus" {
|
||||
t.Fatal("Error field not set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestErrorNotLostOnFieldNotNamedError(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("omg", errors.New("wild walrus")))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
entry := make(map[string]interface{})
|
||||
err = json.Unmarshal(b, &entry)
|
||||
if err != nil {
|
||||
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||
}
|
||||
|
||||
if entry["omg"] != "wild walrus" {
|
||||
t.Fatal("Error field not set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFieldClashWithTime(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("time", "right now!"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
entry := make(map[string]interface{})
|
||||
err = json.Unmarshal(b, &entry)
|
||||
if err != nil {
|
||||
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||
}
|
||||
|
||||
if entry["fields.time"] != "right now!" {
|
||||
t.Fatal("fields.time not set to original time field")
|
||||
}
|
||||
|
||||
if entry["time"] != "0001-01-01T00:00:00Z" {
|
||||
t.Fatal("time field not set to current time, was: ", entry["time"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestFieldClashWithMsg(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("msg", "something"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
entry := make(map[string]interface{})
|
||||
err = json.Unmarshal(b, &entry)
|
||||
if err != nil {
|
||||
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||
}
|
||||
|
||||
if entry["fields.msg"] != "something" {
|
||||
t.Fatal("fields.msg not set to original msg field")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFieldClashWithLevel(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("level", "something"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
entry := make(map[string]interface{})
|
||||
err = json.Unmarshal(b, &entry)
|
||||
if err != nil {
|
||||
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||
}
|
||||
|
||||
if entry["fields.level"] != "something" {
|
||||
t.Fatal("fields.level not set to original level field")
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSONEntryEndsWithNewline(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("level", "something"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
if b[len(b)-1] != '\n' {
|
||||
t.Fatal("Expected JSON log entry to end with a newline")
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSONMessageKey(t *testing.T) {
|
||||
formatter := &JSONFormatter{
|
||||
FieldMap: FieldMap{
|
||||
FieldKeyMsg: "message",
|
||||
},
|
||||
}
|
||||
|
||||
b, err := formatter.Format(&Entry{Message: "oh hai"})
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
s := string(b)
|
||||
if !(strings.Contains(s, "message") && strings.Contains(s, "oh hai")) {
|
||||
t.Fatal("Expected JSON to format message key")
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSONLevelKey(t *testing.T) {
|
||||
formatter := &JSONFormatter{
|
||||
FieldMap: FieldMap{
|
||||
FieldKeyLevel: "somelevel",
|
||||
},
|
||||
}
|
||||
|
||||
b, err := formatter.Format(WithField("level", "something"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
s := string(b)
|
||||
if !strings.Contains(s, "somelevel") {
|
||||
t.Fatal("Expected JSON to format level key")
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSONTimeKey(t *testing.T) {
|
||||
formatter := &JSONFormatter{
|
||||
FieldMap: FieldMap{
|
||||
FieldKeyTime: "timeywimey",
|
||||
},
|
||||
}
|
||||
|
||||
b, err := formatter.Format(WithField("level", "something"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
s := string(b)
|
||||
if !strings.Contains(s, "timeywimey") {
|
||||
t.Fatal("Expected JSON to format time key")
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSONDisableTimestamp(t *testing.T) {
|
||||
formatter := &JSONFormatter{
|
||||
DisableTimestamp: true,
|
||||
}
|
||||
|
||||
b, err := formatter.Format(WithField("level", "something"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
s := string(b)
|
||||
if strings.Contains(s, FieldKeyTime) {
|
||||
t.Error("Did not prevent timestamp", s)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSONEnableTimestamp(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("level", "something"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
s := string(b)
|
||||
if !strings.Contains(s, FieldKeyTime) {
|
||||
t.Error("Timestamp not present", s)
|
||||
}
|
||||
}
|
||||
323 vendor/github.com/Sirupsen/logrus/logger.go generated vendored
@@ -1,323 +0,0 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
type Logger struct {
|
||||
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
|
||||
// file, or leave it default which is `os.Stderr`. You can also set this to
|
||||
// something more adventorous, such as logging to Kafka.
|
||||
Out io.Writer
|
||||
// Hooks for the logger instance. These allow firing events based on logging
|
||||
// levels and log entries. For example, to send errors to an error tracking
|
||||
// service, log to StatsD or dump the core on fatal errors.
|
||||
Hooks LevelHooks
|
||||
// All log entries pass through the formatter before logged to Out. The
|
||||
// included formatters are `TextFormatter` and `JSONFormatter` for which
|
||||
// TextFormatter is the default. In development (when a TTY is attached) it
|
||||
// logs with colors, but to a file it wouldn't. You can easily implement your
|
||||
// own that implements the `Formatter` interface, see the `README` or included
|
||||
// formatters for examples.
|
||||
Formatter Formatter
|
||||
// The logging level the logger should log at. This is typically (and defaults
|
||||
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
|
||||
// logged.
|
||||
Level Level
|
||||
// Used to sync writing to the log. Locking is enabled by Default
|
||||
mu MutexWrap
|
||||
// Reusable empty entry
|
||||
entryPool sync.Pool
|
||||
}
|
||||
|
||||
type MutexWrap struct {
|
||||
lock sync.Mutex
|
||||
disabled bool
|
||||
}
|
||||
|
||||
func (mw *MutexWrap) Lock() {
|
||||
if !mw.disabled {
|
||||
mw.lock.Lock()
|
||||
}
|
||||
}
|
||||
|
||||
func (mw *MutexWrap) Unlock() {
|
||||
if !mw.disabled {
|
||||
mw.lock.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (mw *MutexWrap) Disable() {
|
||||
mw.disabled = true
|
||||
}
|
||||
|
||||
// Creates a new logger. Configuration should be set by changing `Formatter`,
|
||||
// `Out` and `Hooks` directly on the default logger instance. You can also just
|
||||
// instantiate your own:
|
||||
//
|
||||
// var log = &Logger{
|
||||
// Out: os.Stderr,
|
||||
// Formatter: new(JSONFormatter),
|
||||
// Hooks: make(LevelHooks),
|
||||
// Level: logrus.DebugLevel,
|
||||
// }
|
||||
//
|
||||
// It's recommended to make this a global instance called `log`.
|
||||
func New() *Logger {
|
||||
return &Logger{
|
||||
Out: os.Stderr,
|
||||
Formatter: new(TextFormatter),
|
||||
Hooks: make(LevelHooks),
|
||||
Level: InfoLevel,
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) newEntry() *Entry {
|
||||
entry, ok := logger.entryPool.Get().(*Entry)
|
||||
if ok {
|
||||
return entry
|
||||
}
|
||||
return NewEntry(logger)
|
||||
}
|
||||
|
||||
func (logger *Logger) releaseEntry(entry *Entry) {
|
||||
logger.entryPool.Put(entry)
|
||||
}
|
||||
|
||||
// Adds a field to the log entry, note that it doesn't log until you call
|
||||
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
|
||||
// If you want multiple fields, use `WithFields`.
|
||||
func (logger *Logger) WithField(key string, value interface{}) *Entry {
|
||||
entry := logger.newEntry()
|
||||
defer logger.releaseEntry(entry)
|
||||
return entry.WithField(key, value)
|
||||
}
|
||||
|
||||
// Adds a struct of fields to the log entry. All it does is call `WithField` for
|
||||
// each `Field`.
|
||||
func (logger *Logger) WithFields(fields Fields) *Entry {
|
||||
entry := logger.newEntry()
|
||||
defer logger.releaseEntry(entry)
|
||||
return entry.WithFields(fields)
|
||||
}
|
||||
|
||||
// Add an error as single field to the log entry. All it does is call
|
||||
// `WithError` for the given `error`.
|
||||
func (logger *Logger) WithError(err error) *Entry {
|
||||
entry := logger.newEntry()
|
||||
defer logger.releaseEntry(entry)
|
||||
return entry.WithError(err)
|
||||
}
|
||||
|
||||
func (logger *Logger) Debugf(format string, args ...interface{}) {
|
||||
if logger.level() >= DebugLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Debugf(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Infof(format string, args ...interface{}) {
|
||||
if logger.level() >= InfoLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Infof(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Printf(format string, args ...interface{}) {
|
||||
entry := logger.newEntry()
|
||||
entry.Printf(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warnf(format string, args ...interface{}) {
|
||||
if logger.level() >= WarnLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Warnf(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Warningf(format string, args ...interface{}) {
|
||||
if logger.level() >= WarnLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Warnf(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Errorf(format string, args ...interface{}) {
|
||||
if logger.level() >= ErrorLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Errorf(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Fatalf(format string, args ...interface{}) {
|
||||
if logger.level() >= FatalLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Fatalf(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
Exit(1)
|
||||
}
|
||||
|
||||
func (logger *Logger) Panicf(format string, args ...interface{}) {
|
||||
if logger.level() >= PanicLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Panicf(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Debug(args ...interface{}) {
|
||||
if logger.level() >= DebugLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Debug(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Info(args ...interface{}) {
|
||||
if logger.level() >= InfoLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Info(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Print(args ...interface{}) {
|
||||
entry := logger.newEntry()
|
||||
entry.Info(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warn(args ...interface{}) {
|
||||
if logger.level() >= WarnLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Warn(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Warning(args ...interface{}) {
|
||||
if logger.level() >= WarnLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Warn(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Error(args ...interface{}) {
|
||||
if logger.level() >= ErrorLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Error(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Fatal(args ...interface{}) {
|
||||
if logger.level() >= FatalLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Fatal(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
Exit(1)
|
||||
}
|
||||
|
||||
func (logger *Logger) Panic(args ...interface{}) {
|
||||
if logger.level() >= PanicLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Panic(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Debugln(args ...interface{}) {
|
||||
if logger.level() >= DebugLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Debugln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Infoln(args ...interface{}) {
|
||||
if logger.level() >= InfoLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Infoln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Println(args ...interface{}) {
|
||||
entry := logger.newEntry()
|
||||
entry.Println(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warnln(args ...interface{}) {
|
||||
if logger.level() >= WarnLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Warnln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Warningln(args ...interface{}) {
|
||||
if logger.level() >= WarnLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Warnln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Errorln(args ...interface{}) {
|
||||
if logger.level() >= ErrorLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Errorln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Fatalln(args ...interface{}) {
|
||||
if logger.level() >= FatalLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Fatalln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
Exit(1)
|
||||
}
|
||||
|
||||
func (logger *Logger) Panicln(args ...interface{}) {
|
||||
if logger.level() >= PanicLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Panicln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
//When file is opened with appending mode, it's safe to
|
||||
//write concurrently to a file (within 4k message on Linux).
|
||||
//In these cases user can choose to disable the lock.
|
||||
func (logger *Logger) SetNoLock() {
|
||||
logger.mu.Disable()
|
||||
}
|
||||
|
||||
func (logger *Logger) level() Level {
|
||||
return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
|
||||
}
|
||||
|
||||
func (logger *Logger) SetLevel(level Level) {
|
||||
atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
|
||||
}
|
||||
|
||||
func (logger *Logger) AddHook(hook Hook) {
|
||||
logger.mu.Lock()
|
||||
defer logger.mu.Unlock()
|
||||
logger.Hooks.Add(hook)
|
||||
}
|
||||
61 vendor/github.com/Sirupsen/logrus/logger_bench_test.go generated vendored
@@ -1,61 +0,0 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// smallFields is a small size data set for benchmarking
|
||||
var loggerFields = Fields{
|
||||
"foo": "bar",
|
||||
"baz": "qux",
|
||||
"one": "two",
|
||||
"three": "four",
|
||||
}
|
||||
|
||||
func BenchmarkDummyLogger(b *testing.B) {
|
||||
nullf, err := os.OpenFile("/dev/null", os.O_WRONLY, 0666)
|
||||
if err != nil {
|
||||
b.Fatalf("%v", err)
|
||||
}
|
||||
defer nullf.Close()
|
||||
doLoggerBenchmark(b, nullf, &TextFormatter{DisableColors: true}, smallFields)
|
||||
}
|
||||
|
||||
func BenchmarkDummyLoggerNoLock(b *testing.B) {
|
||||
nullf, err := os.OpenFile("/dev/null", os.O_WRONLY|os.O_APPEND, 0666)
|
||||
if err != nil {
|
||||
b.Fatalf("%v", err)
|
||||
}
|
||||
defer nullf.Close()
|
||||
doLoggerBenchmarkNoLock(b, nullf, &TextFormatter{DisableColors: true}, smallFields)
|
||||
}
|
||||
|
||||
func doLoggerBenchmark(b *testing.B, out *os.File, formatter Formatter, fields Fields) {
|
||||
logger := Logger{
|
||||
Out: out,
|
||||
Level: InfoLevel,
|
||||
Formatter: formatter,
|
||||
}
|
||||
entry := logger.WithFields(fields)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
entry.Info("aaa")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func doLoggerBenchmarkNoLock(b *testing.B, out *os.File, formatter Formatter, fields Fields) {
|
||||
logger := Logger{
|
||||
Out: out,
|
||||
Level: InfoLevel,
|
||||
Formatter: formatter,
|
||||
}
|
||||
logger.SetNoLock()
|
||||
entry := logger.WithFields(fields)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
entry.Info("aaa")
|
||||
}
|
||||
})
|
||||
}
|
||||
143 vendor/github.com/Sirupsen/logrus/logrus.go generated vendored
@@ -1,143 +0,0 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Fields type, used to pass to `WithFields`.
|
||||
type Fields map[string]interface{}
|
||||
|
||||
// Level type
|
||||
type Level uint32
|
||||
|
||||
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
|
||||
func (level Level) String() string {
|
||||
switch level {
|
||||
case DebugLevel:
|
||||
return "debug"
|
||||
case InfoLevel:
|
||||
return "info"
|
||||
case WarnLevel:
|
||||
return "warning"
|
||||
case ErrorLevel:
|
||||
return "error"
|
||||
case FatalLevel:
|
||||
return "fatal"
|
||||
case PanicLevel:
|
||||
return "panic"
|
||||
}
|
||||
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
// ParseLevel takes a string level and returns the Logrus log level constant.
|
||||
func ParseLevel(lvl string) (Level, error) {
|
||||
switch strings.ToLower(lvl) {
|
||||
case "panic":
|
||||
return PanicLevel, nil
|
||||
case "fatal":
|
||||
return FatalLevel, nil
|
||||
case "error":
|
||||
return ErrorLevel, nil
|
||||
case "warn", "warning":
|
||||
return WarnLevel, nil
|
||||
case "info":
|
||||
return InfoLevel, nil
|
||||
case "debug":
|
||||
return DebugLevel, nil
|
||||
}
|
||||
|
||||
var l Level
|
||||
return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
|
||||
}
|
||||
|
||||
// A constant exposing all logging levels
|
||||
var AllLevels = []Level{
|
||||
PanicLevel,
|
||||
FatalLevel,
|
||||
ErrorLevel,
|
||||
WarnLevel,
|
||||
InfoLevel,
|
||||
DebugLevel,
|
||||
}
|
||||
|
||||
// These are the different logging levels. You can set the logging level to log
|
||||
// on your instance of logger, obtained with `logrus.New()`.
|
||||
const (
|
||||
// PanicLevel level, highest level of severity. Logs and then calls panic with the
|
||||
// message passed to Debug, Info, ...
|
||||
PanicLevel Level = iota
|
||||
// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
|
||||
// logging level is set to Panic.
|
||||
FatalLevel
|
||||
// ErrorLevel level. Logs. Used for errors that should definitely be noted.
|
||||
// Commonly used for hooks to send errors to an error tracking service.
|
||||
ErrorLevel
|
||||
// WarnLevel level. Non-critical entries that deserve eyes.
|
||||
WarnLevel
|
||||
// InfoLevel level. General operational entries about what's going on inside the
|
||||
// application.
|
||||
InfoLevel
|
||||
// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
|
||||
DebugLevel
|
||||
)
|
||||
|
||||
// Won't compile if StdLogger can't be realized by a log.Logger
|
||||
var (
|
||||
_ StdLogger = &log.Logger{}
|
||||
_ StdLogger = &Entry{}
|
||||
_ StdLogger = &Logger{}
|
||||
)
|
||||
|
||||
// StdLogger is what your logrus-enabled library should take, that way
|
||||
// it'll accept a stdlib logger and a logrus logger. There's no standard
|
||||
// interface, this is the closest we get, unfortunately.
|
||||
type StdLogger interface {
|
||||
Print(...interface{})
|
||||
Printf(string, ...interface{})
|
||||
Println(...interface{})
|
||||
|
||||
Fatal(...interface{})
|
||||
Fatalf(string, ...interface{})
|
||||
Fatalln(...interface{})
|
||||
|
||||
Panic(...interface{})
|
||||
Panicf(string, ...interface{})
|
||||
Panicln(...interface{})
|
||||
}
|
||||
|
||||
// The FieldLogger interface generalizes the Entry and Logger types
|
||||
type FieldLogger interface {
|
||||
WithField(key string, value interface{}) *Entry
|
||||
WithFields(fields Fields) *Entry
|
||||
WithError(err error) *Entry
|
||||
|
||||
Debugf(format string, args ...interface{})
|
||||
Infof(format string, args ...interface{})
|
||||
Printf(format string, args ...interface{})
|
||||
Warnf(format string, args ...interface{})
|
||||
Warningf(format string, args ...interface{})
|
||||
Errorf(format string, args ...interface{})
|
||||
Fatalf(format string, args ...interface{})
|
||||
Panicf(format string, args ...interface{})
|
||||
|
||||
Debug(args ...interface{})
|
||||
Info(args ...interface{})
|
||||
Print(args ...interface{})
|
||||
Warn(args ...interface{})
|
||||
Warning(args ...interface{})
|
||||
Error(args ...interface{})
|
||||
Fatal(args ...interface{})
|
||||
Panic(args ...interface{})
|
||||
|
||||
Debugln(args ...interface{})
|
||||
Infoln(args ...interface{})
|
||||
Println(args ...interface{})
|
||||
Warnln(args ...interface{})
|
||||
Warningln(args ...interface{})
|
||||
Errorln(args ...interface{})
|
||||
Fatalln(args ...interface{})
|
||||
Panicln(args ...interface{})
|
||||
}
|
||||
386 vendor/github.com/Sirupsen/logrus/logrus_test.go generated vendored
@@ -1,386 +0,0 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
|
||||
var buffer bytes.Buffer
|
||||
var fields Fields
|
||||
|
||||
logger := New()
|
||||
logger.Out = &buffer
|
||||
logger.Formatter = new(JSONFormatter)
|
||||
|
||||
log(logger)
|
||||
|
||||
err := json.Unmarshal(buffer.Bytes(), &fields)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assertions(fields)
|
||||
}
|
||||
|
||||
func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) {
|
||||
var buffer bytes.Buffer
|
||||
|
||||
logger := New()
|
||||
logger.Out = &buffer
|
||||
logger.Formatter = &TextFormatter{
|
||||
DisableColors: true,
|
||||
}
|
||||
|
||||
log(logger)
|
||||
|
||||
fields := make(map[string]string)
|
||||
for _, kv := range strings.Split(buffer.String(), " ") {
|
||||
if !strings.Contains(kv, "=") {
|
||||
continue
|
||||
}
|
||||
kvArr := strings.Split(kv, "=")
|
||||
key := strings.TrimSpace(kvArr[0])
|
||||
val := kvArr[1]
|
||||
if kvArr[1][0] == '"' {
|
||||
var err error
|
||||
val, err = strconv.Unquote(val)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
fields[key] = val
|
||||
}
|
||||
assertions(fields)
|
||||
}
|
||||
|
||||
func TestPrint(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Print("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test")
|
||||
assert.Equal(t, fields["level"], "info")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfo(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test")
|
||||
assert.Equal(t, fields["level"], "info")
|
||||
})
|
||||
}
|
||||
|
||||
func TestWarn(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Warn("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test")
|
||||
assert.Equal(t, fields["level"], "warning")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Infoln("test", "test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test test")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Infoln("test", 10)
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test 10")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Infoln(10, 10)
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "10 10")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Infoln(10, 10)
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "10 10")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Info("test", 10)
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test10")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Info("test", "test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "testtest")
|
||||
})
|
||||
}
|
||||
|
||||
func TestWithFieldsShouldAllowAssignments(t *testing.T) {
|
||||
var buffer bytes.Buffer
|
||||
var fields Fields
|
||||
|
||||
logger := New()
|
||||
logger.Out = &buffer
|
||||
logger.Formatter = new(JSONFormatter)
|
||||
|
||||
localLog := logger.WithFields(Fields{
|
||||
"key1": "value1",
|
||||
})
|
||||
|
||||
localLog.WithField("key2", "value2").Info("test")
|
||||
err := json.Unmarshal(buffer.Bytes(), &fields)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, "value2", fields["key2"])
|
||||
assert.Equal(t, "value1", fields["key1"])
|
||||
|
||||
buffer = bytes.Buffer{}
|
||||
fields = Fields{}
|
||||
localLog.Info("test")
|
||||
err = json.Unmarshal(buffer.Bytes(), &fields)
|
||||
assert.Nil(t, err)
|
||||
|
||||
_, ok := fields["key2"]
|
||||
assert.Equal(t, false, ok)
|
||||
assert.Equal(t, "value1", fields["key1"])
|
||||
}
|
||||
|
||||
func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.WithField("msg", "hello").Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test")
|
||||
})
|
||||
}
|
||||
|
||||
func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.WithField("msg", "hello").Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test")
|
||||
assert.Equal(t, fields["fields.msg"], "hello")
|
||||
})
|
||||
}
|
||||
|
||||
func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.WithField("time", "hello").Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["fields.time"], "hello")
|
||||
})
|
||||
}
|
||||
|
||||
func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.WithField("level", 1).Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["level"], "info")
|
||||
assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only
|
||||
})
|
||||
}
|
||||
|
||||
func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
|
||||
LogAndAssertText(t, func(log *Logger) {
|
||||
ll := log.WithField("herp", "derp")
|
||||
ll.Info("hello")
|
||||
ll.Info("bye")
|
||||
}, func(fields map[string]string) {
|
||||
for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} {
|
||||
if _, ok := fields[fieldName]; ok {
|
||||
t.Fatalf("should not have prefixed %q: %v", fieldName, fields)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) {
|
||||
|
||||
var buffer bytes.Buffer
|
||||
var fields Fields
|
||||
|
||||
logger := New()
|
||||
logger.Out = &buffer
|
||||
logger.Formatter = new(JSONFormatter)
|
||||
|
||||
llog := logger.WithField("context", "eating raw fish")
|
||||
|
||||
llog.Info("looks delicious")
|
||||
|
||||
err := json.Unmarshal(buffer.Bytes(), &fields)
|
||||
assert.NoError(t, err, "should have decoded first message")
|
||||
assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
|
||||
assert.Equal(t, fields["msg"], "looks delicious")
|
||||
assert.Equal(t, fields["context"], "eating raw fish")
|
||||
|
||||
buffer.Reset()
|
||||
|
||||
llog.Warn("omg it is!")
|
||||
|
||||
err = json.Unmarshal(buffer.Bytes(), &fields)
|
||||
assert.NoError(t, err, "should have decoded second message")
|
||||
assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
|
||||
assert.Equal(t, fields["msg"], "omg it is!")
|
||||
assert.Equal(t, fields["context"], "eating raw fish")
|
||||
assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry")
|
||||
|
||||
}
|
||||
|
||||
func TestConvertLevelToString(t *testing.T) {
|
||||
assert.Equal(t, "debug", DebugLevel.String())
|
||||
assert.Equal(t, "info", InfoLevel.String())
|
||||
assert.Equal(t, "warning", WarnLevel.String())
|
||||
assert.Equal(t, "error", ErrorLevel.String())
|
||||
assert.Equal(t, "fatal", FatalLevel.String())
|
||||
assert.Equal(t, "panic", PanicLevel.String())
|
||||
}
|
||||
|
||||
func TestParseLevel(t *testing.T) {
|
||||
l, err := ParseLevel("panic")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, PanicLevel, l)
|
||||
|
||||
l, err = ParseLevel("PANIC")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, PanicLevel, l)
|
||||
|
||||
l, err = ParseLevel("fatal")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, FatalLevel, l)
|
||||
|
||||
l, err = ParseLevel("FATAL")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, FatalLevel, l)
|
||||
|
||||
l, err = ParseLevel("error")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, ErrorLevel, l)
|
||||
|
||||
l, err = ParseLevel("ERROR")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, ErrorLevel, l)
|
||||
|
||||
l, err = ParseLevel("warn")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, WarnLevel, l)
|
||||
|
||||
l, err = ParseLevel("WARN")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, WarnLevel, l)
|
||||
|
||||
l, err = ParseLevel("warning")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, WarnLevel, l)
|
||||
|
||||
l, err = ParseLevel("WARNING")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, WarnLevel, l)
|
||||
|
||||
l, err = ParseLevel("info")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, InfoLevel, l)
|
||||
|
||||
l, err = ParseLevel("INFO")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, InfoLevel, l)
|
||||
|
||||
l, err = ParseLevel("debug")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, DebugLevel, l)
|
||||
|
||||
l, err = ParseLevel("DEBUG")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, DebugLevel, l)
|
||||
|
||||
l, err = ParseLevel("invalid")
|
||||
assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error())
|
||||
}
|
||||
|
||||
func TestGetSetLevelRace(t *testing.T) {
|
||||
wg := sync.WaitGroup{}
|
||||
for i := 0; i < 100; i++ {
|
||||
wg.Add(1)
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
if i%2 == 0 {
|
||||
SetLevel(InfoLevel)
|
||||
} else {
|
||||
GetLevel()
|
||||
}
|
||||
}(i)
|
||||
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestLoggingRace(t *testing.T) {
|
||||
logger := New()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(100)
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
go func() {
|
||||
logger.Info("info")
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// Compile test
|
||||
func TestLogrusInterface(t *testing.T) {
|
||||
var buffer bytes.Buffer
|
||||
fn := func(l FieldLogger) {
|
||||
b := l.WithField("key", "value")
|
||||
b.Debug("Test")
|
||||
}
|
||||
// test logger
|
||||
logger := New()
|
||||
logger.Out = &buffer
|
||||
fn(logger)
|
||||
|
||||
// test Entry
|
||||
e := logger.WithField("another", "value")
|
||||
fn(e)
|
||||
}
|
||||
|
||||
// Implements io.Writer using channels for synchronization, so we can wait on
|
||||
// the Entry.Writer goroutine to write in a non-racey way. This does assume that
|
||||
// there is a single call to Logger.Out for each message.
|
||||
type channelWriter chan []byte
|
||||
|
||||
func (cw channelWriter) Write(p []byte) (int, error) {
|
||||
cw <- p
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func TestEntryWriter(t *testing.T) {
|
||||
cw := channelWriter(make(chan []byte, 1))
|
||||
log := New()
|
||||
log.Out = cw
|
||||
log.Formatter = new(JSONFormatter)
|
||||
log.WithField("foo", "bar").WriterLevel(WarnLevel).Write([]byte("hello\n"))
|
||||
|
||||
bs := <-cw
|
||||
var fields Fields
|
||||
err := json.Unmarshal(bs, &fields)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, fields["foo"], "bar")
|
||||
assert.Equal(t, fields["level"], "warning")
|
||||
}
|
||||
10 vendor/github.com/Sirupsen/logrus/terminal_bsd.go generated vendored
@@ -1,10 +0,0 @@
// +build darwin freebsd openbsd netbsd dragonfly
// +build !appengine

package logrus

import "golang.org/x/sys/unix"

const ioctlReadTermios = unix.TIOCGETA

type Termios unix.Termios
14 vendor/github.com/Sirupsen/logrus/terminal_linux.go generated vendored
@@ -1,14 +0,0 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine

package logrus

import "golang.org/x/sys/unix"

const ioctlReadTermios = unix.TCGETS

type Termios unix.Termios
191 vendor/github.com/Sirupsen/logrus/text_formatter.go generated vendored
@@ -1,191 +0,0 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
)
|
||||
|
||||
const (
|
||||
nocolor = 0
|
||||
red = 31
|
||||
green = 32
|
||||
yellow = 33
|
||||
blue = 36
|
||||
gray = 37
|
||||
)
|
||||
|
||||
var (
|
||||
baseTimestamp time.Time
|
||||
)
|
||||
|
||||
func init() {
|
||||
baseTimestamp = time.Now()
|
||||
}
|
||||
|
||||
// TextFormatter formats logs into text
|
||||
type TextFormatter struct {
|
||||
// Set to true to bypass checking for a TTY before outputting colors.
|
||||
ForceColors bool
|
||||
|
||||
// Force disabling colors.
|
||||
DisableColors bool
|
||||
|
||||
// Disable timestamp logging. useful when output is redirected to logging
|
||||
// system that already adds timestamps.
|
||||
DisableTimestamp bool
|
||||
|
||||
// Enable logging the full timestamp when a TTY is attached instead of just
|
||||
// the time passed since beginning of execution.
|
||||
FullTimestamp bool
|
||||
|
||||
// TimestampFormat to use for display when a full timestamp is printed
|
||||
TimestampFormat string
|
||||
|
||||
// The fields are sorted by default for a consistent output. For applications
|
||||
// that log extremely frequently and don't use the JSON formatter this may not
// be desired.
DisableSorting bool

// QuoteEmptyFields will wrap empty fields in quotes if true
QuoteEmptyFields bool

// Whether the logger's out is to a terminal
isTerminal bool

sync.Once
}

func (f *TextFormatter) init(entry *Entry) {
if entry.Logger != nil {
f.isTerminal = f.checkIfTerminal(entry.Logger.Out)
}
}

func (f *TextFormatter) checkIfTerminal(w io.Writer) bool {
switch v := w.(type) {
case *os.File:
return terminal.IsTerminal(int(v.Fd()))
default:
return false
}
}

// Format renders a single log entry
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
var b *bytes.Buffer
keys := make([]string, 0, len(entry.Data))
for k := range entry.Data {
keys = append(keys, k)
}

if !f.DisableSorting {
sort.Strings(keys)
}
if entry.Buffer != nil {
b = entry.Buffer
} else {
b = &bytes.Buffer{}
}

prefixFieldClashes(entry.Data)

f.Do(func() { f.init(entry) })

isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors

timestampFormat := f.TimestampFormat
if timestampFormat == "" {
timestampFormat = defaultTimestampFormat
}
if isColored {
f.printColored(b, entry, keys, timestampFormat)
} else {
if !f.DisableTimestamp {
f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
}
f.appendKeyValue(b, "level", entry.Level.String())
if entry.Message != "" {
f.appendKeyValue(b, "msg", entry.Message)
}
for _, key := range keys {
f.appendKeyValue(b, key, entry.Data[key])
}
}

b.WriteByte('\n')
return b.Bytes(), nil
}

func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
var levelColor int
switch entry.Level {
case DebugLevel:
levelColor = gray
case WarnLevel:
levelColor = yellow
case ErrorLevel, FatalLevel, PanicLevel:
levelColor = red
default:
levelColor = blue
}

levelText := strings.ToUpper(entry.Level.String())[0:4]

if f.DisableTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
} else if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
} else {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
}
for _, k := range keys {
v := entry.Data[k]
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
f.appendValue(b, v)
}
}

func (f *TextFormatter) needsQuoting(text string) bool {
if f.QuoteEmptyFields && len(text) == 0 {
return true
}
for _, ch := range text {
if !((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
(ch >= '0' && ch <= '9') ||
ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
return true
}
}
return false
}

func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
if b.Len() > 0 {
b.WriteByte(' ')
}
b.WriteString(key)
b.WriteByte('=')
f.appendValue(b, value)
}

func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
stringVal, ok := value.(string)
if !ok {
stringVal = fmt.Sprint(value)
}

if !f.needsQuoting(stringVal) {
b.WriteString(stringVal)
} else {
b.WriteString(fmt.Sprintf("%q", stringVal))
}
}
141 vendor/github.com/Sirupsen/logrus/text_formatter_test.go generated vendored
@@ -1,141 +0,0 @@
package logrus

import (
"bytes"
"errors"
"fmt"
"strings"
"testing"
"time"
)

func TestFormatting(t *testing.T) {
tf := &TextFormatter{DisableColors: true}

testCases := []struct {
value string
expected string
}{
{`foo`, "time=\"0001-01-01T00:00:00Z\" level=panic test=foo\n"},
}

for _, tc := range testCases {
b, _ := tf.Format(WithField("test", tc.value))

if string(b) != tc.expected {
t.Errorf("formatting expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected)
}
}
}

func TestQuoting(t *testing.T) {
tf := &TextFormatter{DisableColors: true}

checkQuoting := func(q bool, value interface{}) {
b, _ := tf.Format(WithField("test", value))
idx := bytes.Index(b, ([]byte)("test="))
cont := bytes.Contains(b[idx+5:], []byte("\""))
if cont != q {
if q {
t.Errorf("quoting expected for: %#v", value)
} else {
t.Errorf("quoting not expected for: %#v", value)
}
}
}

checkQuoting(false, "")
checkQuoting(false, "abcd")
checkQuoting(false, "v1.0")
checkQuoting(false, "1234567890")
checkQuoting(false, "/foobar")
checkQuoting(false, "foo_bar")
checkQuoting(false, "foo@bar")
checkQuoting(false, "foobar^")
checkQuoting(false, "+/-_^@f.oobar")
checkQuoting(true, "foobar$")
checkQuoting(true, "&foobar")
checkQuoting(true, "x y")
checkQuoting(true, "x,y")
checkQuoting(false, errors.New("invalid"))
checkQuoting(true, errors.New("invalid argument"))

// Test for quoting empty fields.
tf.QuoteEmptyFields = true
checkQuoting(true, "")
checkQuoting(false, "abcd")
checkQuoting(true, errors.New("invalid argument"))
}

func TestEscaping(t *testing.T) {
tf := &TextFormatter{DisableColors: true}

testCases := []struct {
value string
expected string
}{
{`ba"r`, `ba\"r`},
{`ba'r`, `ba'r`},
}

for _, tc := range testCases {
b, _ := tf.Format(WithField("test", tc.value))
if !bytes.Contains(b, []byte(tc.expected)) {
t.Errorf("escaping expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected)
}
}
}

func TestEscaping_Interface(t *testing.T) {
tf := &TextFormatter{DisableColors: true}

ts := time.Now()

testCases := []struct {
value interface{}
expected string
}{
{ts, fmt.Sprintf("\"%s\"", ts.String())},
{errors.New("error: something went wrong"), "\"error: something went wrong\""},
}

for _, tc := range testCases {
b, _ := tf.Format(WithField("test", tc.value))
if !bytes.Contains(b, []byte(tc.expected)) {
t.Errorf("escaping expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected)
}
}
}

func TestTimestampFormat(t *testing.T) {
checkTimeStr := func(format string) {
customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format}
customStr, _ := customFormatter.Format(WithField("test", "test"))
timeStart := bytes.Index(customStr, ([]byte)("time="))
timeEnd := bytes.Index(customStr, ([]byte)("level="))
timeStr := customStr[timeStart+5+len("\"") : timeEnd-1-len("\"")]
if format == "" {
format = time.RFC3339
}
_, e := time.Parse(format, (string)(timeStr))
if e != nil {
t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e)
}
}

checkTimeStr("2006-01-02T15:04:05.000000000Z07:00")
checkTimeStr("Mon Jan _2 15:04:05 2006")
checkTimeStr("")
}

func TestDisableTimestampWithColoredOutput(t *testing.T) {
tf := &TextFormatter{DisableTimestamp: true, ForceColors: true}

b, _ := tf.Format(WithField("test", "test"))
if strings.Contains(string(b), "[0000]") {
t.Error("timestamp not expected when DisableTimestamp is true")
}
}

// TODO add tests for sorting etc., this requires a parser for the text
// formatter output.
62 vendor/github.com/Sirupsen/logrus/writer.go generated vendored
@@ -1,62 +0,0 @@
package logrus

import (
"bufio"
"io"
"runtime"
)

func (logger *Logger) Writer() *io.PipeWriter {
return logger.WriterLevel(InfoLevel)
}

func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
return NewEntry(logger).WriterLevel(level)
}

func (entry *Entry) Writer() *io.PipeWriter {
return entry.WriterLevel(InfoLevel)
}

func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
reader, writer := io.Pipe()

var printFunc func(args ...interface{})

switch level {
case DebugLevel:
printFunc = entry.Debug
case InfoLevel:
printFunc = entry.Info
case WarnLevel:
printFunc = entry.Warn
case ErrorLevel:
printFunc = entry.Error
case FatalLevel:
printFunc = entry.Fatal
case PanicLevel:
printFunc = entry.Panic
default:
printFunc = entry.Print
}

go entry.writerScanner(reader, printFunc)
runtime.SetFinalizer(writer, writerFinalizer)

return writer
}

func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
printFunc(scanner.Text())
}
if err := scanner.Err(); err != nil {
entry.Errorf("Error while reading from Writer: %s", err)
}
reader.Close()
}

func writerFinalizer(writer *io.PipeWriter) {
writer.Close()
}
33 vendor/github.com/apache/thrift/.travis.yml generated vendored
@@ -42,9 +42,14 @@ env:
|
||||
- BUILD_LIBS="CPP C_GLIB HASKELL JAVA PYTHON TESTING TUTORIALS" # only meaningful for CMake builds
|
||||
|
||||
matrix:
|
||||
- TEST_NAME="Cross Language Tests (Binary, Header, JSON Protocols)"
|
||||
- TEST_NAME="Cross Language Tests (Binary Protocol)"
|
||||
SCRIPT="cross-test.sh"
|
||||
BUILD_ARG="-'(binary|header|json)'"
|
||||
BUILD_ARG="-'(binary)'"
|
||||
BUILD_ENV="-e CC=clang -e CXX=clang++ -e THRIFT_CROSSTEST_CONCURRENCY=4"
|
||||
|
||||
- TEST_NAME="Cross Language Tests (Header, JSON Protocols)"
|
||||
SCRIPT="cross-test.sh"
|
||||
BUILD_ARG="-'(header|json)'"
|
||||
BUILD_ENV="-e CC=clang -e CXX=clang++ -e THRIFT_CROSSTEST_CONCURRENCY=4"
|
||||
|
||||
- TEST_NAME="Cross Language Tests (Compact and Multiplexed Protocols)"
|
||||
@@ -54,20 +59,22 @@ env:
|
||||
|
||||
# Autotools builds
|
||||
# TODO: Remove them once migrated to CMake
|
||||
- TEST_NAME="Autotools (CentOS 7.3)"
|
||||
DISTRO=centos-7.3
|
||||
SCRIPT="autotools.sh"
|
||||
BUILD_ENV="-e CC=gcc -e CXX=g++"
|
||||
BUILD_ARG="--without-cpp --without-csharp --without-c_glib --without-d -without-dart --without-erlang --without-go --without-haskell --without-haxe"
|
||||
# centos-7.3 build jobs appear to be unstable/hang...
|
||||
# TEST_NAME="Autotools (CentOS 7.3)"
|
||||
# DISTRO=centos-7.3
|
||||
# SCRIPT="autotools.sh"
|
||||
# BUILD_ENV="-e CC=gcc -e CXX=g++"
|
||||
# BUILD_ARG="--without-cpp --without-csharp --without-c_glib --without-d -without-dart --without-erlang --without-go --without-haskell --without-haxe"
|
||||
|
||||
- TEST_NAME="Autotools (Ubuntu Xenial)"
|
||||
SCRIPT="autotools.sh"
|
||||
BUILD_ENV="-e CC=gcc -e CXX=g++"
|
||||
BUILD_ARG="--enable-plugin --without-java --without-lua --without-nodejs --without-perl --without-php --without-php_extension --without-python --without-py3 --without-ruby --without-rust"
|
||||
BUILD_ARG="--enable-plugin" # --without-java --without-lua --without-nodejs --without-perl --without-php --without-php_extension --without-python --without-py3 --without-ruby --without-rust"
|
||||
|
||||
# CMake builds
|
||||
- TEST_NAME="CMake (CentOS 7.3)"
|
||||
DISTRO=centos-7.3
|
||||
# centos-7.3 build jobs appear to be unstable/hang...
|
||||
# TEST_NAME="CMake (CentOS 7.3)"
|
||||
# DISTRO=centos-7.3
|
||||
|
||||
- TEST_NAME="CMake (Ubuntu Xenial)"
|
||||
|
||||
@@ -76,7 +83,7 @@ env:
|
||||
BUILD_LIBS="CPP TESTING TUTORIALS"
|
||||
BUILD_ARG="-DWITH_BOOSTTHREADS=ON -DWITH_PYTHON=OFF -DWITH_C_GLIB=OFF -DWITH_JAVA=OFF -DWITH_HASKELL=OFF"
|
||||
|
||||
- TEST_NAME="C++ Plugin (Std Thread)"
|
||||
- TEST_NAME="C++ (Std Thread) and Plugin"
|
||||
BUILD_LIBS="CPP TESTING TUTORIALS"
|
||||
BUILD_ARG="-DWITH_PLUGIN=ON -DWITH_STDTHREADS=ON -DWITH_PYTHON=OFF -DWITH_C_GLIB=OFF -DWITH_JAVA=OFF -DWITH_HASKELL=OFF"
|
||||
|
||||
@@ -89,11 +96,15 @@ env:
|
||||
|
||||
# C and C++ undefined behavior. This wraps autotools.sh, but each binary crashes if
|
||||
# undefined behavior occurs. Skips the known flaky tests.
|
||||
# Unstable: THRIFT-4064 needs to be fixed perhaps?
|
||||
- TEST_NAME="UBSan"
|
||||
SCRIPT="ubsan.sh"
|
||||
BUILD_ARG="--without-haskell --without-nodejs --without-perl --without-python"
|
||||
UNSTABLE=true
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- env: UNSTABLE=true
|
||||
include:
|
||||
# QA jobs for code analytics and metrics
|
||||
#
2 vendor/github.com/apache/thrift/appveyor.yml generated vendored
@@ -40,7 +40,7 @@ environment:
|
||||
LIBEVENT_VERSION: 2.0.22
|
||||
QT_VERSION: 5.6
|
||||
ZLIB_VERSION: 1.2.8
|
||||
DISABLED_TESTS: StressTestNonBlocking|concurrency_test
|
||||
DISABLED_TESTS: StressTestNonBlocking
|
||||
|
||||
- PROFILE: MSVC2015
|
||||
PLATFORM: x64
|
||||
|
||||
6 vendor/github.com/apache/thrift/build/docker/centos-7.3/Dockerfile generated vendored
@@ -10,11 +10,11 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Apache Thrift Docker build environment for Centos
|
||||
# Apache Thrift Docker build environment for CentOS
|
||||
#
|
||||
# Known missing client libraries:
|
||||
# - dotnet (will update to 2.0.0 separately)
|
||||
# - haxe (not in debian stretch)
|
||||
# - haxe (not in centos)
|
||||
|
||||
FROM centos:7.3.1611
|
||||
MAINTAINER Apache Thrift <dev@thrift.apache.org>
|
||||
@@ -33,12 +33,14 @@ RUN yum install -y \
|
||||
flex \
|
||||
gcc \
|
||||
gcc-c++ \
|
||||
gdb \
|
||||
git \
|
||||
libtool \
|
||||
m4 \
|
||||
make \
|
||||
tar \
|
||||
unzip \
|
||||
valgrind \
|
||||
wget && \
|
||||
ln -s /usr/bin/cmake3 /usr/bin/cmake && \
|
||||
ln -s /usr/bin/cpack3 /usr/bin/cpack && \
|
||||
|
||||
1 vendor/github.com/apache/thrift/build/docker/debian-stretch/Dockerfile generated vendored
@@ -56,6 +56,7 @@ RUN apt-get install -y --no-install-recommends \
|
||||
gdb \
|
||||
ninja-build \
|
||||
pkg-config \
|
||||
valgrind \
|
||||
vim
|
||||
|
||||
|
||||
|
||||
4 vendor/github.com/apache/thrift/build/docker/ubuntu-xenial/Dockerfile generated vendored
@@ -60,6 +60,7 @@ RUN apt-get install -y --no-install-recommends \
|
||||
llvm \
|
||||
ninja-build \
|
||||
pkg-config \
|
||||
valgrind \
|
||||
vim
|
||||
ENV PATH /usr/lib/llvm-3.8/bin:$PATH
|
||||
|
||||
@@ -140,7 +141,8 @@ RUN apt-get install -y --no-install-recommends \
|
||||
neko-dev \
|
||||
libneko0
|
||||
RUN haxelib setup --always /usr/share/haxe/lib && \
|
||||
haxelib install --always hxcpp
|
||||
haxelib install --always hxcpp 3.4.64
|
||||
# note: hxcpp 3.4.185 (latest) no longer ships static libraries, and caused a build failure
|
||||
|
||||
RUN apt-get install -y --no-install-recommends \
|
||||
`# Java dependencies` \
|
||||
|
||||
@@ -2820,6 +2820,8 @@ void t_csharp_generator::generate_csharp_property(ofstream& out,
|
||||
}
|
||||
if (ttype->is_base_type()) {
|
||||
use_nullable = ((t_base_type*)ttype)->get_base() != t_base_type::TYPE_STRING;
|
||||
} else if (ttype->is_enum()) {
|
||||
use_nullable = true;
|
||||
}
|
||||
}
|
||||
indent(out) << "return " << fieldPrefix + tfield->get_name() << ";" << endl;
|
||||
|
||||
74 vendor/github.com/apache/thrift/compiler/cpp/src/thrift/generate/t_delphi_generator.cc generated vendored
@@ -2023,13 +2023,13 @@ void t_delphi_generator::generate_service_client(t_service* tservice) {
|
||||
indent_impl(s_service_impl) << "var" << endl;
|
||||
indent_up_impl();
|
||||
indent_impl(s_service_impl) << argsvar << " : " << args_intfnm << ";" << endl;
|
||||
indent_impl(s_service_impl) << msgvar << " : Thrift.Protocol.IMessage;" << endl;
|
||||
indent_impl(s_service_impl) << msgvar << " : Thrift.Protocol.TThriftMessage;" << endl;
|
||||
indent_down_impl();
|
||||
indent_impl(s_service_impl) << "begin" << endl;
|
||||
indent_up_impl();
|
||||
|
||||
indent_impl(s_service_impl) << "seqid_ := seqid_ + 1;" << endl;
|
||||
indent_impl(s_service_impl) << msgvar << " := Thrift.Protocol.TMessageImpl.Create('" << funname
|
||||
indent_impl(s_service_impl) << "Thrift.Protocol.Init( " << msgvar << ", '" << funname
|
||||
<< "', " << ((*f_iter)->is_oneway() ? "TMessageType.Oneway"
|
||||
: "TMessageType.Call")
|
||||
<< ", seqid_);" << endl;
|
||||
@@ -2076,7 +2076,7 @@ void t_delphi_generator::generate_service_client(t_service* tservice) {
|
||||
indent_impl(s_service_impl) << function_signature(&recv_function, full_cls) << endl;
|
||||
indent_impl(s_service_impl) << "var" << endl;
|
||||
indent_up_impl();
|
||||
indent_impl(s_service_impl) << msgvar << " : Thrift.Protocol.IMessage;" << endl;
|
||||
indent_impl(s_service_impl) << msgvar << " : Thrift.Protocol.TThriftMessage;" << endl;
|
||||
if (xceptions.size() > 0) {
|
||||
indent_impl(s_service_impl) << exceptvar << " : Exception;" << endl;
|
||||
}
|
||||
@@ -2234,7 +2234,7 @@ void t_delphi_generator::generate_service_server(t_service* tservice) {
|
||||
;
|
||||
indent_impl(s_service_impl) << "var" << endl;
|
||||
indent_up_impl();
|
||||
indent_impl(s_service_impl) << "msg : Thrift.Protocol.IMessage;" << endl;
|
||||
indent_impl(s_service_impl) << "msg : Thrift.Protocol.TThriftMessage;" << endl;
|
||||
indent_impl(s_service_impl) << "fn : TProcessFunction;" << endl;
|
||||
indent_impl(s_service_impl) << "x : TApplicationException;" << endl;
|
||||
if (events_) {
|
||||
@@ -2257,7 +2257,7 @@ void t_delphi_generator::generate_service_server(t_service* tservice) {
|
||||
"TApplicationExceptionUnknownMethod.Create("
|
||||
"'Invalid method name: ''' + msg.Name + '''');" << endl;
|
||||
indent_impl(s_service_impl)
|
||||
<< "msg := Thrift.Protocol.TMessageImpl.Create(msg.Name, TMessageType.Exception, msg.SeqID);"
|
||||
<< "Thrift.Protocol.Init( msg, msg.Name, TMessageType.Exception, msg.SeqID);"
|
||||
<< endl;
|
||||
indent_impl(s_service_impl) << "oprot.WriteMessageBegin( msg);" << endl;
|
||||
indent_impl(s_service_impl) << "x.Write(oprot);" << endl;
|
||||
@@ -2373,7 +2373,7 @@ void t_delphi_generator::generate_process_function(t_service* tservice, t_functi
|
||||
indent_up_impl();
|
||||
indent_impl(s_service_impl) << "args: " << args_intfnm << ";" << endl;
|
||||
if (!tfunction->is_oneway()) {
|
||||
indent_impl(s_service_impl) << "msg: Thrift.Protocol.IMessage;" << endl;
|
||||
indent_impl(s_service_impl) << "msg: Thrift.Protocol.TThriftMessage;" << endl;
|
||||
indent_impl(s_service_impl) << "ret: " << result_intfnm << ";" << endl;
|
||||
indent_impl(s_service_impl) << "appx : TApplicationException;" << endl;
|
||||
}
|
||||
@@ -2459,7 +2459,7 @@ void t_delphi_generator::generate_process_function(t_service* tservice, t_functi
|
||||
if(events_) {
|
||||
indent_impl(s_service_impl) << "if events <> nil then events.PreWrite;" << endl;
|
||||
}
|
||||
indent_impl(s_service_impl) << "msg := Thrift.Protocol.TMessageImpl.Create('"
|
||||
indent_impl(s_service_impl) << "Thrift.Protocol.Init( msg, '"
|
||||
<< tfunction->get_name() << "', TMessageType.Exception, seqid);"
|
||||
<< endl;
|
||||
indent_impl(s_service_impl) << "oprot.WriteMessageBegin( msg);" << endl;
|
||||
@@ -2487,7 +2487,7 @@ void t_delphi_generator::generate_process_function(t_service* tservice, t_functi
|
||||
if (events_) {
|
||||
indent_impl(s_service_impl) << "if events <> nil then events.PreWrite;" << endl;
|
||||
}
|
||||
indent_impl(s_service_impl) << "msg := Thrift.Protocol.TMessageImpl.Create('"
|
||||
indent_impl(s_service_impl) << "Thrift.Protocol.Init( msg, '"
|
||||
<< tfunction->get_name() << "', TMessageType.Reply, seqid); "
|
||||
<< endl;
|
||||
indent_impl(s_service_impl) << "oprot.WriteMessageBegin( msg); " << endl;
|
||||
@@ -2619,11 +2619,11 @@ void t_delphi_generator::generate_deserialize_container(ostream& out,
|
||||
}
|
||||
|
||||
if (ttype->is_map()) {
|
||||
local_var = obj + ": IMap;";
|
||||
local_var = obj + ": TThriftMap;";
|
||||
} else if (ttype->is_set()) {
|
||||
local_var = obj + ": ISet;";
|
||||
local_var = obj + ": TThriftSet;";
|
||||
} else if (ttype->is_list()) {
|
||||
local_var = obj + ": IList;";
|
||||
local_var = obj + ": TThriftList;";
|
||||
}
|
||||
local_vars << " " << local_var << endl;
|
||||
counter = tmp("_i");
|
||||
@@ -2803,23 +2803,23 @@ void t_delphi_generator::generate_serialize_container(ostream& out,
|
||||
string obj;
|
||||
if (ttype->is_map()) {
|
||||
obj = tmp("map");
|
||||
local_vars << " " << obj << " : IMap;" << endl;
|
||||
indent_impl(out) << obj << " := TMapImpl.Create( "
|
||||
local_vars << " " << obj << " : TThriftMap;" << endl;
|
||||
indent_impl(out) << "Thrift.Protocol.Init( " << obj << ", "
|
||||
<< type_to_enum(((t_map*)ttype)->get_key_type()) << ", "
|
||||
<< type_to_enum(((t_map*)ttype)->get_val_type()) << ", " << prefix
|
||||
<< ".Count);" << endl;
|
||||
indent_impl(out) << "oprot.WriteMapBegin( " << obj << ");" << endl;
|
||||
} else if (ttype->is_set()) {
|
||||
obj = tmp("set_");
|
||||
local_vars << " " << obj << " : ISet;" << endl;
|
||||
indent_impl(out) << obj << " := TSetImpl.Create("
|
||||
local_vars << " " << obj << " : TThriftSet;" << endl;
|
||||
indent_impl(out) << "Thrift.Protocol.Init( " << obj << ", "
|
||||
<< type_to_enum(((t_set*)ttype)->get_elem_type()) << ", " << prefix
|
||||
<< ".Count);" << endl;
|
||||
indent_impl(out) << "oprot.WriteSetBegin( " << obj << ");" << endl;
|
||||
} else if (ttype->is_list()) {
|
||||
obj = tmp("list_");
|
||||
local_vars << " " << obj << " : IList;" << endl;
|
||||
indent_impl(out) << obj << " := TListImpl.Create("
|
||||
local_vars << " " << obj << " : TThriftList;" << endl;
|
||||
indent_impl(out) << "Thrift.Protocol.Init( " << obj << ", "
|
||||
<< type_to_enum(((t_list*)ttype)->get_elem_type()) << ", " << prefix
|
||||
<< ".Count);" << endl;
|
||||
indent_impl(out) << "oprot.WriteListBegin( " << obj << ");" << endl;
|
||||
@@ -3548,7 +3548,7 @@ void t_delphi_generator::generate_delphi_struct_reader_impl(ostream& out,
|
||||
<< ") then begin" << endl;
|
||||
indent_up_impl();
|
||||
|
||||
generate_deserialize_field(code_block, is_exception, *f_iter, "", local_vars);
|
||||
generate_deserialize_field(code_block, is_exception, *f_iter, "Self.", local_vars);
|
||||
|
||||
// required field?
|
||||
if ((*f_iter)->get_req() == t_field::T_REQUIRED) {
|
||||
@@ -3617,8 +3617,8 @@ void t_delphi_generator::generate_delphi_struct_reader_impl(ostream& out,
|
||||
<< endl;
|
||||
indent_impl(out) << "var" << endl;
|
||||
indent_up_impl();
|
||||
indent_impl(out) << "field_ : IField;" << endl;
|
||||
indent_impl(out) << "struc : IStruct;" << endl;
|
||||
indent_impl(out) << "field_ : TThriftField;" << endl;
|
||||
indent_impl(out) << "struc : TThriftStruct;" << endl;
|
||||
indent_down_impl();
|
||||
out << local_vars.str() << endl;
|
||||
out << code_block.str();
|
||||
@@ -3642,11 +3642,11 @@ void t_delphi_generator::generate_delphi_struct_result_writer_impl(ostream& out,
|
||||
indent_impl(local_vars) << "tracker : IProtocolRecursionTracker;" << endl;
|
||||
indent_impl(code_block) << "tracker := oprot.NextRecursionLevel;" << endl;
|
||||
|
||||
indent_impl(code_block) << "struc := TStructImpl.Create('" << name << "');" << endl;
|
||||
indent_impl(code_block) << "Thrift.Protocol.Init( struc, '" << name << "');" << endl;
|
||||
indent_impl(code_block) << "oprot.WriteStructBegin(struc);" << endl;
|
||||
|
||||
if (fields.size() > 0) {
|
||||
indent_impl(code_block) << "field_ := TFieldImpl.Create;" << endl;
|
||||
indent_impl(code_block) << "Thrift.Protocol.Init( field_);" << endl;
|
||||
for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
|
||||
indent_impl(code_block) << "if (__isset_" << prop_name(*f_iter, is_exception) << ") then"
|
||||
<< endl;
|
||||
@@ -3657,7 +3657,7 @@ void t_delphi_generator::generate_delphi_struct_result_writer_impl(ostream& out,
|
||||
<< endl;
|
||||
indent_impl(code_block) << "field_.ID := " << (*f_iter)->get_key() << ";" << endl;
|
||||
indent_impl(code_block) << "oprot.WriteFieldBegin(field_);" << endl;
|
||||
generate_serialize_field(code_block, is_exception, *f_iter, "", local_vars);
|
||||
generate_serialize_field(code_block, is_exception, *f_iter, "Self.", local_vars);
|
||||
indent_impl(code_block) << "oprot.WriteFieldEnd();" << endl;
|
||||
indent_down_impl();
|
||||
}
|
||||
@@ -3677,10 +3677,10 @@ void t_delphi_generator::generate_delphi_struct_result_writer_impl(ostream& out,
|
||||
<< endl;
|
||||
indent_impl(out) << "var" << endl;
|
||||
indent_up_impl();
|
||||
indent_impl(out) << "struc : IStruct;" << endl;
|
||||
indent_impl(out) << "struc : TThriftStruct;" << endl;
|
||||
|
||||
if (fields.size() > 0) {
|
||||
indent_impl(out) << "field_ : IField;" << endl;
|
||||
indent_impl(out) << "field_ : TThriftField;" << endl;
|
||||
}
|
||||
|
||||
out << local_vars.str();
|
||||
@@ -3706,11 +3706,11 @@ void t_delphi_generator::generate_delphi_struct_writer_impl(ostream& out,
|
||||
indent_impl(local_vars) << "tracker : IProtocolRecursionTracker;" << endl;
|
||||
indent_impl(code_block) << "tracker := oprot.NextRecursionLevel;" << endl;
|
||||
|
||||
indent_impl(code_block) << "struc := TStructImpl.Create('" << name << "');" << endl;
|
||||
indent_impl(code_block) << "Thrift.Protocol.Init( struc, '" << name << "');" << endl;
|
||||
indent_impl(code_block) << "oprot.WriteStructBegin(struc);" << endl;
|
||||
|
||||
if (fields.size() > 0) {
|
||||
indent_impl(code_block) << "field_ := TFieldImpl.Create;" << endl;
|
||||
indent_impl(code_block) << "Thrift.Protocol.Init( field_);" << endl;
|
||||
}
|
||||
|
||||
for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
|
||||
@@ -3720,13 +3720,13 @@ void t_delphi_generator::generate_delphi_struct_writer_impl(ostream& out,
|
||||
bool has_isset = (!is_required);
|
||||
if (is_required && null_allowed) {
|
||||
null_allowed = false;
|
||||
indent_impl(code_block) << "if (" << fieldname << " = nil)" << endl;
|
||||
indent_impl(code_block) << "if (Self." << fieldname << " = nil)" << endl;
|
||||
indent_impl(code_block) << "then raise TProtocolExceptionInvalidData.Create("
|
||||
<< "'required field " << fieldname << " not set');"
|
||||
<< endl;
|
||||
}
|
||||
if (null_allowed) {
|
||||
indent_impl(code_block) << "if (" << fieldname << " <> nil)";
|
||||
indent_impl(code_block) << "if (Self." << fieldname << " <> nil)";
|
||||
if (has_isset) {
|
||||
code_block << " and __isset_" << fieldname;
|
||||
}
|
||||
@@ -3743,7 +3743,7 @@ void t_delphi_generator::generate_delphi_struct_writer_impl(ostream& out,
|
||||
<< endl;
|
||||
indent_impl(code_block) << "field_.ID := " << (*f_iter)->get_key() << ";" << endl;
|
||||
indent_impl(code_block) << "oprot.WriteFieldBegin(field_);" << endl;
|
||||
generate_serialize_field(code_block, is_exception, *f_iter, "", local_vars);
|
||||
generate_serialize_field(code_block, is_exception, *f_iter, "Self.", local_vars);
|
||||
indent_impl(code_block) << "oprot.WriteFieldEnd();" << endl;
|
||||
if (null_allowed || has_isset) {
|
||||
indent_down_impl();
|
||||
@@ -3765,9 +3765,9 @@ void t_delphi_generator::generate_delphi_struct_writer_impl(ostream& out,
|
||||
<< endl;
|
||||
indent_impl(out) << "var" << endl;
|
||||
indent_up_impl();
|
||||
indent_impl(out) << "struc : IStruct;" << endl;
|
||||
indent_impl(out) << "struc : TThriftStruct;" << endl;
|
||||
if (fields.size() > 0) {
|
||||
indent_impl(out) << "field_ : IField;" << endl;
|
||||
indent_impl(out) << "field_ : TThriftField;" << endl;
|
||||
}
|
||||
out << local_vars.str();
|
||||
indent_down_impl();
|
||||
@@ -3825,7 +3825,7 @@ void t_delphi_generator::generate_delphi_struct_tostring_impl(ostream& out,
|
||||
bool null_allowed = type_can_be_null((*f_iter)->get_type());
|
||||
bool is_optional = ((*f_iter)->get_req() != t_field::T_REQUIRED);
|
||||
if (null_allowed) {
|
||||
indent_impl(out) << "if (" << prop_name((*f_iter), is_exception) << " <> nil)";
|
||||
indent_impl(out) << "if (Self." << prop_name((*f_iter), is_exception) << " <> nil)";
|
||||
if (is_optional) {
|
||||
out << " and __isset_" << prop_name(*f_iter, is_exception);
|
||||
}
|
||||
@@ -3857,14 +3857,14 @@ void t_delphi_generator::generate_delphi_struct_tostring_impl(ostream& out,
|
||||
}
|
||||
|
||||
if (ttype->is_xception() || ttype->is_struct()) {
|
||||
indent_impl(out) << "if (" << prop_name((*f_iter), is_exception) << " = nil) then " << tmp_sb
|
||||
<< ".Append('<null>') else " << tmp_sb << ".Append("
|
||||
indent_impl(out) << "if (Self." << prop_name((*f_iter), is_exception) << " = nil) then " << tmp_sb
|
||||
<< ".Append('<null>') else " << tmp_sb << ".Append( Self."
|
||||
<< prop_name((*f_iter), is_exception) << ".ToString());" << endl;
|
||||
} else if (ttype->is_enum()) {
|
||||
indent_impl(out) << tmp_sb << ".Append(Integer(" << prop_name((*f_iter), is_exception)
|
||||
indent_impl(out) << tmp_sb << ".Append(Integer( Self." << prop_name((*f_iter), is_exception)
|
||||
<< "));" << endl;
|
||||
} else {
|
||||
indent_impl(out) << tmp_sb << ".Append(" << prop_name((*f_iter), is_exception) << ");"
|
||||
indent_impl(out) << tmp_sb << ".Append( Self." << prop_name((*f_iter), is_exception) << ");"
|
||||
<< endl;
|
||||
}
|
||||
|
||||
|
||||
38 vendor/github.com/apache/thrift/compiler/cpp/src/thrift/generate/t_py_generator.cc generated vendored
@@ -58,6 +58,7 @@ public:
|
||||
gen_dynbase_ = false;
|
||||
gen_slots_ = false;
|
||||
gen_tornado_ = false;
|
||||
gen_zope_interface_ = false;
|
||||
gen_twisted_ = false;
|
||||
gen_dynamic_ = false;
|
||||
coding_ = "";
|
||||
@@ -105,8 +106,11 @@ public:
|
||||
} else if( iter->first.compare("dynimport") == 0) {
|
||||
gen_dynbase_ = true;
|
||||
import_dynbase_ = (iter->second);
|
||||
} else if( iter->first.compare("zope.interface") == 0) {
|
||||
gen_zope_interface_ = true;
|
||||
} else if( iter->first.compare("twisted") == 0) {
|
||||
gen_twisted_ = true;
|
||||
gen_zope_interface_ = true;
|
||||
} else if( iter->first.compare("tornado") == 0) {
|
||||
gen_tornado_ = true;
|
||||
} else if( iter->first.compare("coding") == 0) {
|
||||
@@ -290,6 +294,11 @@ private:
|
||||
|
||||
std::string copy_options_;
|
||||
|
||||
/**
|
||||
* True if we should generate code for use with zope.interface.
|
||||
*/
|
||||
bool gen_zope_interface_;
|
||||
|
||||
/**
|
||||
* True if we should generate Twisted-friendly RPC services.
|
||||
*/
|
||||
@@ -425,7 +434,7 @@ string t_py_generator::py_imports() {
|
||||
<< endl
|
||||
<< "from thrift.protocol.TProtocol import TProtocolException"
|
||||
<< endl
|
||||
<< "from thrift.TRecursive import fix_spec"
|
||||
<< "from thrift.TRecursive import fix_spec"
|
||||
<< endl;
|
||||
|
||||
if (gen_utf8strings_) {
|
||||
@@ -623,10 +632,10 @@ string t_py_generator::render_const_value(t_type* type, t_const_value* value) {
|
||||
return out.str();
|
||||
}
|
||||
|
||||
/**
|
||||
/**
|
||||
* Generates the "forward declarations" for python structs.
|
||||
* These are actually full class definitions so that calls to generate_struct
|
||||
* can add the thrift_spec field. This is needed so that all thrift_spec
|
||||
* can add the thrift_spec field. This is needed so that all thrift_spec
|
||||
* definitions are grouped at the end of the file to enable co-recursive structs.
|
||||
*/
|
||||
void t_py_generator::generate_forward_declaration(t_struct* tstruct) {
|
||||
@@ -1091,10 +1100,12 @@ void t_py_generator::generate_service(t_service* tservice) {
|
||||
<< "from thrift.Thrift import TProcessor" << endl
|
||||
<< "from thrift.transport import TTransport" << endl
|
||||
<< import_dynbase_;
|
||||
if (gen_zope_interface_) {
|
||||
f_service_ << "from zope.interface import Interface, implementer" << endl;
|
||||
}
|
||||
|
||||
if (gen_twisted_) {
|
||||
f_service_ << "from zope.interface import Interface, implementer" << endl
|
||||
<< "from twisted.internet import defer" << endl
|
||||
f_service_ << "from twisted.internet import defer" << endl
|
||||
<< "from thrift.transport import TTwisted" << endl;
|
||||
} else if (gen_tornado_) {
|
||||
f_service_ << "from tornado import gen" << endl;
|
||||
@@ -1171,7 +1182,7 @@ void t_py_generator::generate_service_interface(t_service* tservice) {
|
||||
extends = type_name(tservice->get_extends());
|
||||
extends_if = "(" + extends + ".Iface)";
|
||||
} else {
|
||||
if (gen_twisted_) {
|
||||
if (gen_zope_interface_) {
|
||||
extends_if = "(Interface)";
|
||||
} else if (gen_newstyle_ || gen_dynamic_ || gen_tornado_) {
|
||||
extends_if = "(object)";
|
||||
@@ -1214,20 +1225,20 @@ void t_py_generator::generate_service_client(t_service* tservice) {
|
||||
string extends_client = "";
|
||||
if (tservice->get_extends() != NULL) {
|
||||
extends = type_name(tservice->get_extends());
|
||||
if (gen_twisted_) {
|
||||
if (gen_zope_interface_) {
|
||||
extends_client = "(" + extends + ".Client)";
|
||||
} else {
|
||||
extends_client = extends + ".Client, ";
|
||||
}
|
||||
} else {
|
||||
if (gen_twisted_ && (gen_newstyle_ || gen_dynamic_)) {
|
||||
if (gen_zope_interface_ && (gen_newstyle_ || gen_dynamic_)) {
|
||||
extends_client = "(object)";
|
||||
}
|
||||
}
|
||||
|
||||
f_service_ << endl << endl;
|
||||
|
||||
if (gen_twisted_) {
|
||||
if (gen_zope_interface_) {
|
||||
f_service_ << "@implementer(Iface)" << endl
|
||||
<< "class Client" << extends_client << ":" << endl
|
||||
<< endl;
|
||||
@@ -1767,7 +1778,7 @@ void t_py_generator::generate_service_server(t_service* tservice) {
|
||||
f_service_ << endl << endl;
|
||||
|
||||
// Generate the header portion
|
||||
if (gen_twisted_) {
|
||||
if (gen_zope_interface_) {
|
||||
f_service_ << "@implementer(Iface)" << endl
|
||||
<< "class Processor(" << extends_processor << "TProcessor):" << endl;
|
||||
} else {
|
||||
@@ -1779,7 +1790,7 @@ void t_py_generator::generate_service_server(t_service* tservice) {
|
||||
indent(f_service_) << "def __init__(self, handler):" << endl;
|
||||
indent_up();
|
||||
if (extends.empty()) {
|
||||
if (gen_twisted_) {
|
||||
if (gen_zope_interface_) {
|
||||
f_service_ << indent() << "self._handler = Iface(handler)" << endl;
|
||||
} else {
|
||||
f_service_ << indent() << "self._handler = handler" << endl;
|
||||
@@ -1787,7 +1798,7 @@ void t_py_generator::generate_service_server(t_service* tservice) {
|
||||
|
||||
f_service_ << indent() << "self._processMap = {}" << endl;
|
||||
} else {
|
||||
if (gen_twisted_) {
|
||||
if (gen_zope_interface_) {
|
||||
f_service_ << indent() << extends << ".Processor.__init__(self, Iface(handler))" << endl;
|
||||
} else {
|
||||
f_service_ << indent() << extends << ".Processor.__init__(self, handler)" << endl;
|
||||
@@ -2536,7 +2547,7 @@ string t_py_generator::function_signature(t_function* tfunction, bool interface)
|
||||
vector<string> post;
|
||||
string signature = tfunction->get_name() + "(";
|
||||
|
||||
if (!(gen_twisted_ && interface)) {
|
||||
if (!(gen_zope_interface_ && interface)) {
|
||||
pre.push_back("self");
|
||||
}
|
||||
|
||||
@@ -2680,6 +2691,7 @@ string t_py_generator::type_to_spec_args(t_type* ttype) {
|
||||
THRIFT_REGISTER_GENERATOR(
|
||||
py,
|
||||
"Python",
|
||||
" zope.interface: Generate code for use with zope.interface.\n"
|
||||
" twisted: Generate Twisted-friendly RPC services.\n"
|
||||
" tornado: Generate code for use with Tornado.\n"
|
||||
" no_utf8strings: Do not Encode/decode strings using utf8 in the generated code. Basically no effect for Python 3.\n"
|
||||
|
||||
3 vendor/github.com/apache/thrift/configure.ac generated vendored
@@ -83,6 +83,9 @@ AS_IF([test "x$D_IMPORT_PREFIX" = x], [D_IMPORT_PREFIX="${includedir}/d2"])
|
||||
AC_ARG_VAR([DMD_LIBEVENT_FLAGS], [DMD flags for linking libevent (auto-detected if not set).])
|
||||
AC_ARG_VAR([DMD_OPENSSL_FLAGS], [DMD flags for linking OpenSSL (auto-detected if not set).])
|
||||
|
||||
AC_ARG_VAR([THRIFT], [Path to the thrift tool (needed for cross-compilation).])
|
||||
AS_IF([test "x$THRIFT" = x], [THRIFT=`pwd`/compiler/cpp/thrift])
|
||||
|
||||
AC_PROG_CC
|
||||
AC_PROG_CPP
|
||||
AC_PROG_CXX
|
||||
|
||||
2 vendor/github.com/apache/thrift/lib/c_glib/test/Makefile.am generated vendored
@@ -237,8 +237,6 @@ nodist_libtestgencpp_la_SOURCES = \
|
||||
gen-cpp/ThriftTest_types.h
|
||||
libtestgencpp_la_CPPFLAGS = -I../../cpp/src $(BOOST_CPPFLAGS) -I./gen-cpp
|
||||
|
||||
THRIFT = $(top_builddir)/compiler/cpp/thrift
|
||||
|
||||
gen-c_glib/t_test_container_test_types.c gen-c_glib/t_test_container_test_types.h gen-c_glib/t_test_container_service.c gen-c_glib/t_test_container_service.h: ContainerTest.thrift $(THRIFT)
|
||||
$(THRIFT) --gen c_glib $<
|
||||
|
||||
|
||||
2 vendor/github.com/apache/thrift/lib/cpp/Makefile.am generated vendored
@@ -265,8 +265,6 @@ include_qt_HEADERS = \
|
||||
src/thrift/qt/TQIODeviceTransport.h \
|
||||
src/thrift/qt/TQTcpServer.h
|
||||
|
||||
THRIFT = $(top_builddir)/compiler/cpp/thrift
|
||||
|
||||
WINDOWS_DIST = \
|
||||
src/thrift/windows \
|
||||
thrift.sln \
|
||||
|
||||
48 vendor/github.com/apache/thrift/lib/cpp/src/thrift/concurrency/BoostThreadFactory.cpp generated vendored
@@ -51,6 +51,7 @@ public:
|
||||
|
||||
private:
|
||||
scoped_ptr<boost::thread> thread_;
|
||||
Monitor monitor_;
|
||||
STATE state_;
|
||||
weak_ptr<BoostThread> self_;
|
||||
bool detached_;
|
||||
@@ -71,25 +72,46 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
void start() {
|
||||
if (state_ != uninitialized) {
|
||||
return;
|
||||
}
|
||||
STATE getState() const
|
||||
{
|
||||
Synchronized sync(monitor_);
|
||||
return state_;
|
||||
}
|
||||
|
||||
void setState(STATE newState)
|
||||
{
|
||||
Synchronized sync(monitor_);
|
||||
state_ = newState;
|
||||
|
||||
// unblock start() with the knowledge that the thread has actually
|
||||
// started running, which avoids a race in detached threads.
|
||||
if (newState == started) {
|
||||
monitor_.notify();
|
||||
}
|
||||
}
|
||||
|
||||
void start() {
|
||||
// Create reference
|
||||
shared_ptr<BoostThread>* selfRef = new shared_ptr<BoostThread>();
|
||||
*selfRef = self_.lock();
|
||||
|
||||
state_ = starting;
|
||||
setState(starting);
|
||||
|
||||
Synchronized sync(monitor_);
|
||||
|
||||
thread_.reset(new boost::thread(bind(threadMain, (void*)selfRef)));
|
||||
|
||||
if (detached_)
|
||||
thread_->detach();
|
||||
|
||||
// Wait for the thread to start and get far enough to grab everything
|
||||
// that it needs from the calling context, thus absolving the caller
|
||||
// from being required to hold on to runnable indefinitely.
|
||||
monitor_.wait();
|
||||
}
|
||||
|
||||
void join() {
|
||||
if (!detached_ && state_ != uninitialized) {
|
||||
if (!detached_ && getState() != uninitialized) {
|
||||
thread_->join();
|
||||
}
|
||||
}
|
||||
@@ -110,19 +132,11 @@ void* BoostThread::threadMain(void* arg) {
|
||||
shared_ptr<BoostThread> thread = *(shared_ptr<BoostThread>*)arg;
|
||||
delete reinterpret_cast<shared_ptr<BoostThread>*>(arg);
|
||||
|
||||
if (!thread) {
|
||||
return (void*)0;
|
||||
}
|
||||
|
||||
if (thread->state_ != starting) {
|
||||
return (void*)0;
|
||||
}
|
||||
|
||||
thread->state_ = started;
|
||||
thread->setState(started);
|
||||
thread->runnable()->run();
|
||||
|
||||
if (thread->state_ != stopping && thread->state_ != stopped) {
|
||||
thread->state_ = stopping;
|
||||
if (thread->getState() != stopping && thread->getState() != stopped) {
|
||||
thread->setState(stopping);
|
||||
}
|
||||
return (void*)0;
|
||||
}
|
||||
|
||||
2 vendor/github.com/apache/thrift/lib/cpp/src/thrift/concurrency/BoostThreadFactory.h generated vendored
@@ -20,8 +20,8 @@
|
||||
#ifndef _THRIFT_CONCURRENCY_BOOSTTHREADFACTORY_H_
|
||||
#define _THRIFT_CONCURRENCY_BOOSTTHREADFACTORY_H_ 1
|
||||
|
||||
#include <thrift/concurrency/Monitor.h>
|
||||
#include <thrift/concurrency/Thread.h>
|
||||
|
||||
#include <thrift/stdcxx.h>
|
||||
|
||||
namespace apache {
|
||||
|
||||
35 vendor/github.com/apache/thrift/lib/cpp/src/thrift/concurrency/PosixThreadFactory.cpp generated vendored
@@ -20,7 +20,7 @@
|
||||
#include <thrift/thrift-config.h>
|
||||
|
||||
#include <thrift/concurrency/Exception.h>
|
||||
#include <thrift/concurrency/Mutex.h>
|
||||
#include <thrift/concurrency/Monitor.h>
|
||||
#include <thrift/concurrency/PosixThreadFactory.h>
|
||||
|
||||
#if GOOGLE_PERFTOOLS_REGISTER_THREAD
|
||||
@@ -53,8 +53,8 @@ public:
|
||||
|
||||
private:
|
||||
pthread_t pthread_;
|
||||
Mutex state_mutex_;
|
||||
STATE state_;
|
||||
Monitor monitor_; // guard to protect state_ and also notification
|
||||
STATE state_; // to protect proper thread start behavior
|
||||
int policy_;
|
||||
int priority_;
|
||||
int stackSize_;
|
||||
@@ -96,14 +96,20 @@ public:
|
||||
|
||||
STATE getState() const
|
||||
{
|
||||
Guard g(state_mutex_);
|
||||
Synchronized sync(monitor_);
|
||||
return state_;
|
||||
}
|
||||
|
||||
void setState(STATE newState)
|
||||
{
|
||||
Guard g(state_mutex_);
|
||||
Synchronized sync(monitor_);
|
||||
state_ = newState;
|
||||
|
||||
// unblock start() with the knowledge that the thread has actually
|
||||
// started running, which avoids a race in detached threads.
|
||||
if (newState == started) {
|
||||
monitor_.notify();
|
||||
}
|
||||
}
|
||||
|
||||
void start() {
|
||||
@@ -154,9 +160,18 @@ public:
|
||||
|
||||
setState(starting);
|
||||
|
||||
Synchronized sync(monitor_);
|
||||
|
||||
if (pthread_create(&pthread_, &thread_attr, threadMain, (void*)selfRef) != 0) {
|
||||
throw SystemResourceException("pthread_create failed");
|
||||
}
|
||||
|
||||
// The caller may not choose to guarantee the scope of the Runnable
|
||||
// being used in the thread, so we must actually wait until the thread
|
||||
// starts before we return. If we do not wait, it would be possible
|
||||
// for the caller to start destructing the Runnable and the Thread,
|
||||
// and we would end up in a race. This was identified with valgrind.
|
||||
monitor_.wait();
|
||||
}
|
||||
|
||||
void join() {
|
||||
@@ -174,8 +189,6 @@ public:
|
||||
if (res != 0) {
|
||||
GlobalOutput.printf("PthreadThread::join(): fail with code %d", res);
|
||||
}
|
||||
} else {
|
||||
GlobalOutput.printf("PthreadThread::join(): detached thread");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -202,14 +215,6 @@ void* PthreadThread::threadMain(void* arg) {
|
||||
stdcxx::shared_ptr<PthreadThread> thread = *(stdcxx::shared_ptr<PthreadThread>*)arg;
|
||||
delete reinterpret_cast<stdcxx::shared_ptr<PthreadThread>*>(arg);
|
||||
|
||||
if (thread == NULL) {
|
||||
return (void*)0;
|
||||
}
|
||||
|
||||
if (thread->getState() != starting) {
|
||||
return (void*)0;
|
||||
}
|
||||
|
||||
#if GOOGLE_PERFTOOLS_REGISTER_THREAD
|
||||
ProfilerRegisterThread();
|
||||
#endif
|
||||
|
||||
50 vendor/github.com/apache/thrift/lib/cpp/src/thrift/concurrency/StdThreadFactory.cpp generated vendored
@@ -21,8 +21,9 @@
|
||||
|
||||
#if USE_STD_THREAD
|
||||
|
||||
#include <thrift/concurrency/StdThreadFactory.h>
|
||||
#include <thrift/concurrency/Exception.h>
|
||||
#include <thrift/concurrency/Monitor.h>
|
||||
#include <thrift/concurrency/StdThreadFactory.h>
|
||||
#include <thrift/stdcxx.h>
|
||||
|
||||
#include <cassert>
|
||||
@@ -49,6 +50,7 @@ public:
|
||||
|
||||
private:
|
||||
std::unique_ptr<std::thread> thread_;
|
||||
Monitor monitor_;
|
||||
STATE state_;
|
||||
bool detached_;
|
||||
|
||||
@@ -68,18 +70,42 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
STATE getState() const
|
||||
{
|
||||
Synchronized sync(monitor_);
|
||||
return state_;
|
||||
}
|
||||
|
||||
void setState(STATE newState)
|
||||
{
|
||||
Synchronized sync(monitor_);
|
||||
state_ = newState;
|
||||
|
||||
// unblock start() with the knowledge that the thread has actually
|
||||
// started running, which avoids a race in detached threads.
|
||||
if (newState == started) {
|
||||
monitor_.notify();
|
||||
}
|
||||
}
|
||||
|
||||
void start() {
|
||||
if (state_ != uninitialized) {
|
||||
if (getState() != uninitialized) {
|
||||
return;
|
||||
}
|
||||
|
||||
stdcxx::shared_ptr<StdThread> selfRef = shared_from_this();
|
||||
state_ = starting;
|
||||
setState(starting);
|
||||
|
||||
Synchronized sync(monitor_);
|
||||
thread_ = std::unique_ptr<std::thread>(new std::thread(threadMain, selfRef));
|
||||
|
||||
if (detached_)
|
||||
thread_->detach();
|
||||
|
||||
// Wait for the thread to start and get far enough to grab everything
|
||||
// that it needs from the calling context, thus absolving the caller
|
||||
// from being required to hold on to runnable indefinitely.
|
||||
monitor_.wait();
|
||||
}
|
||||
|
||||
void join() {
|
||||
@@ -96,22 +122,16 @@ public:
|
||||
};
|
||||
|
||||
void StdThread::threadMain(stdcxx::shared_ptr<StdThread> thread) {
|
||||
if (thread == NULL) {
|
||||
return;
|
||||
}
|
||||
#if GOOGLE_PERFTOOLS_REGISTER_THREAD
|
||||
ProfilerRegisterThread();
|
||||
#endif
|
||||
|
||||
if (thread->state_ != starting) {
|
||||
return;
|
||||
}
|
||||
|
||||
thread->state_ = started;
|
||||
thread->setState(started);
|
||||
thread->runnable()->run();
|
||||
|
||||
if (thread->state_ != stopping && thread->state_ != stopped) {
|
||||
thread->state_ = stopping;
|
||||
if (thread->getState() != stopping && thread->getState() != stopped) {
|
||||
thread->setState(stopping);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
StdThreadFactory::StdThreadFactory(bool detached) : ThreadFactory(detached) {
|
||||
|
||||
16 vendor/github.com/apache/thrift/lib/cpp/src/thrift/concurrency/TimerManager.cpp generated vendored
@@ -52,6 +52,8 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
bool operator==(const shared_ptr<Runnable> & runnable) const { return runnable_ == runnable; }
|
||||
|
||||
private:
|
||||
shared_ptr<Runnable> runnable_;
|
||||
friend class TimerManager::Dispatcher;
|
||||
@@ -290,11 +292,23 @@ void TimerManager::add(shared_ptr<Runnable> task, const struct timeval& value) {
|
||||
}
|
||||
|
||||
void TimerManager::remove(shared_ptr<Runnable> task) {
|
||||
(void)task;
|
||||
Synchronized s(monitor_);
|
||||
if (state_ != TimerManager::STARTED) {
|
||||
throw IllegalStateException();
|
||||
}
|
||||
bool found = false;
|
||||
for (task_iterator ix = taskMap_.begin(); ix != taskMap_.end();) {
|
||||
if (*ix->second == task) {
|
||||
found = true;
|
||||
taskCount_--;
|
||||
taskMap_.erase(ix++);
|
||||
} else {
|
||||
++ix;
|
||||
}
|
||||
}
|
||||
if (!found) {
|
||||
throw NoSuchTaskException();
|
||||
}
|
||||
}
|
||||
|
||||
TimerManager::STATE TimerManager::state() const {
|
||||
|
||||
11 vendor/github.com/apache/thrift/lib/cpp/src/thrift/transport/TServerSocket.cpp generated vendored
@@ -658,14 +658,21 @@ void TServerSocket::notify(THRIFT_SOCKET notifySocket) {
|
||||
}
|
||||
|
||||
void TServerSocket::interrupt() {
|
||||
notify(interruptSockWriter_);
|
||||
concurrency::Guard g(rwMutex_);
|
||||
if (interruptSockWriter_ != THRIFT_INVALID_SOCKET) {
|
||||
notify(interruptSockWriter_);
|
||||
}
|
||||
}
|
||||
|
||||
void TServerSocket::interruptChildren() {
|
||||
notify(childInterruptSockWriter_);
|
||||
concurrency::Guard g(rwMutex_);
|
||||
if (childInterruptSockWriter_ != THRIFT_INVALID_SOCKET) {
|
||||
notify(childInterruptSockWriter_);
|
||||
}
|
||||
}
|
||||
|
||||
void TServerSocket::close() {
|
||||
concurrency::Guard g(rwMutex_);
|
||||
if (serverSocket_ != THRIFT_INVALID_SOCKET) {
|
||||
shutdown(serverSocket_, THRIFT_SHUT_RDWR);
|
||||
::THRIFT_CLOSESOCKET(serverSocket_);
|
||||
|
||||
2 vendor/github.com/apache/thrift/lib/cpp/src/thrift/transport/TServerSocket.h generated vendored
@@ -20,6 +20,7 @@
|
||||
#ifndef _THRIFT_TRANSPORT_TSERVERSOCKET_H_
|
||||
#define _THRIFT_TRANSPORT_TSERVERSOCKET_H_ 1
|
||||
|
||||
#include <thrift/concurrency/Mutex.h>
|
||||
#include <thrift/stdcxx.h>
|
||||
#include <thrift/transport/PlatformSocket.h>
|
||||
#include <thrift/transport/TServerTransport.h>
|
||||
@@ -169,6 +170,7 @@ private:
|
||||
bool keepAlive_;
|
||||
bool listening_;
|
||||
|
||||
concurrency::Mutex rwMutex_; // thread-safe interrupt
|
||||
THRIFT_SOCKET interruptSockWriter_; // is notified on interrupt()
|
||||
THRIFT_SOCKET interruptSockReader_; // is used in select/poll with serverSocket_ for interruptability
|
||||
THRIFT_SOCKET childInterruptSockWriter_; // is notified on interruptChildren()
|
||||
|
||||
1 vendor/github.com/apache/thrift/lib/cpp/test/Makefile.am generated vendored
@@ -360,7 +360,6 @@ OpenSSLManualInitTest_LDADD = \
|
||||
#
|
||||
# Common thrift code generation rules
|
||||
#
|
||||
THRIFT = $(top_builddir)/compiler/cpp/thrift
|
||||
|
||||
gen-cpp/AnnotationTest_constants.cpp gen-cpp/AnnotationTest_constants.h gen-cpp/AnnotationTest_types.cpp gen-cpp/AnnotationTest_types.h: $(top_srcdir)/test/AnnotationTest.thrift
|
||||
$(THRIFT) --gen cpp $<
|
||||
|
||||
39 vendor/github.com/apache/thrift/lib/cpp/test/concurrency/Tests.cpp generated vendored
@@ -25,6 +25,10 @@
|
||||
#include "TimerManagerTests.h"
|
||||
#include "ThreadManagerTests.h"
|
||||
|
||||
// The test weight, where 10 is 10 times more threads than baseline
|
||||
// and the baseline is optimized for running in valgrind
|
||||
static size_t WEIGHT = 10;
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
|
||||
std::string arg;
|
||||
@@ -37,6 +41,11 @@ int main(int argc, char** argv) {
|
||||
args[ix - 1] = std::string(argv[ix]);
|
||||
}
|
||||
|
||||
if (getenv("VALGRIND") != 0) {
|
||||
// lower the scale of every test
|
||||
WEIGHT = 1;
|
||||
}
|
||||
|
||||
bool runAll = args[0].compare("all") == 0;
|
||||
|
||||
if (runAll || args[0].compare("thread-factory") == 0) {
|
||||
@@ -45,10 +54,10 @@ int main(int argc, char** argv) {
|
||||
|
||||
std::cout << "ThreadFactory tests..." << std::endl;
|
||||
|
||||
int reapLoops = 20;
|
||||
int reapCount = 1000;
|
||||
int reapLoops = 2 * WEIGHT;
|
||||
int reapCount = 100 * WEIGHT;
|
||||
size_t floodLoops = 3;
|
||||
size_t floodCount = 20000;
|
||||
size_t floodCount = 500 * WEIGHT;
|
||||
|
||||
std::cout << "\t\tThreadFactory reap N threads test: N = " << reapLoops << "x" << reapCount << std::endl;
|
||||
|
||||
@@ -114,6 +123,20 @@ int main(int argc, char** argv) {
|
||||
std::cerr << "\t\tTimerManager tests FAILED" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
std::cout << "\t\tTimerManager test01" << std::endl;
|
||||
|
||||
if (!timerManagerTests.test01()) {
|
||||
std::cerr << "\t\tTimerManager tests FAILED" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
std::cout << "\t\tTimerManager test02" << std::endl;
|
||||
|
||||
if (!timerManagerTests.test02()) {
|
||||
std::cerr << "\t\tTimerManager tests FAILED" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
if (runAll || args[0].compare("thread-manager") == 0) {
|
||||
@@ -121,8 +144,8 @@ int main(int argc, char** argv) {
|
||||
std::cout << "ThreadManager tests..." << std::endl;
|
||||
|
||||
{
|
||||
size_t workerCount = 100;
|
||||
size_t taskCount = 50000;
|
||||
size_t workerCount = 10 * WEIGHT;
|
||||
size_t taskCount = 500 * WEIGHT;
|
||||
int64_t delay = 10LL;
|
||||
|
||||
ThreadManagerTests threadManagerTests;
|
||||
@@ -160,13 +183,13 @@ int main(int argc, char** argv) {
|
||||
|
||||
size_t minWorkerCount = 2;
|
||||
|
||||
size_t maxWorkerCount = 64;
|
||||
size_t maxWorkerCount = 8;
|
||||
|
||||
size_t tasksPerWorker = 1000;
|
||||
size_t tasksPerWorker = 100 * WEIGHT;
|
||||
|
||||
int64_t delay = 5LL;
|
||||
|
||||
for (size_t workerCount = minWorkerCount; workerCount < maxWorkerCount; workerCount *= 4) {
|
||||
for (size_t workerCount = minWorkerCount; workerCount <= maxWorkerCount; workerCount *= 4) {
|
||||
|
||||
size_t taskCount = workerCount * tasksPerWorker;
|
||||
|
||||
|
||||
24 vendor/github.com/apache/thrift/lib/cpp/test/concurrency/ThreadFactoryTests.h generated vendored
@@ -21,11 +21,12 @@
|
||||
#include <thrift/concurrency/Thread.h>
|
||||
#include <thrift/concurrency/PlatformThreadFactory.h>
|
||||
#include <thrift/concurrency/Monitor.h>
|
||||
#include <thrift/concurrency/Mutex.h>
|
||||
#include <thrift/concurrency/Util.h>
|
||||
|
||||
#include <assert.h>
|
||||
#include <iostream>
|
||||
#include <set>
|
||||
#include <vector>
|
||||
|
||||
namespace apache {
|
||||
namespace thrift {
|
||||
@@ -78,13 +79,13 @@ public:
|
||||
|
||||
int* activeCount = new int(count);
|
||||
|
||||
std::set<shared_ptr<Thread> > threads;
|
||||
std::vector<shared_ptr<Thread> > threads;
|
||||
|
||||
int tix;
|
||||
|
||||
for (tix = 0; tix < count; tix++) {
|
||||
try {
|
||||
threads.insert(
|
||||
threads.push_back(
|
||||
threadFactory.newThread(shared_ptr<Runnable>(new ReapNTask(*monitor, *activeCount))));
|
||||
} catch (SystemResourceException& e) {
|
||||
std::cout << "\t\t\tfailed to create " << lix* count + tix << " thread " << e.what()
|
||||
@@ -94,7 +95,7 @@ public:
|
||||
}
|
||||
|
||||
tix = 0;
|
||||
for (std::set<shared_ptr<Thread> >::const_iterator thread = threads.begin();
|
||||
for (std::vector<shared_ptr<Thread> >::const_iterator thread = threads.begin();
|
||||
thread != threads.end();
|
||||
tix++, ++thread) {
|
||||
|
||||
@@ -113,6 +114,7 @@ public:
|
||||
monitor->wait(1000);
|
||||
}
|
||||
}
|
||||
|
||||
delete activeCount;
|
||||
std::cout << "\t\t\treaped " << lix* count << " threads" << std::endl;
|
||||
}
|
||||
@@ -253,19 +255,22 @@ public:
|
||||
|
||||
class FloodTask : public Runnable {
|
||||
public:
|
||||
FloodTask(const size_t id) : _id(id) {}
|
||||
FloodTask(const size_t id, Monitor& mon) : _id(id), _mon(mon) {}
|
||||
~FloodTask() {
|
||||
if (_id % 10000 == 0) {
|
||||
Synchronized sync(_mon);
|
||||
std::cout << "\t\tthread " << _id << " done" << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
void run() {
|
||||
if (_id % 10000 == 0) {
|
||||
Synchronized sync(_mon);
|
||||
std::cout << "\t\tthread " << _id << " started" << std::endl;
|
||||
}
|
||||
}
|
||||
const size_t _id;
|
||||
Monitor& _mon;
|
||||
};
|
||||
|
||||
void foo(PlatformThreadFactory* tf) { (void)tf; }
|
||||
@@ -273,7 +278,8 @@ public:
|
||||
bool floodNTest(size_t loop = 1, size_t count = 100000) {
|
||||
|
||||
bool success = false;
|
||||
|
||||
Monitor mon;
|
||||
|
||||
for (size_t lix = 0; lix < loop; lix++) {
|
||||
|
||||
PlatformThreadFactory threadFactory = PlatformThreadFactory();
|
||||
@@ -283,10 +289,8 @@ public:
|
||||
|
||||
try {
|
||||
|
||||
shared_ptr<FloodTask> task(new FloodTask(lix * count + tix));
|
||||
|
||||
shared_ptr<FloodTask> task(new FloodTask(lix * count + tix, mon));
|
||||
shared_ptr<Thread> thread = threadFactory.newThread(task);
|
||||
|
||||
thread->start();
|
||||
|
||||
} catch (TException& e) {
|
||||
@@ -298,8 +302,8 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
Synchronized sync(mon);
|
||||
std::cout << "\t\t\tflooded " << (lix + 1) * count << " threads" << std::endl;
|
||||
|
||||
success = true;
|
||||
}
|
||||
|
||||
|
||||
2 vendor/github.com/apache/thrift/lib/cpp/test/concurrency/ThreadManagerTests.h generated vendored
@@ -109,7 +109,7 @@ public:
|
||||
shared_ptr<ThreadManager> threadManager = ThreadManager::newSimpleThreadManager(workerCount);
|
||||
|
||||
shared_ptr<PlatformThreadFactory> threadFactory
|
||||
= shared_ptr<PlatformThreadFactory>(new PlatformThreadFactory());
|
||||
= shared_ptr<PlatformThreadFactory>(new PlatformThreadFactory(false));
|
||||
|
||||
#if !USE_BOOST_THREAD && !USE_STD_THREAD
|
||||
threadFactory->setPriority(PosixThreadFactory::HIGHEST);
|
||||
|
||||
66 vendor/github.com/apache/thrift/lib/cpp/test/concurrency/TimerManagerTests.h generated vendored
@@ -126,6 +126,72 @@ public:
return true;
}
/**
* This test creates two tasks, removes the first one then waits for the second one. It then
* verifies that the timer manager properly clean up itself and the remaining orphaned timeout
* task when the manager goes out of scope and its destructor is called.
*/
bool test01(int64_t timeout = 1000LL) {
TimerManager timerManager;
timerManager.threadFactory(shared_ptr<PlatformThreadFactory>(new PlatformThreadFactory()));
timerManager.start();
assert(timerManager.state() == TimerManager::STARTED);
Synchronized s(_monitor);
// Setup the two tasks
shared_ptr<TimerManagerTests::Task> taskToRemove
= shared_ptr<TimerManagerTests::Task>(new TimerManagerTests::Task(_monitor, timeout / 2));
timerManager.add(taskToRemove, taskToRemove->_timeout);
shared_ptr<TimerManagerTests::Task> task
= shared_ptr<TimerManagerTests::Task>(new TimerManagerTests::Task(_monitor, timeout));
timerManager.add(task, task->_timeout);
// Remove one task and wait until the other has completed
timerManager.remove(taskToRemove);
_monitor.wait(timeout * 2);
assert(!taskToRemove->_done);
assert(task->_done);
return true;
}
/**
* This test creates two tasks with the same callback and another one, then removes the two
* duplicated then waits for the last one. It then verifies that the timer manager properly
* clean up itself and the remaining orphaned timeout task when the manager goes out of scope
* and its destructor is called.
*/
bool test02(int64_t timeout = 1000LL) {
TimerManager timerManager;
timerManager.threadFactory(shared_ptr<PlatformThreadFactory>(new PlatformThreadFactory()));
timerManager.start();
assert(timerManager.state() == TimerManager::STARTED);
Synchronized s(_monitor);
// Setup the one tasks and add it twice
shared_ptr<TimerManagerTests::Task> taskToRemove
= shared_ptr<TimerManagerTests::Task>(new TimerManagerTests::Task(_monitor, timeout / 3));
timerManager.add(taskToRemove, taskToRemove->_timeout);
timerManager.add(taskToRemove, taskToRemove->_timeout * 2);
shared_ptr<TimerManagerTests::Task> task
= shared_ptr<TimerManagerTests::Task>(new TimerManagerTests::Task(_monitor, timeout));
timerManager.add(task, task->_timeout);
// Remove the first task (e.g. two timers) and wait until the other has completed
timerManager.remove(taskToRemove);
_monitor.wait(timeout * 2);
assert(!taskToRemove->_done);
assert(task->_done);
return true;
}
friend class TestTask;
Monitor _monitor;
2 vendor/github.com/apache/thrift/lib/d/test/Makefile.am generated vendored
@@ -24,8 +24,6 @@ BUILT_SOURCES = trusted-ca-certificate.pem server-certificate.pem
# Thrift compiler rules
THRIFT = $(top_builddir)/compiler/cpp/thrift
debug_proto_gen = $(addprefix gen-d/, DebugProtoTest_types.d)
$(debug_proto_gen): $(top_srcdir)/test/DebugProtoTest.thrift
22 vendor/github.com/apache/thrift/lib/delphi/src/Thrift.Processor.Multiplex.pas generated vendored
@@ -68,16 +68,16 @@ type
// the standard format, without the service name prepended to TMessage.name.
TStoredMessageProtocol = class( TProtocolDecorator)
private
FMessageBegin : IMessage;
FMessageBegin : TThriftMessage;
public
constructor Create( const protocol : IProtocol; const aMsgBegin : IMessage);
function ReadMessageBegin: IMessage; override;
constructor Create( const protocol : IProtocol; const aMsgBegin : TThriftMessage);
function ReadMessageBegin: TThriftMessage; override;
end;
private
FServiceProcessorMap : TDictionary<String, IProcessor>;
procedure Error( const oprot : IProtocol; const msg : IMessage;
procedure Error( const oprot : IProtocol; const msg : TThriftMessage;
extype : TApplicationExceptionSpecializedClass; const etxt : string);
public
@@ -105,14 +105,14 @@ type
implementation
constructor TMultiplexedProcessorImpl.TStoredMessageProtocol.Create( const protocol : IProtocol; const aMsgBegin : IMessage);
constructor TMultiplexedProcessorImpl.TStoredMessageProtocol.Create( const protocol : IProtocol; const aMsgBegin : TThriftMessage);
begin
inherited Create( protocol);
FMessageBegin := aMsgBegin;
end;
function TMultiplexedProcessorImpl.TStoredMessageProtocol.ReadMessageBegin: IMessage;
function TMultiplexedProcessorImpl.TStoredMessageProtocol.ReadMessageBegin: TThriftMessage;
begin
result := FMessageBegin;
end;
@@ -141,15 +141,15 @@ begin
end;
procedure TMultiplexedProcessorImpl.Error( const oprot : IProtocol; const msg : IMessage;
procedure TMultiplexedProcessorImpl.Error( const oprot : IProtocol; const msg : TThriftMessage;
extype : TApplicationExceptionSpecializedClass;
const etxt : string);
var appex : TApplicationException;
newMsg : IMessage;
newMsg : TThriftMessage;
begin
appex := extype.Create(etxt);
try
newMsg := TMessageImpl.Create( msg.Name, TMessageType.Exception, msg.SeqID);
Init( newMsg, msg.Name, TMessageType.Exception, msg.SeqID);
oprot.WriteMessageBegin(newMsg);
appex.Write(oprot);
@@ -163,7 +163,7 @@ end;
function TMultiplexedProcessorImpl.Process(const iprot, oprot : IProtocol; const events : IProcessorEvents = nil): Boolean;
var msg, newMsg : IMessage;
var msg, newMsg : TThriftMessage;
idx : Integer;
sService : string;
processor : IProcessor;
@@ -204,7 +204,7 @@ begin
// Create a new TMessage, removing the service name
Inc( idx, Length(TMultiplexedProtocol.SEPARATOR));
newMsg := TMessageImpl.Create( Copy( msg.Name, idx, MAXINT), msg.Type_, msg.SeqID);
Init( newMsg, Copy( msg.Name, idx, MAXINT), msg.Type_, msg.SeqID);
// Dispatch processing to the stored processor
protocol := TStoredMessageProtocol.Create( iprot, newMsg);
86 vendor/github.com/apache/thrift/lib/delphi/src/Thrift.Protocol.Compact.pas generated vendored
@@ -123,7 +123,7 @@ type
// If we encounter a boolean field begin, save the TField here so it can
// have the value incorporated.
private booleanField_ : IField;
private booleanField_ : TThriftField;
// If we Read a field header, and it's a boolean field, save the boolean
// value here so that ReadBool can use it.
@@ -148,21 +148,21 @@ type
private
// The workhorse of WriteFieldBegin. It has the option of doing a 'type override'
// of the type header. This is used specifically in the boolean field case.
procedure WriteFieldBeginInternal( const field : IField; typeOverride : Byte);
procedure WriteFieldBeginInternal( const field : TThriftField; typeOverride : Byte);
public
procedure WriteMessageBegin( const msg: IMessage); override;
procedure WriteMessageBegin( const msg: TThriftMessage); override;
procedure WriteMessageEnd; override;
procedure WriteStructBegin( const struc: IStruct); override;
procedure WriteStructBegin( const struc: TThriftStruct); override;
procedure WriteStructEnd; override;
procedure WriteFieldBegin( const field: IField); override;
procedure WriteFieldBegin( const field: TThriftField); override;
procedure WriteFieldEnd; override;
procedure WriteFieldStop; override;
procedure WriteMapBegin( const map: IMap); override;
procedure WriteMapBegin( const map: TThriftMap); override;
procedure WriteMapEnd; override;
procedure WriteListBegin( const list: IList); override;
procedure WriteListBegin( const list: TThriftList); override;
procedure WriteListEnd(); override;
procedure WriteSetBegin( const set_: ISet ); override;
procedure WriteSetBegin( const set_: TThriftSet ); override;
procedure WriteSetEnd(); override;
procedure WriteBool( b: Boolean); override;
procedure WriteByte( b: ShortInt); override;
@@ -194,17 +194,17 @@ type
class procedure fixedLongToBytes( const n : Int64; var buf : TBytes);
public
function ReadMessageBegin: IMessage; override;
function ReadMessageBegin: TThriftMessage; override;
procedure ReadMessageEnd(); override;
function ReadStructBegin: IStruct; override;
function ReadStructBegin: TThriftStruct; override;
procedure ReadStructEnd; override;
function ReadFieldBegin: IField; override;
function ReadFieldBegin: TThriftField; override;
procedure ReadFieldEnd(); override;
function ReadMapBegin: IMap; override;
function ReadMapBegin: TThriftMap; override;
procedure ReadMapEnd(); override;
function ReadListBegin: IList; override;
function ReadListBegin: TThriftList; override;
procedure ReadListEnd(); override;
function ReadSetBegin: ISet; override;
function ReadSetBegin: TThriftSet; override;
procedure ReadSetEnd(); override;
function ReadBool: Boolean; override;
function ReadByte: ShortInt; override;
@@ -273,7 +273,7 @@ begin
lastFieldId_ := 0;
lastField_ := TStack<Integer>.Create;
booleanField_ := nil;
Init( booleanField_, '', TType.Stop, 0);
boolValue_ := unused;
end;
@@ -293,7 +293,7 @@ procedure TCompactProtocolImpl.Reset;
begin
lastField_.Clear();
lastFieldId_ := 0;
booleanField_ := nil;
Init( booleanField_, '', TType.Stop, 0);
boolValue_ := unused;
end;
@@ -301,11 +301,8 @@ end;
// Writes a byte without any possibility of all that field header nonsense.
// Used internally by other writing methods that know they need to Write a byte.
procedure TCompactProtocolImpl.WriteByteDirect( const b : Byte);
var data : TBytes;
begin
SetLength( data, 1);
data[0] := b;
Transport.Write( data);
Transport.Write( @b, SizeOf(b));
end;
@@ -344,7 +341,7 @@ end;
// Write a message header to the wire. Compact Protocol messages contain the
// protocol version so we can migrate forwards in the future if need be.
procedure TCompactProtocolImpl.WriteMessageBegin( const msg: IMessage);
procedure TCompactProtocolImpl.WriteMessageBegin( const msg: TThriftMessage);
var versionAndType : Byte;
begin
Reset;
@@ -362,7 +359,7 @@ end;
// Write a struct begin. This doesn't actually put anything on the wire. We use it as an
// opportunity to put special placeholder markers on the field stack so we can get the
// field id deltas correct.
procedure TCompactProtocolImpl.WriteStructBegin( const struc: IStruct);
procedure TCompactProtocolImpl.WriteStructBegin( const struc: TThriftStruct);
begin
lastField_.Push(lastFieldId_);
lastFieldId_ := 0;
@@ -380,7 +377,7 @@ end;
// Write a field header containing the field id and field type. If the difference between the
// current field id and the last one is small (< 15), then the field id will be encoded in
// the 4 MSB as a delta. Otherwise, the field id will follow the type header as a zigzag varint.
procedure TCompactProtocolImpl.WriteFieldBegin( const field: IField);
procedure TCompactProtocolImpl.WriteFieldBegin( const field: TThriftField);
begin
case field.Type_ of
TType.Bool_ : booleanField_ := field; // we want to possibly include the value, so we'll wait.
@@ -392,7 +389,7 @@ end;
// The workhorse of WriteFieldBegin. It has the option of doing a 'type override'
// of the type header. This is used specifically in the boolean field case.
procedure TCompactProtocolImpl.WriteFieldBeginInternal( const field : IField; typeOverride : Byte);
procedure TCompactProtocolImpl.WriteFieldBeginInternal( const field : TThriftField; typeOverride : Byte);
var typeToWrite : Byte;
begin
// if there's a type override, use that.
@@ -425,7 +422,7 @@ end;
// Write a map header. If the map is empty, omit the key and value type
// headers, as we don't need any additional information to skip it.
procedure TCompactProtocolImpl.WriteMapBegin( const map: IMap);
procedure TCompactProtocolImpl.WriteMapBegin( const map: TThriftMap);
var key, val : Byte;
begin
if (map.Count = 0)
@@ -440,14 +437,14 @@ end;
// Write a list header.
procedure TCompactProtocolImpl.WriteListBegin( const list: IList);
procedure TCompactProtocolImpl.WriteListBegin( const list: TThriftList);
begin
WriteCollectionBegin( list.ElementType, list.Count);
end;
// Write a set header.
procedure TCompactProtocolImpl.WriteSetBegin( const set_: ISet );
procedure TCompactProtocolImpl.WriteSetBegin( const set_: TThriftSet );
begin
WriteCollectionBegin( set_.ElementType, set_.Count);
end;
@@ -464,10 +461,10 @@ begin
then bt := Types.BOOLEAN_TRUE
else bt := Types.BOOLEAN_FALSE;
if booleanField_ <> nil then begin
if booleanField_.Type_ = TType.Bool_ then begin
// we haven't written the field header yet
WriteFieldBeginInternal( booleanField_, Byte(bt));
booleanField_ := nil;
booleanField_.Type_ := TType.Stop;
end
else begin
// we're not part of a field, so just Write the value.
@@ -642,7 +639,7 @@ end;
// Read a message header.
function TCompactProtocolImpl.ReadMessageBegin : IMessage;
function TCompactProtocolImpl.ReadMessageBegin : TThriftMessage;
var protocolId, versionAndType, version, type_ : Byte;
seqid : Integer;
msgNm : String;
@@ -663,17 +660,17 @@ begin
type_ := Byte( (versionAndType shr TYPE_SHIFT_AMOUNT) and TYPE_BITS);
seqid := Integer( ReadVarint32);
msgNm := ReadString;
result := TMessageImpl.Create( msgNm, TMessageType(type_), seqid);
Init( result, msgNm, TMessageType(type_), seqid);
end;
// Read a struct begin. There's nothing on the wire for this, but it is our
// opportunity to push a new struct begin marker onto the field stack.
function TCompactProtocolImpl.ReadStructBegin: IStruct;
function TCompactProtocolImpl.ReadStructBegin: TThriftStruct;
begin
lastField_.Push( lastFieldId_);
lastFieldId_ := 0;
result := TStructImpl.Create('');
Init( result);
end;
@@ -687,7 +684,7 @@ end;
// Read a field header off the wire.
function TCompactProtocolImpl.ReadFieldBegin: IField;
function TCompactProtocolImpl.ReadFieldBegin: TThriftField;
var type_ : Byte;
fieldId, modifier : ShortInt;
begin
@@ -695,7 +692,7 @@ begin
// if it's a stop, then we can return immediately, as the struct is over.
if type_ = Byte(Types.STOP) then begin
result := TFieldImpl.Create( '', TType.Stop, 0);
Init( result, '', TType.Stop, 0);
Exit;
end;
@@ -705,7 +702,7 @@ begin
then fieldId := ReadI16 // not a delta. look ahead for the zigzag varint field id.
else fieldId := ShortInt( lastFieldId_ + modifier); // add the delta to the last Read field id.
result := TFieldImpl.Create( '', getTType(Byte(type_ and $0F)), fieldId);
Init( result, '', getTType(Byte(type_ and $0F)), fieldId);
// if this happens to be a boolean field, the value is encoded in the type
// save the boolean value in a special instance variable.
@@ -723,7 +720,7 @@ end;
// Read a map header off the wire. If the size is zero, skip Reading the key
// and value type. This means that 0-length maps will yield TMaps without the
// "correct" types.
function TCompactProtocolImpl.ReadMapBegin: IMap;
function TCompactProtocolImpl.ReadMapBegin: TThriftMap;
var size : Integer;
keyAndValueType : Byte;
key, val : TType;
@@ -735,7 +732,7 @@ begin
key := getTType( Byte( keyAndValueType shr 4));
val := getTType( Byte( keyAndValueType and $F));
result := TMapImpl.Create( key, val, size);
Init( result, key, val, size);
ASSERT( (result.KeyType = key) and (result.ValueType = val));
end;
@@ -744,7 +741,7 @@ end;
// be packed into the element type header. If it's a longer list, the 4 MSB
// of the element type header will be $F, and a varint will follow with the
// true size.
function TCompactProtocolImpl.ReadListBegin: IList;
function TCompactProtocolImpl.ReadListBegin: TThriftList;
var size_and_type : Byte;
size : Integer;
type_ : TType;
@@ -756,7 +753,7 @@ begin
then size := Integer( ReadVarint32);
type_ := getTType( size_and_type);
result := TListImpl.Create( type_, size);
Init( result, type_, size);
end;
@@ -764,7 +761,7 @@ end;
// be packed into the element type header. If it's a longer set, the 4 MSB
// of the element type header will be $F, and a varint will follow with the
// true size.
function TCompactProtocolImpl.ReadSetBegin: ISet;
function TCompactProtocolImpl.ReadSetBegin: TThriftSet;
var size_and_type : Byte;
size : Integer;
type_ : TType;
@@ -776,7 +773,7 @@ begin
then size := Integer( ReadVarint32);
type_ := getTType( size_and_type);
result := TSetImpl.Create( type_, size);
Init( result, type_, size);
end;
@@ -797,11 +794,8 @@ end;
// Read a single byte off the wire. Nothing interesting here.
function TCompactProtocolImpl.ReadByte: ShortInt;
var data : TBytes;
begin
SetLength( data, 1);
Transport.ReadAll( data, 0, 1);
result := ShortInt(data[0]);
Transport.ReadAll( @result, SizeOf(result), 0, 1);
end;
72 vendor/github.com/apache/thrift/lib/delphi/src/Thrift.Protocol.JSON.pas generated vendored
@@ -103,7 +103,7 @@ type
private
FHasData : Boolean;
FData : TBytes;
FData : Byte;
public
// Return and consume the next byte to be Read, either taking it from the
@@ -169,18 +169,18 @@ type
public
// IProtocol
procedure WriteMessageBegin( const aMsg : IMessage); override;
procedure WriteMessageBegin( const aMsg : TThriftMessage); override;
procedure WriteMessageEnd; override;
procedure WriteStructBegin( const struc: IStruct); override;
procedure WriteStructBegin( const struc: TThriftStruct); override;
procedure WriteStructEnd; override;
procedure WriteFieldBegin( const field: IField); override;
procedure WriteFieldBegin( const field: TThriftField); override;
procedure WriteFieldEnd; override;
procedure WriteFieldStop; override;
procedure WriteMapBegin( const map: IMap); override;
procedure WriteMapBegin( const map: TThriftMap); override;
procedure WriteMapEnd; override;
procedure WriteListBegin( const list: IList); override;
procedure WriteListBegin( const list: TThriftList); override;
procedure WriteListEnd(); override;
procedure WriteSetBegin( const set_: ISet ); override;
procedure WriteSetBegin( const set_: TThriftSet ); override;
procedure WriteSetEnd(); override;
procedure WriteBool( b: Boolean); override;
procedure WriteByte( b: ShortInt); override;
@@ -191,17 +191,17 @@ type
procedure WriteString( const s: string ); override;
procedure WriteBinary( const b: TBytes); override;
//
function ReadMessageBegin: IMessage; override;
function ReadMessageBegin: TThriftMessage; override;
procedure ReadMessageEnd(); override;
function ReadStructBegin: IStruct; override;
function ReadStructBegin: TThriftStruct; override;
procedure ReadStructEnd; override;
function ReadFieldBegin: IField; override;
function ReadFieldBegin: TThriftField; override;
procedure ReadFieldEnd(); override;
function ReadMapBegin: IMap; override;
function ReadMapBegin: TThriftMap; override;
procedure ReadMapEnd(); override;
function ReadListBegin: IList; override;
function ReadListBegin: TThriftList; override;
procedure ReadListEnd(); override;
function ReadSetBegin: ISet; override;
function ReadSetBegin: TThriftSet; override;
procedure ReadSetEnd(); override;
function ReadBool: Boolean; override;
function ReadByte: ShortInt; override;
@@ -437,21 +437,19 @@ begin
if FHasData
then FHasData := FALSE
else begin
SetLength( FData, 1);
IJSONProtocol(FProto).Transport.ReadAll( FData, 0, 1);
IJSONProtocol(FProto).Transport.ReadAll( @FData, SizeOf(FData), 0, 1);
end;
result := FData[0];
result := FData;
end;
function TJSONProtocolImpl.TLookaheadReader.Peek : Byte;
begin
if not FHasData then begin
SetLength( FData, 1);
IJSONProtocol(FProto).Transport.ReadAll( FData, 0, 1);
IJSONProtocol(FProto).Transport.ReadAll( @FData, SizeOf(FData), 0, 1);
FHasData := TRUE;
end;
result := FData[0];
result := FData;
end;
@@ -681,7 +679,7 @@ begin
end;
procedure TJSONProtocolImpl.WriteMessageBegin( const aMsg : IMessage);
procedure TJSONProtocolImpl.WriteMessageBegin( const aMsg : TThriftMessage);
begin
ResetContextStack; // THRIFT-1473
@@ -700,7 +698,7 @@ begin
end;
procedure TJSONProtocolImpl.WriteStructBegin( const struc: IStruct);
procedure TJSONProtocolImpl.WriteStructBegin( const struc: TThriftStruct);
begin
WriteJSONObjectStart;
end;
@@ -712,7 +710,7 @@ begin
end;
procedure TJSONProtocolImpl.WriteFieldBegin( const field : IField);
procedure TJSONProtocolImpl.WriteFieldBegin( const field : TThriftField);
begin
WriteJSONInteger(field.ID);
WriteJSONObjectStart;
@@ -731,7 +729,7 @@ begin
// nothing to do
end;
procedure TJSONProtocolImpl.WriteMapBegin( const map: IMap);
procedure TJSONProtocolImpl.WriteMapBegin( const map: TThriftMap);
begin
WriteJSONArrayStart;
WriteJSONString( GetTypeNameForTypeID( map.KeyType));
@@ -748,7 +746,7 @@ begin
end;
procedure TJSONProtocolImpl.WriteListBegin( const list: IList);
procedure TJSONProtocolImpl.WriteListBegin( const list: TThriftList);
begin
WriteJSONArrayStart;
WriteJSONString( GetTypeNameForTypeID( list.ElementType));
@@ -762,7 +760,7 @@ begin
end;
procedure TJSONProtocolImpl.WriteSetBegin( const set_: ISet);
procedure TJSONProtocolImpl.WriteSetBegin( const set_: TThriftSet);
begin
WriteJSONArrayStart;
WriteJSONString( GetTypeNameForTypeID( set_.ElementType));
@@ -1051,11 +1049,11 @@ begin
end;
function TJSONProtocolImpl.ReadMessageBegin: IMessage;
function TJSONProtocolImpl.ReadMessageBegin: TThriftMessage;
begin
ResetContextStack; // THRIFT-1473
result := TMessageImpl.Create;
Init( result);
ReadJSONArrayStart;
if ReadJSONInteger <> VERSION
@@ -1073,10 +1071,10 @@ begin
end;
function TJSONProtocolImpl.ReadStructBegin : IStruct ;
function TJSONProtocolImpl.ReadStructBegin : TThriftStruct ;
begin
ReadJSONObjectStart;
result := TStructImpl.Create('');
Init( result);
end;
@@ -1086,11 +1084,11 @@ begin
end;
function TJSONProtocolImpl.ReadFieldBegin : IField;
function TJSONProtocolImpl.ReadFieldBegin : TThriftField;
var ch : Byte;
str : string;
begin
result := TFieldImpl.Create;
Init( result);
ch := FReader.Peek;
if ch = RBRACE[0]
then result.Type_ := TType.Stop
@@ -1110,10 +1108,10 @@ begin
end;
function TJSONProtocolImpl.ReadMapBegin : IMap;
function TJSONProtocolImpl.ReadMapBegin : TThriftMap;
var str : string;
begin
result := TMapImpl.Create;
Init( result);
ReadJSONArrayStart;
str := SysUtils.TEncoding.UTF8.GetString( ReadJSONString(FALSE));
@@ -1134,10 +1132,10 @@ begin
end;
function TJSONProtocolImpl.ReadListBegin : IList;
function TJSONProtocolImpl.ReadListBegin : TThriftList;
var str : string;
begin
result := TListImpl.Create;
Init( result);
ReadJSONArrayStart;
str := SysUtils.TEncoding.UTF8.GetString( ReadJSONString(FALSE));
@@ -1152,10 +1150,10 @@ begin
end;
function TJSONProtocolImpl.ReadSetBegin : ISet;
function TJSONProtocolImpl.ReadSetBegin : TThriftSet;
var str : string;
begin
result := TSetImpl.Create;
Init( result);
ReadJSONArrayStart;
str := SysUtils.TEncoding.UTF8.GetString( ReadJSONString(FALSE));
8 vendor/github.com/apache/thrift/lib/delphi/src/Thrift.Protocol.Multiplex.pas generated vendored
@@ -71,7 +71,7 @@ type
{ Prepends the service name to the function name, separated by SEPARATOR.
Args: The original message.
}
procedure WriteMessageBegin( const msg: IMessage); override;
procedure WriteMessageBegin( const msg: TThriftMessage); override;
end;
@@ -86,14 +86,14 @@ begin
end;
procedure TMultiplexedProtocol.WriteMessageBegin( const msg: IMessage);
procedure TMultiplexedProtocol.WriteMessageBegin( const msg: TThriftMessage);
// Prepends the service name to the function name, separated by TMultiplexedProtocol.SEPARATOR.
var newMsg : IMessage;
var newMsg : TThriftMessage;
begin
case msg.Type_ of
TMessageType.Call,
TMessageType.Oneway : begin
newMsg := TMessageImpl.Create( FServiceName + SEPARATOR + msg.Name, msg.Type_, msg.SeqID);
Init( newMsg, FServiceName + SEPARATOR + msg.Name, msg.Type_, msg.SeqID);
inherited WriteMessageBegin( newMsg);
end;
752 vendor/github.com/apache/thrift/lib/delphi/src/Thrift.Protocol.pas generated vendored
File diff suppressed because it is too large
82 vendor/github.com/apache/thrift/lib/delphi/src/Thrift.Stream.pas generated vendored
@@ -38,9 +38,11 @@ uses
type
IThriftStream = interface
['{732621B3-F697-4D76-A1B0-B4DD5A8E4018}']
procedure Write( const buffer: TBytes; offset: Integer; count: Integer);
function Read( var buffer: TBytes; offset: Integer; count: Integer): Integer;
['{2A77D916-7446-46C1-8545-0AEC0008DBCA}']
procedure Write( const buffer: TBytes; offset: Integer; count: Integer); overload;
procedure Write( const pBuf : Pointer; offset: Integer; count: Integer); overload;
function Read( var buffer: TBytes; offset: Integer; count: Integer): Integer; overload;
function Read( const pBuf : Pointer; const buflen : Integer; offset: Integer; count: Integer): Integer; overload;
procedure Open;
procedure Close;
procedure Flush;
@@ -50,10 +52,12 @@ type
TThriftStreamImpl = class( TInterfacedObject, IThriftStream)
private
procedure CheckSizeAndOffset( const buffer: TBytes; offset: Integer; count: Integer);
procedure CheckSizeAndOffset( const pBuf : Pointer; const buflen : Integer; offset: Integer; count: Integer); overload;
protected
procedure Write( const buffer: TBytes; offset: Integer; count: Integer); virtual;
function Read( var buffer: TBytes; offset: Integer; count: Integer): Integer; virtual;
procedure Write( const buffer: TBytes; offset: Integer; count: Integer); overload; inline;
procedure Write( const pBuf : Pointer; offset: Integer; count: Integer); overload; virtual;
function Read( var buffer: TBytes; offset: Integer; count: Integer): Integer; overload; inline;
function Read( const pBuf : Pointer; const buflen : Integer; offset: Integer; count: Integer): Integer; overload; virtual;
procedure Open; virtual; abstract;
procedure Close; virtual; abstract;
procedure Flush; virtual; abstract;
@@ -66,8 +70,8 @@ type
FStream : TStream;
FOwnsStream : Boolean;
protected
procedure Write( const buffer: TBytes; offset: Integer; count: Integer); override;
function Read( var buffer: TBytes; offset: Integer; count: Integer): Integer; override;
procedure Write( const pBuf : Pointer; offset: Integer; count: Integer); override;
function Read( const pBuf : Pointer; const buflen : Integer; offset: Integer; count: Integer): Integer; override;
procedure Open; override;
procedure Close; override;
procedure Flush; override;
@@ -82,8 +86,8 @@ type
private
FStream : IStream;
protected
procedure Write( const buffer: TBytes; offset: Integer; count: Integer); override;
function Read( var buffer: TBytes; offset: Integer; count: Integer): Integer; override;
procedure Write( const pBuf : Pointer; offset: Integer; count: Integer); override;
function Read( const pBuf : Pointer; const buflen : Integer; offset: Integer; count: Integer): Integer; override;
procedure Open; override;
procedure Close; override;
procedure Flush; override;
@@ -127,13 +131,17 @@ begin
// nothing to do
end;
function TThriftStreamAdapterCOM.Read( var buffer: TBytes; offset: Integer; count: Integer): Integer;
function TThriftStreamAdapterCOM.Read( const pBuf : Pointer; const buflen : Integer; offset: Integer; count: Integer): Integer;
begin
inherited;
if count >= buflen-offset
then count := buflen-offset;
Result := 0;
if FStream <> nil then begin
if count > 0 then begin
FStream.Read( @buffer[offset], count, @Result);
FStream.Read( @(PByteArray(pBuf)^[offset]), count, @Result);
end;
end;
end;
@@ -162,44 +170,53 @@ begin
end;
end;
procedure TThriftStreamAdapterCOM.Write( const buffer: TBytes; offset: Integer; count: Integer);
procedure TThriftStreamAdapterCOM.Write( const pBuf: Pointer; offset: Integer; count: Integer);
var nWritten : Integer;
begin
inherited;
if IsOpen then begin
if count > 0 then begin
FStream.Write( @buffer[0], count, @nWritten);
FStream.Write( @(PByteArray(pBuf)^[offset]), count, @nWritten);
end;
end;
end;
{ TThriftStreamImpl }
procedure TThriftStreamImpl.CheckSizeAndOffset(const buffer: TBytes; offset,
count: Integer);
var
len : Integer;
procedure TThriftStreamImpl.CheckSizeAndOffset( const pBuf : Pointer; const buflen : Integer; offset: Integer; count: Integer);
begin
if count > 0 then begin
len := Length( buffer );
if (offset < 0) or ( offset >= len) then begin
if (offset < 0) or ( offset >= buflen) then begin
raise ERangeError.Create( SBitsIndexError );
end;
if count > len then begin
if count > buflen then begin
raise ERangeError.Create( SBitsIndexError );
end;
end;
end;
function TThriftStreamImpl.Read(var buffer: TBytes; offset, count: Integer): Integer;
begin
if Length(buffer) > 0
then Result := Read( @buffer[0], Length(buffer), offset, count)
else Result := 0;
end;
function TThriftStreamImpl.Read( const pBuf : Pointer; const buflen : Integer; offset: Integer; count: Integer): Integer;
begin
Result := 0;
CheckSizeAndOffset( buffer, offset, count );
CheckSizeAndOffset( pBuf, buflen, offset, count );
end;
procedure TThriftStreamImpl.Write(const buffer: TBytes; offset, count: Integer);
begin
CheckSizeAndOffset( buffer, offset, count );
if Length(buffer) > 0
then Write( @buffer[0], offset, count);
end;
procedure TThriftStreamImpl.Write( const pBuf : Pointer; offset: Integer; count: Integer);
begin
CheckSizeAndOffset( pBuf, offset+count, offset, count);
end;
{ TThriftStreamAdapterDelphi }
@@ -241,14 +258,16 @@ begin
// nothing to do
end;
function TThriftStreamAdapterDelphi.Read(var buffer: TBytes; offset,
count: Integer): Integer;
function TThriftStreamAdapterDelphi.Read(const pBuf : Pointer; const buflen : Integer; offset, count: Integer): Integer;
begin
inherited;
Result := 0;
if count > 0 then begin
Result := FStream.Read( Pointer(@buffer[offset])^, count)
end;
if count >= buflen-offset
then count := buflen-offset;
if count > 0
then Result := FStream.Read( PByteArray(pBuf)^[offset], count)
else Result := 0;
end;
function TThriftStreamAdapterDelphi.ToArray: TBytes;
@@ -276,12 +295,11 @@ begin
end
end;
procedure TThriftStreamAdapterDelphi.Write(const buffer: TBytes; offset,
count: Integer);
procedure TThriftStreamAdapterDelphi.Write(const pBuf : Pointer; offset, count: Integer);
begin
inherited;
if count > 0 then begin
FStream.Write( Pointer(@buffer[offset])^, count)
FStream.Write( PByteArray(pBuf)^[offset], count)
end;
end;
113 vendor/github.com/apache/thrift/lib/delphi/src/Thrift.Transport.Pipes.pas generated vendored
@@ -48,16 +48,16 @@ type
FOpenTimeOut : DWORD; // separate value to allow for fail-fast-on-open scenarios
FOverlapped : Boolean;
procedure Write( const buffer: TBytes; offset: Integer; count: Integer); override;
function Read( var buffer: TBytes; offset: Integer; count: Integer): Integer; override;
procedure Write( const pBuf : Pointer; offset, count : Integer); override;
function Read( const pBuf : Pointer; const buflen : Integer; offset: Integer; count: Integer): Integer; override;
//procedure Open; override; - see derived classes
procedure Close; override;
procedure Flush; override;
function ReadDirect( var buffer: TBytes; offset: Integer; count: Integer): Integer;
function ReadOverlapped( var buffer: TBytes; offset: Integer; count: Integer): Integer;
procedure WriteDirect( const buffer: TBytes; offset: Integer; count: Integer);
procedure WriteOverlapped( const buffer: TBytes; offset: Integer; count: Integer);
function ReadDirect( const pBuf : Pointer; const buflen : Integer; offset: Integer; count: Integer): Integer; overload;
function ReadOverlapped( const pBuf : Pointer; const buflen : Integer; offset: Integer; count: Integer): Integer; overload;
procedure WriteDirect( const pBuf : Pointer; offset: Integer; count: Integer); overload;
procedure WriteOverlapped( const pBuf : Pointer; offset: Integer; count: Integer); overload;
function IsOpen: Boolean; override;
function ToArray: TBytes; override;
@@ -310,34 +310,67 @@ begin
end;
procedure TPipeStreamBase.Write(const buffer: TBytes; offset, count: Integer);
procedure TPipeStreamBase.Write( const pBuf : Pointer; offset, count : Integer);
begin
if FOverlapped
then WriteOverlapped( buffer, offset, count)
else WriteDirect( buffer, offset, count);
then WriteOverlapped( pBuf, offset, count)
else WriteDirect( pBuf, offset, count);
end;
function TPipeStreamBase.Read( var buffer: TBytes; offset, count: Integer): Integer;
function TPipeStreamBase.Read( const pBuf : Pointer; const buflen : Integer; offset: Integer; count: Integer): Integer;
begin
if FOverlapped
then result := ReadOverlapped( buffer, offset, count)
else result := ReadDirect( buffer, offset, count);
then result := ReadOverlapped( pBuf, buflen, offset, count)
else result := ReadDirect( pBuf, buflen, offset, count);
end;
procedure TPipeStreamBase.WriteDirect(const buffer: TBytes; offset, count: Integer);
procedure TPipeStreamBase.WriteDirect( const pBuf : Pointer; offset: Integer; count: Integer);
var cbWritten : DWORD;
begin
if not IsOpen
then raise TTransportExceptionNotOpen.Create('Called write on non-open pipe');
if not WriteFile( FPipe, buffer[offset], count, cbWritten, nil)
if not WriteFile( FPipe, PByteArray(pBuf)^[offset], count, cbWritten, nil)
then raise TTransportExceptionNotOpen.Create('Write to pipe failed');
end;
function TPipeStreamBase.ReadDirect( var buffer: TBytes; offset, count: Integer): Integer;
procedure TPipeStreamBase.WriteOverlapped( const pBuf : Pointer; offset: Integer; count: Integer);
var cbWritten, dwWait, dwError : DWORD;
overlapped : IOverlappedHelper;
begin
if not IsOpen
then raise TTransportExceptionNotOpen.Create('Called write on non-open pipe');
overlapped := TOverlappedHelperImpl.Create;
if not WriteFile( FPipe, PByteArray(pBuf)^[offset], count, cbWritten, overlapped.OverlappedPtr)
then begin
dwError := GetLastError;
case dwError of
ERROR_IO_PENDING : begin
dwWait := overlapped.WaitFor(FTimeout);
if (dwWait = WAIT_TIMEOUT)
then raise TTransportExceptionTimedOut.Create('Pipe write timed out');
if (dwWait <> WAIT_OBJECT_0)
or not GetOverlappedResult( FPipe, overlapped.Overlapped, cbWritten, TRUE)
then raise TTransportExceptionUnknown.Create('Pipe write error');
end;
else
raise TTransportExceptionUnknown.Create(SysErrorMessage(dwError));
end;
end;
ASSERT( DWORD(count) = cbWritten);
end;
function TPipeStreamBase.ReadDirect( const pBuf : Pointer; const buflen : Integer; offset: Integer; count: Integer): Integer;
var cbRead, dwErr : DWORD;
bytes, retries : LongInt;
bOk : Boolean;
@@ -374,47 +407,14 @@ begin
end;
// read the data (or block INFINITE-ly)
bOk := ReadFile( FPipe, buffer[offset], count, cbRead, nil);
bOk := ReadFile( FPipe, PByteArray(pBuf)^[offset], count, cbRead, nil);
if (not bOk) and (GetLastError() <> ERROR_MORE_DATA)
then result := 0 // No more data, possibly because client disconnected.
else result := cbRead;
end;
procedure TPipeStreamBase.WriteOverlapped(const buffer: TBytes; offset, count: Integer);
var cbWritten, dwWait, dwError : DWORD;
overlapped : IOverlappedHelper;
begin
if not IsOpen
then raise TTransportExceptionNotOpen.Create('Called write on non-open pipe');
overlapped := TOverlappedHelperImpl.Create;
if not WriteFile( FPipe, buffer[offset], count, cbWritten, overlapped.OverlappedPtr)
then begin
dwError := GetLastError;
case dwError of
ERROR_IO_PENDING : begin
dwWait := overlapped.WaitFor(FTimeout);
if (dwWait = WAIT_TIMEOUT)
then raise TTransportExceptionTimedOut.Create('Pipe write timed out');
if (dwWait <> WAIT_OBJECT_0)
or not GetOverlappedResult( FPipe, overlapped.Overlapped, cbWritten, TRUE)
then raise TTransportExceptionUnknown.Create('Pipe write error');
end;
else
raise TTransportExceptionUnknown.Create(SysErrorMessage(dwError));
end;
end;
ASSERT( DWORD(count) = cbWritten);
end;
function TPipeStreamBase.ReadOverlapped( var buffer: TBytes; offset, count: Integer): Integer;
function TPipeStreamBase.ReadOverlapped( const pBuf : Pointer; const buflen : Integer; offset: Integer; count: Integer): Integer;
var cbRead, dwWait, dwError : DWORD;
bOk : Boolean;
overlapped : IOverlappedHelper;
@@ -425,7 +425,7 @@ begin
overlapped := TOverlappedHelperImpl.Create;
// read the data
bOk := ReadFile( FPipe, buffer[offset], count, cbRead, overlapped.OverlappedPtr);
bOk := ReadFile( FPipe, PByteArray(pBuf)^[offset], count, cbRead, overlapped.OverlappedPtr);
if not bOk then begin
dwError := GetLastError;
case dwError of
@@ -768,8 +768,6 @@ var sd : PSECURITY_DESCRIPTOR;
sa : SECURITY_ATTRIBUTES; //TSecurityAttributes;
hCAR, hPipeW, hCAW, hPipe : THandle;
begin
result := FALSE;
sd := PSECURITY_DESCRIPTOR( LocalAlloc( LPTR,SECURITY_DESCRIPTOR_MIN_LENGTH));
try
Win32Check( InitializeSecurityDescriptor( sd, SECURITY_DESCRIPTOR_REVISION));
@@ -779,12 +777,14 @@ begin
sa.lpSecurityDescriptor := sd;
sa.bInheritHandle := TRUE; //allow passing handle to child
if not CreatePipe( hCAR, hPipeW, @sa, FBufSize) then begin //create stdin pipe
Result := CreatePipe( hCAR, hPipeW, @sa, FBufSize); //create stdin pipe
if not Result then begin //create stdin pipe
raise TTransportExceptionNotOpen.Create('TServerPipe CreatePipe (anon) failed, '+SysErrorMessage(GetLastError));
Exit;
end;
if not CreatePipe( hPipe, hCAW, @sa, FBufSize) then begin //create stdout pipe
Result := CreatePipe( hPipe, hCAW, @sa, FBufSize); //create stdout pipe
if not Result then begin //create stdout pipe
CloseHandle( hCAR);
CloseHandle( hPipeW);
raise TTransportExceptionNotOpen.Create('TServerPipe CreatePipe (anon) failed, '+SysErrorMessage(GetLastError));
@@ -795,9 +795,6 @@ begin
FClientAnonWrite := hCAW;
FReadHandle := hPipe;
FWriteHandle := hPipeW;
result := TRUE;
finally
if sd <> nil then LocalFree( Cardinal(sd));
end;
169 vendor/github.com/apache/thrift/lib/delphi/src/Thrift.Transport.pas generated vendored
@@ -44,16 +44,20 @@ uses
|
||||
|
||||
type
|
||||
ITransport = interface
|
||||
['{A4A9FC37-D620-44DC-AD21-662D16364CE4}']
|
||||
['{DB84961E-8BB3-4532-99E1-A8C7AC2300F7}']
|
||||
function GetIsOpen: Boolean;
|
||||
property IsOpen: Boolean read GetIsOpen;
|
||||
function Peek: Boolean;
|
||||
procedure Open;
|
||||
procedure Close;
|
||||
function Read(var buf: TBytes; off: Integer; len: Integer): Integer;
|
||||
function ReadAll(var buf: TBytes; off: Integer; len: Integer): Integer;
|
||||
function Read(var buf: TBytes; off: Integer; len: Integer): Integer; overload;
|
||||
function Read(const pBuf : Pointer; const buflen : Integer; off: Integer; len: Integer): Integer; overload;
|
||||
function ReadAll(var buf: TBytes; off: Integer; len: Integer): Integer; overload;
|
||||
function ReadAll(const pBuf : Pointer; const buflen : Integer; off: Integer; len: Integer): Integer; overload;
|
||||
procedure Write( const buf: TBytes); overload;
|
||||
procedure Write( const buf: TBytes; off: Integer; len: Integer); overload;
|
||||
procedure Write( const pBuf : Pointer; off, len : Integer); overload;
|
||||
procedure Write( const pBuf : Pointer; len : Integer); overload;
|
||||
procedure Flush;
|
||||
end;
|
||||
|
||||
@@ -64,10 +68,14 @@ type
|
||||
function Peek: Boolean; virtual;
|
||||
procedure Open(); virtual; abstract;
|
||||
procedure Close(); virtual; abstract;
|
||||
function Read(var buf: TBytes; off: Integer; len: Integer): Integer; virtual; abstract;
|
||||
function ReadAll(var buf: TBytes; off: Integer; len: Integer): Integer; virtual;
|
||||
procedure Write( const buf: TBytes); overload; virtual;
|
||||
procedure Write( const buf: TBytes; off: Integer; len: Integer); overload; virtual; abstract;
|
||||
function Read(var buf: TBytes; off: Integer; len: Integer): Integer; overload; inline;
|
||||
function Read(const pBuf : Pointer; const buflen : Integer; off: Integer; len: Integer): Integer; overload; virtual; abstract;
|
||||
function ReadAll(var buf: TBytes; off: Integer; len: Integer): Integer; overload; inline;
|
||||
function ReadAll(const pBuf : Pointer; const buflen : Integer; off: Integer; len: Integer): Integer; overload; virtual;
|
||||
procedure Write( const buf: TBytes); overload; inline;
|
||||
procedure Write( const buf: TBytes; off: Integer; len: Integer); overload; inline;
|
||||
procedure Write( const pBuf : Pointer; len : Integer); overload; inline;
|
||||
procedure Write( const pBuf : Pointer; off, len : Integer); overload; virtual; abstract;
|
||||
procedure Flush; virtual;
|
||||
end;
|
||||
|
||||
@@ -135,8 +143,8 @@ type
|
||||
function GetIsOpen: Boolean; override;
|
||||
procedure Open(); override;
|
||||
procedure Close(); override;
|
||||
function Read( var buf: TBytes; off: Integer; len: Integer): Integer; override;
|
||||
procedure Write( const buf: TBytes; off: Integer; len: Integer); override;
|
||||
function Read( const pBuf : Pointer; const buflen : Integer; off: Integer; len: Integer): Integer; override;
|
||||
procedure Write( const pBuf : Pointer; off, len : Integer); override;
|
||||
procedure Flush; override;
|
||||
|
||||
procedure SetConnectionTimeout(const Value: Integer);
|
||||
@@ -193,8 +201,8 @@ type
|
||||
SLEEP_TIME = 200;
|
||||
{$ENDIF}
|
||||
protected
|
||||
procedure Write( const buffer: TBytes; offset: Integer; count: Integer); override;
|
||||
function Read( var buffer: TBytes; offset: Integer; count: Integer): Integer; override;
|
||||
procedure Write( const pBuf : Pointer; offset, count: Integer); override;
|
||||
function Read( const pBuf : Pointer; const buflen : Integer; offset: Integer; count: Integer): Integer; override;
|
||||
procedure Open; override;
|
||||
procedure Close; override;
|
||||
procedure Flush; override;
|
||||
@@ -233,8 +241,8 @@ type
|
||||
procedure Open; override;
|
||||
procedure Close; override;
|
||||
procedure Flush; override;
|
||||
function Read(var buf: TBytes; off: Integer; len: Integer): Integer; override;
|
||||
procedure Write( const buf: TBytes; off: Integer; len: Integer); override;
|
||||
function Read( const pBuf : Pointer; const buflen : Integer; off: Integer; len: Integer): Integer; override;
|
||||
procedure Write( const pBuf : Pointer; off, len : Integer); override;
|
||||
constructor Create( const AInputStream : IThriftStream; const AOutputStream : IThriftStream);
|
||||
destructor Destroy; override;
|
||||
end;
|
||||
@@ -246,8 +254,8 @@ type
|
||||
FReadBuffer : TMemoryStream;
|
||||
FWriteBuffer : TMemoryStream;
|
||||
protected
|
||||
procedure Write( const buffer: TBytes; offset: Integer; count: Integer); override;
|
||||
function Read( var buffer: TBytes; offset: Integer; count: Integer): Integer; override;
|
||||
procedure Write( const pBuf : Pointer; offset: Integer; count: Integer); override;
|
||||
function Read( const pBuf : Pointer; const buflen : Integer; offset: Integer; count: Integer): Integer; override;
|
||||
procedure Open; override;
|
||||
procedure Close; override;
|
||||
procedure Flush; override;
|
||||
@@ -299,8 +307,8 @@ type
|
||||
public
|
||||
procedure Open(); override;
|
||||
procedure Close(); override;
|
||||
function Read(var buf: TBytes; off: Integer; len: Integer): Integer; override;
|
||||
procedure Write( const buf: TBytes; off: Integer; len: Integer); override;
|
||||
function Read( const pBuf : Pointer; const buflen : Integer; off: Integer; len: Integer): Integer; override;
|
||||
procedure Write( const pBuf : Pointer; off, len : Integer); override;
|
||||
constructor Create( const ATransport : IStreamTransport ); overload;
|
||||
constructor Create( const ATransport : IStreamTransport; ABufSize: Integer); overload;
|
||||
property UnderlyingTransport: ITransport read GetUnderlyingTransport;
|
||||
@@ -377,8 +385,8 @@ type
|
||||
function GetIsOpen: Boolean; override;
|
||||
|
||||
procedure Close(); override;
|
||||
function Read(var buf: TBytes; off: Integer; len: Integer): Integer; override;
|
||||
procedure Write( const buf: TBytes; off: Integer; len: Integer); override;
|
||||
function Read( const pBuf : Pointer; const buflen : Integer; off: Integer; len: Integer): Integer; override;
|
||||
procedure Write( const pBuf : Pointer; off, len : Integer); override;
|
||||
procedure Flush; override;
|
||||
end;
|
||||
|
||||
@@ -404,24 +412,47 @@ begin
|
||||
Result := IsOpen;
|
||||
end;
|
||||
|
||||
function TTransportImpl.ReadAll( var buf: TBytes; off, len: Integer): Integer;
|
||||
var
|
||||
got : Integer;
|
||||
ret : Integer;
|
||||
function TTransportImpl.Read(var buf: TBytes; off: Integer; len: Integer): Integer;
|
||||
begin
|
||||
got := 0;
|
||||
while got < len do begin
|
||||
ret := Read( buf, off + got, len - got);
|
||||
if ret > 0
|
||||
then Inc( got, ret)
|
||||
else raise TTransportExceptionNotOpen.Create( 'Cannot read, Remote side has closed' );
|
||||
end;
|
||||
Result := got;
|
||||
if Length(buf) > 0
|
||||
then result := Read( @buf[0], Length(buf), off, len)
|
||||
else result := 0;
|
||||
end;
|
||||
|
||||
function TTransportImpl.ReadAll(var buf: TBytes; off: Integer; len: Integer): Integer;
|
||||
begin
|
||||
if Length(buf) > 0
|
||||
then result := ReadAll( @buf[0], Length(buf), off, len)
|
||||
else result := 0;
|
||||
end;
|
||||
|
||||
procedure TTransportImpl.Write( const buf: TBytes);
|
||||
begin
|
||||
Self.Write( buf, 0, Length(buf) );
|
||||
if Length(buf) > 0
|
||||
then Write( @buf[0], 0, Length(buf));
|
||||
end;
|
||||
|
||||
procedure TTransportImpl.Write( const buf: TBytes; off: Integer; len: Integer);
|
||||
begin
|
||||
if Length(buf) > 0
|
||||
then Write( @buf[0], off, len);
|
||||
end;
|
||||
|
||||
function TTransportImpl.ReadAll(const pBuf : Pointer; const buflen : Integer; off: Integer; len: Integer): Integer;
|
||||
var ret : Integer;
|
||||
begin
|
||||
result := 0;
|
||||
while result < len do begin
|
||||
ret := Read( pBuf, buflen, off + result, len - result);
|
||||
if ret > 0
|
||||
then Inc( result, ret)
|
||||
else raise TTransportExceptionNotOpen.Create( 'Cannot read, Remote side has closed' );
|
||||
end;
|
||||
end;
|
||||
|
||||
procedure TTransportImpl.Write( const pBuf : Pointer; len : Integer);
|
||||
begin
|
||||
Self.Write( pBuf, 0, len);
|
||||
end;
|
||||
|
||||
{ THTTPClientImpl }
|
||||
@@ -501,14 +532,14 @@ begin
|
||||
// nothing to do
|
||||
end;
|
||||
|
||||
function THTTPClientImpl.Read( var buf: TBytes; off, len: Integer): Integer;
|
||||
function THTTPClientImpl.Read( const pBuf : Pointer; const buflen : Integer; off: Integer; len: Integer): Integer;
|
||||
begin
|
||||
if FInputStream = nil then begin
|
||||
raise TTransportExceptionNotOpen.Create('No request has been sent');
|
||||
end;
|
||||
|
||||
try
|
||||
Result := FInputStream.Read( buf, off, len )
|
||||
Result := FInputStream.Read( pBuf, buflen, off, len)
|
||||
except
|
||||
on E: Exception
|
||||
do raise TTransportExceptionUnknown.Create(E.Message);
|
||||
@@ -550,9 +581,9 @@ begin
|
||||
FReadTimeout := Value
|
||||
end;
|
||||
|
||||
procedure THTTPClientImpl.Write( const buf: TBytes; off, len: Integer);
|
||||
procedure THTTPClientImpl.Write( const pBuf : Pointer; off, len : Integer);
|
||||
begin
|
||||
FOutputStream.Write( buf, off, len);
|
||||
FOutputStream.Write( pBuf, off, len);
|
||||
end;
|
||||
|
||||
{ TTransportException }
|
||||
@@ -931,7 +962,7 @@ begin
|
||||
// nothing to do
|
||||
end;
|
||||
|
||||
function TBufferedStreamImpl.Read( var buffer: TBytes; offset: Integer; count: Integer): Integer;
|
||||
function TBufferedStreamImpl.Read( const pBuf : Pointer; const buflen : Integer; offset: Integer; count: Integer): Integer;
|
||||
var
|
||||
nRead : Integer;
|
||||
tempbuf : TBytes;
|
||||
@@ -954,7 +985,7 @@ begin
|
||||
|
||||
if FReadBuffer.Position < FReadBuffer.Size then begin
|
||||
nRead := Min( FReadBuffer.Size - FReadBuffer.Position, count);
|
||||
Inc( Result, FReadBuffer.Read( Pointer(@buffer[offset])^, nRead));
Inc( Result, FReadBuffer.Read( PByteArray(pBuf)^[offset], nRead));
Dec( count, nRead);
Inc( offset, nRead);
end;
@@ -979,12 +1010,12 @@ begin
end;
end;

procedure TBufferedStreamImpl.Write( const buffer: TBytes; offset: Integer; count: Integer);
procedure TBufferedStreamImpl.Write( const pBuf : Pointer; offset: Integer; count: Integer);
begin
inherited;
if count > 0 then begin
if IsOpen then begin
FWriteBuffer.Write( Pointer(@buffer[offset])^, count );
FWriteBuffer.Write( PByteArray(pBuf)^[offset], count );
if FWriteBuffer.Size > FBufSize then begin
Flush;
end;
@@ -1043,22 +1074,22 @@ begin

end;

function TStreamTransportImpl.Read(var buf: TBytes; off, len: Integer): Integer;
function TStreamTransportImpl.Read( const pBuf : Pointer; const buflen : Integer; off: Integer; len: Integer): Integer;
begin
if FInputStream = nil then begin
raise TTransportExceptionNotOpen.Create('Cannot read from null inputstream' );
end;

Result := FInputStream.Read( buf, off, len );
Result := FInputStream.Read( pBuf,buflen, off, len );
end;

procedure TStreamTransportImpl.Write(const buf: TBytes; off, len: Integer);
procedure TStreamTransportImpl.Write( const pBuf : Pointer; off, len : Integer);
begin
if FOutputStream = nil then begin
raise TTransportExceptionNotOpen.Create('Cannot write to null outputstream' );
end;

FOutputStream.Write( buf, off, len );
FOutputStream.Write( pBuf, off, len );
end;

{ TBufferedTransportImpl }
@@ -1114,18 +1145,18 @@ begin
FTransport.Open
end;

function TBufferedTransportImpl.Read(var buf: TBytes; off, len: Integer): Integer;
function TBufferedTransportImpl.Read( const pBuf : Pointer; const buflen : Integer; off: Integer; len: Integer): Integer;
begin
Result := 0;
if FInputBuffer <> nil then begin
Result := FInputBuffer.Read( buf, off, len );
Result := FInputBuffer.Read( pBuf,buflen, off, len );
end;
end;

procedure TBufferedTransportImpl.Write(const buf: TBytes; off, len: Integer);
procedure TBufferedTransportImpl.Write( const pBuf : Pointer; off, len : Integer);
begin
if FOutputBuffer <> nil then begin
FOutputBuffer.Write( buf, off, len );
FOutputBuffer.Write( pBuf, off, len );
end;
end;

@@ -1222,24 +1253,21 @@ begin
FTransport.Open;
end;

function TFramedTransportImpl.Read(var buf: TBytes; off, len: Integer): Integer;
var
got : Integer;
function TFramedTransportImpl.Read( const pBuf : Pointer; const buflen : Integer; off: Integer; len: Integer): Integer;
begin
if FReadBuffer <> nil then begin
if len > 0
then got := FReadBuffer.Read( Pointer(@buf[off])^, len )
else got := 0;

if got > 0 then begin
Result := got;
if len > (buflen-off)
then len := buflen-off;

if (FReadBuffer <> nil) and (len > 0) then begin
result := FReadBuffer.Read( PByteArray(pBuf)^[off], len);
if result > 0 then begin
Exit;
end;
end;

ReadFrame;
if len > 0
then Result := FReadBuffer.Read( Pointer(@buf[off])^, len)
then Result := FReadBuffer.Read( PByteArray(pBuf)^[off], len)
else Result := 0;
end;

@@ -1260,14 +1288,15 @@ begin
FTransport.ReadAll( buff, 0, size );
FReadBuffer.Free;
FReadBuffer := TMemoryStream.Create;
FReadBuffer.Write( Pointer(@buff[0])^, size );
if Length(buff) > 0
then FReadBuffer.Write( Pointer(@buff[0])^, size );
FReadBuffer.Position := 0;
end;

procedure TFramedTransportImpl.Write(const buf: TBytes; off, len: Integer);
procedure TFramedTransportImpl.Write( const pBuf : Pointer; off, len : Integer);
begin
if len > 0
then FWriteBuffer.Write( Pointer(@buf[off])^, len );
then FWriteBuffer.Write( PByteArray(pBuf)^[off], len );
end;

{ TFramedTransport.TFactory }
@@ -1447,7 +1476,7 @@ end;
{$ENDIF}

{$IFDEF OLD_SOCKETS}
function TTcpSocketStreamImpl.Read(var buffer: TBytes; offset, count: Integer): Integer;
function TTcpSocketStreamImpl.Read( const pBuf : Pointer; const buflen : Integer; offset: Integer; count: Integer): Integer;
// old sockets version
var wfd : TWaitForData;
wsaError,
@@ -1462,7 +1491,7 @@ begin
else msecs := DEFAULT_THRIFT_TIMEOUT;

result := 0;
pDest := Pointer(@buffer[offset]);
pDest := @(PByteArray(pBuf)^[offset]);
while count > 0 do begin

while TRUE do begin
@@ -1513,7 +1542,7 @@ begin
end;
end;

procedure TTcpSocketStreamImpl.Write(const buffer: TBytes; offset, count: Integer);
procedure TTcpSocketStreamImpl.Write( const pBuf : Pointer; offset, count: Integer);
// old sockets version
var bCanWrite, bError : Boolean;
retval, wsaError : Integer;
@@ -1537,12 +1566,12 @@ begin
if bError or not bCanWrite
then raise TTransportExceptionUnknown.Create('unknown error');

FTcpClient.SendBuf( Pointer(@buffer[offset])^, count);
FTcpClient.SendBuf( PByteArray(pBuf)^[offset], count);
end;

{$ELSE}

function TTcpSocketStreamImpl.Read(var buffer: TBytes; offset, count: Integer): Integer;
function TTcpSocketStreamImpl.Read( const pBuf : Pointer; const buflen : Integer; offset: Integer; count: Integer): Integer;
// new sockets version
var nBytes : Integer;
pDest : PByte;
@@ -1550,7 +1579,7 @@ begin
inherited;

result := 0;
pDest := Pointer(@buffer[offset]);
pDest := @(PByteArray(pBuf)^[offset]);
while count > 0 do begin
nBytes := FTcpClient.Read(pDest^, count);
if nBytes = 0 then Exit;
@@ -1579,7 +1608,7 @@ begin
SetLength(Result, Length(Result) - 1024 + len);
end;

procedure TTcpSocketStreamImpl.Write(const buffer: TBytes; offset, count: Integer);
procedure TTcpSocketStreamImpl.Write( const pBuf : Pointer; offset, count: Integer);
// new sockets version
begin
inherited;
@@ -1587,7 +1616,7 @@ begin
if not FTcpClient.IsOpen
then raise TTransportExceptionNotOpen.Create('not open');

FTcpClient.Write(buffer[offset], count);
FTcpClient.Write( PByteArray(pBuf)^[offset], count);
end;

{$ENDIF}
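
The hunks above replace the TBytes-based Read/Write overloads with pointer-plus-length variants, and TFramedTransportImpl.Read now clamps the requested length against the caller's remaining buffer space (buflen - off) before touching it. A minimal Go sketch of that clamping idea, for illustration only; the helper name clampedCopy is not part of the diff.

package main

import "fmt"

// clampedCopy mirrors the bounds check added to TFramedTransportImpl.Read:
// never copy more bytes than fit between the offset and the end of the
// destination buffer.
func clampedCopy(dst []byte, off int, src []byte, want int) int {
    if off >= len(dst) || want <= 0 {
        return 0
    }
    if want > len(dst)-off { // same idea as "if len > (buflen-off) then len := buflen-off"
        want = len(dst) - off
    }
    return copy(dst[off:off+want], src)
}

func main() {
    frame := []byte("hello world")
    buf := make([]byte, 8)
    n := clampedCopy(buf, 4, frame, 16) // asks for more than fits; clamped to 4
    fmt.Println(n, string(buf[4:4+n]))  // prints: 4 hell
}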

vendor/github.com/apache/thrift/lib/delphi/src/Thrift.pas (13 changes, generated, vendored)
@@ -172,10 +172,10 @@ end;

class function TApplicationException.Read( const iprot: IProtocol): TApplicationException;
var
field : IField;
field : TThriftField;
msg : string;
typ : TExceptionType;
struc : IStruct;
struc : TThriftStruct;
begin
msg := '';
typ := TExceptionType.Unknown;
@@ -220,12 +220,11 @@ end;

procedure TApplicationException.Write( const oprot: IProtocol);
var
struc : IStruct;
field : IField;

struc : TThriftStruct;
field : TThriftField;
begin
struc := TStructImpl.Create( 'TApplicationException' );
field := TFieldImpl.Create;
Init(struc, 'TApplicationException');
Init(field);

oprot.WriteStructBegin( struc );
if Message <> '' then

vendor/github.com/apache/thrift/lib/delphi/test/TestClient.pas (36 changes, generated, vendored)
@@ -22,7 +22,8 @@ unit TestClient;
{$I ../src/Thrift.Defines.inc}

{.$DEFINE StressTest} // activate to stress-test the server with frequent connects/disconnects
{.$DEFINE PerfTest} // activate to activate the performance test
{.$DEFINE PerfTest} // activate the performance test
{$DEFINE Exceptions} // activate the exceptions test (or disable while debugging)

interface

@@ -258,7 +259,7 @@ begin
if s = 'buffered' then Include( layered, trns_Buffered)
else if s = 'framed' then Include( layered, trns_Framed)
else if s = 'http' then endpoint := trns_Http
else if s = 'evhttp' then endpoint := trns_AnonPipes
else if s = 'evhttp' then endpoint := trns_EvHttp
else InvalidArgs;
end
else if s = '--protocol' then begin
@@ -462,6 +463,7 @@ begin
StressTest( client);
{$ENDIF StressTest}

{$IFDEF Exceptions}
// in-depth exception test
// (1) do we get an exception at all?
// (2) do we get the right exception?
@@ -510,6 +512,7 @@ begin
on e:TTransportException do Expect( FALSE, 'Unexpected : "'+e.ToString+'"');
on e:Exception do Expect( FALSE, 'Unexpected exception "'+e.ClassName+'"');
end;
{$ENDIF Exceptions}

// simple things
@@ -525,6 +528,9 @@ begin
s := client.testString('Test');
Expect( s = 'Test', 'testString(''Test'') = "'+s+'"');

s := client.testString(''); // empty string
Expect( s = '', 'testString('''') = "'+s+'"');

s := client.testString(HUGE_TEST_STRING);
Expect( length(s) = length(HUGE_TEST_STRING),
'testString( length(HUGE_TEST_STRING) = '+IntToStr(Length(HUGE_TEST_STRING))+') '
@@ -540,6 +546,7 @@ begin
i64 := client.testI64(-34359738368);
Expect( i64 = -34359738368, 'testI64(-34359738368) = ' + IntToStr( i64));

// random binary
binOut := PrepareBinaryData( TRUE);
Console.WriteLine('testBinary('+BytesToHex(binOut)+')');
try
@@ -552,6 +559,19 @@ begin
on e:Exception do Expect( FALSE, 'testBinary(): Unexpected exception "'+e.ClassName+'": '+e.Message);
end;

// empty binary
SetLength( binOut, 0);
Console.WriteLine('testBinary('+BytesToHex(binOut)+')');
try
binIn := client.testBinary(binOut);
Expect( Length(binOut) = Length(binIn), 'testBinary(): length '+IntToStr(Length(binOut))+' = '+IntToStr(Length(binIn)));
i32 := Min( Length(binOut), Length(binIn));
Expect( CompareMem( binOut, binIn, i32), 'testBinary('+BytesToHex(binOut)+') = '+BytesToHex(binIn));
except
on e:TApplicationException do Console.WriteLine('testBinary(): '+e.Message);
on e:Exception do Expect( FALSE, 'testBinary(): Unexpected exception "'+e.ClassName+'": '+e.Message);
end;

Console.WriteLine('testDouble(5.325098235)');
dub := client.testDouble(5.325098235);
Expect( abs(dub-5.325098235) < 1e-14, 'testDouble(5.325098235) = ' + FloatToStr( dub));
@@ -1037,8 +1057,8 @@ procedure TClientThread.JSONProtocolReadWriteTest;
// other clients or servers expect as the real JSON. This is beyond the scope of this test.
var prot : IProtocol;
stm : TStringStream;
list : IList;
binary, binRead : TBytes;
list : TThriftList;
binary, binRead, emptyBinary : TBytes;
i,iErr : Integer;
const
TEST_SHORT = ShortInt( $FE);
@@ -1061,6 +1081,7 @@ begin

// prepare binary data
binary := PrepareBinaryData( FALSE);
SetLength( emptyBinary, 0); // empty binary data block

// output setup
prot := TJSONProtocolImpl.Create(
@@ -1068,7 +1089,8 @@ begin
nil, TThriftStreamAdapterDelphi.Create( stm, FALSE)));

// write
prot.WriteListBegin( TListImpl.Create( TType.String_, 9));
Init( list, TType.String_, 9);
prot.WriteListBegin( list);
prot.WriteBool( TRUE);
prot.WriteBool( FALSE);
prot.WriteByte( TEST_SHORT);
@@ -1078,6 +1100,8 @@ begin
prot.WriteDouble( TEST_DOUBLE);
prot.WriteString( TEST_STRING);
prot.WriteBinary( binary);
prot.WriteString( ''); // empty string
prot.WriteBinary( emptyBinary); // empty binary data block
prot.WriteListEnd;

// input setup
@@ -1100,6 +1124,8 @@ begin
Expect( abs(prot.ReadDouble-TEST_DOUBLE) < abs(DELTA_DOUBLE), 'WriteDouble/ReadDouble');
Expect( prot.ReadString = TEST_STRING, 'WriteString/ReadString');
binRead := prot.ReadBinary;
Expect( Length(prot.ReadString) = 0, 'WriteString/ReadString (empty string)');
Expect( Length(prot.ReadBinary) = 0, 'empty WriteBinary/ReadBinary (empty data block)');
prot.ReadListEnd;

// test binary data

vendor/github.com/apache/thrift/lib/delphi/test/codegen/ReservedKeywords.thrift (45 changes, generated, vendored)
@@ -52,4 +52,49 @@ enum keywords {
}

struct Struct_lists {
1: list<Struct_simple> init;
2: list<Struct_simple> struc;
3: list<Struct_simple> field;
4: list<Struct_simple> field_;
5: list<Struct_simple> tracker;
6: list<Struct_simple> Self;
}

struct Struct_structs {
1: Struct_simple init;
2: Struct_simple struc;
3: Struct_simple field;
4: Struct_simple field_;
5: Struct_simple tracker;
6: Struct_simple Self;
}

struct Struct_simple {
1: bool init;
2: bool struc;
3: bool field;
4: bool field_;
5: bool tracker;
6: bool Self;
}

struct Struct_strings {
1: string init;
2: string struc;
3: string field;
4: string field_;
5: string tracker;
6: string Self;
}

struct Struct_binary {
1: binary init;
2: binary struc;
3: binary field;
4: binary field_;
5: binary tracker;
6: binary Self;
}

vendor/github.com/apache/thrift/lib/delphi/test/serializer/TestSerializer.Data.pas (5 changes, generated, vendored)
@@ -22,6 +22,7 @@ unit TestSerializer.Data;
interface

uses
SysUtils,
Thrift.Collections,
DebugProtoTest;

@@ -194,7 +195,7 @@ begin
{$IF cDebugProtoTest_Option_AnsiStr_Binary}
result.SetBase64('base64');
{$ELSE}
not yet impl
result.SetBase64( TEncoding.UTF8.GetBytes('base64'));
{$IFEND}

// byte, i16, and i64 lists are populated by default constructor
@@ -338,7 +339,7 @@ begin
{$IF cDebugProtoTest_Option_AnsiStr_Binary}
result.A_binary := AnsiString( #0#1#2#3#4#5#6#7#8);
{$ELSE}
not yet impl
result.A_binary := TEncoding.UTF8.GetBytes( #0#1#2#3#4#5#6#7#8);
{$IFEND}
end;

vendor/github.com/apache/thrift/lib/delphi/test/serializer/TestSerializer.dpr (1 change, generated, vendored)
@@ -35,6 +35,7 @@ uses
Thrift.Serializer in '..\..\src\Thrift.Serializer.pas',
Thrift.Stream in '..\..\src\Thrift.Stream.pas',
Thrift.TypeRegistry in '..\..\src\Thrift.TypeRegistry.pas',
ReservedKeywords,
DebugProtoTest,
TestSerializer.Data;

vendor/github.com/apache/thrift/lib/delphi/test/skip/skiptest_version1.dpr (4 changes, generated, vendored)
@@ -44,7 +44,7 @@ const
function CreatePing : IPing;
begin
result := TPingImpl.Create;
result.Version1 := Skiptest.One.TConstants.SKIPTESTSERVICE_VERSION;
result.Version1 := Tskiptest_version_1Constants.SKIPTESTSERVICE_VERSION;
end;

@@ -179,7 +179,7 @@ const
FILE_JSON = 'pingpong.json';
begin
try
Writeln( 'Delphi SkipTest '+IntToStr(TConstants.SKIPTESTSERVICE_VERSION)+' using '+Thrift.Version);
Writeln( 'Delphi SkipTest '+IntToStr(Tskiptest_version_1Constants.SKIPTESTSERVICE_VERSION)+' using '+Thrift.Version);

Writeln;
Writeln('Binary protocol');

vendor/github.com/apache/thrift/lib/delphi/test/skip/skiptest_version2.dpr (4 changes, generated, vendored)
@@ -45,7 +45,7 @@ var list : IThriftList<IPong>;
set_ : IHashSet<string>;
begin
result := TPingImpl.Create;
result.Version1 := Skiptest.Two.TConstants.SKIPTESTSERVICE_VERSION;
result.Version1 := Tskiptest_version_2Constants.SKIPTESTSERVICE_VERSION;
result.BoolVal := TRUE;
result.ByteVal := 2;
result.DbVal := 3;
@@ -206,7 +206,7 @@ const
FILE_JSON = 'pingpong.json';
begin
try
Writeln( 'Delphi SkipTest '+IntToStr(TConstants.SKIPTESTSERVICE_VERSION)+' using '+Thrift.Version);
Writeln( 'Delphi SkipTest '+IntToStr(Tskiptest_version_2Constants.SKIPTESTSERVICE_VERSION)+' using '+Thrift.Version);

Writeln;
Writeln('Binary protocol');

vendor/github.com/apache/thrift/lib/erl/coding_standards.md (2 changes, generated, vendored)
@@ -1 +1,3 @@
Please follow [General Coding Standards](/doc/coding_standards.md)

Particularly for Erlang please follow the Erlang [Programming Rules and Conventions](http://www.erlang.se/doc/programming_rules.shtml).

vendor/github.com/apache/thrift/lib/erl/src/thrift_reconnecting_client.erl (6 changes, generated, vendored)
@@ -36,7 +36,7 @@
terminate/2,
code_change/3 ]).

-record( state, { client = nil,
-record( state, { client = nil,
host,
port,
thrift_svc,
@@ -226,9 +226,9 @@ timer_fun() ->
end.
-else.
timer_fun() ->
T1 = erlang:now(),
T1 = erlang:timestamp(),
fun() ->
T2 = erlang:now(),
T2 = erlang:timestamp(),
timer:now_diff(T2, T1)
end.
-endif.

vendor/github.com/apache/thrift/lib/go/test/Makefile.am (1 change, generated, vendored)
@@ -21,7 +21,6 @@ if GOVERSION_LT_17
COMPILER_EXTRAFLAG=",legacy_context"
endif

THRIFT = $(top_builddir)/compiler/cpp/thrift
THRIFTARGS = -out gopath/src/ --gen go:thrift_import=thrift$(COMPILER_EXTRAFLAG)
THRIFTTEST = $(top_srcdir)/test/ThriftTest.thrift

vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go (16 changes, generated, vendored)
@@ -30,6 +30,17 @@ const (
PROTOCOL_ERROR = 7
)

var defaultApplicationExceptionMessage = map[int32]string{
UNKNOWN_APPLICATION_EXCEPTION: "unknown application exception",
UNKNOWN_METHOD: "unknown method",
INVALID_MESSAGE_TYPE_EXCEPTION: "invalid message type",
WRONG_METHOD_NAME: "wrong method name",
BAD_SEQUENCE_ID: "bad sequence ID",
MISSING_RESULT: "missing result",
INTERNAL_ERROR: "unknown internal error",
PROTOCOL_ERROR: "unknown protocol error",
}

// Application level Thrift exception
type TApplicationException interface {
TException
@@ -44,7 +55,10 @@ type tApplicationException struct {
}

func (e tApplicationException) Error() string {
return e.message
if e.message != "" {
return e.message
}
return defaultApplicationExceptionMessage[e.type_]
}

func NewTApplicationException(type_ int32, message string) TApplicationException {
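
The hunk above gives Error() a per-type default message whenever the exception was constructed with an empty string. A standalone Go sketch of that fallback pattern follows; the names appError, defaultMessage, and the two constants are illustrative stand-ins, not the thrift package's own identifiers.

package main

import "fmt"

// Illustrative stand-ins for the exception type codes in the real package.
const (
    unknownApplicationException int32 = 0
    unknownMethod               int32 = 1
)

var defaultMessage = map[int32]string{
    unknownApplicationException: "unknown application exception",
    unknownMethod:               "unknown method",
}

type appError struct {
    typeID  int32
    message string
}

// Error prefers the explicit message and falls back to the per-type default,
// mirroring the new tApplicationException.Error behaviour shown above.
func (e appError) Error() string {
    if e.message != "" {
        return e.message
    }
    return defaultMessage[e.typeID]
}

func main() {
    fmt.Println(appError{typeID: unknownMethod}.Error())                  // unknown method
    fmt.Println(appError{typeID: unknownMethod, message: "boom"}.Error()) // boom
}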

vendor/github.com/apache/thrift/lib/go/thrift/application_exception_test.go (2 changes, generated, vendored)
@@ -25,7 +25,7 @@ import (

func TestTApplicationException(t *testing.T) {
exc := NewTApplicationException(UNKNOWN_APPLICATION_EXCEPTION, "")
if exc.Error() != "" {
if exc.Error() != defaultApplicationExceptionMessage[UNKNOWN_APPLICATION_EXCEPTION] {
t.Fatalf("Expected empty string for exception but found '%s'", exc.Error())
}
if exc.TypeId() != UNKNOWN_APPLICATION_EXCEPTION {

vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go (6 changes, generated, vendored)
@@ -90,7 +90,8 @@ func (p *TSSLSocket) Open() error {
// If we have a hostname, we need to pass the hostname to tls.Dial for
// certificate hostname checks.
if p.hostPort != "" {
if p.conn, err = tls.Dial("tcp", p.hostPort, p.cfg); err != nil {
if p.conn, err = tls.DialWithDialer(&net.Dialer{
Timeout: p.timeout}, "tcp", p.hostPort, p.cfg); err != nil {
return NewTTransportException(NOT_OPEN, err.Error())
}
} else {
@@ -106,7 +107,8 @@ func (p *TSSLSocket) Open() error {
if len(p.addr.String()) == 0 {
return NewTTransportException(NOT_OPEN, "Cannot open bad address.")
}
if p.conn, err = tls.Dial(p.addr.Network(), p.addr.String(), p.cfg); err != nil {
if p.conn, err = tls.DialWithDialer(&net.Dialer{
Timeout: p.timeout}, p.addr.Network(), p.addr.String(), p.cfg); err != nil {
return NewTTransportException(NOT_OPEN, err.Error())
}
}
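
The change above swaps tls.Dial for tls.DialWithDialer so that the socket's configured timeout bounds the TCP connect and TLS handshake as a whole. A minimal standalone sketch of the same call; the host and the 5-second timeout are assumed values for illustration.

package main

import (
    "crypto/tls"
    "fmt"
    "net"
    "time"
)

func main() {
    // The dialer's Timeout covers both the TCP connect and the TLS handshake,
    // which a bare tls.Dial with no deadline would leave unbounded.
    dialer := &net.Dialer{Timeout: 5 * time.Second} // assumed timeout value
    conn, err := tls.DialWithDialer(dialer, "tcp", "example.com:443", &tls.Config{})
    if err != nil {
        fmt.Println("connect failed:", err)
        return
    }
    defer conn.Close()
    fmt.Printf("negotiated TLS version 0x%04x\n", conn.ConnectionState().Version)
}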

vendor/github.com/apache/thrift/lib/haxe/test/Makefile.am (1 change, generated, vendored)
@@ -17,7 +17,6 @@
# under the License.
#

THRIFT = $(top_builddir)/compiler/cpp/thrift
THRIFTCMD = $(THRIFT) --gen haxe -r
THRIFTTEST = $(top_srcdir)/test/ThriftTest.thrift
AGGR = $(top_srcdir)/contrib/async-test/aggr.thrift

vendor/github.com/apache/thrift/lib/hs/thrift.cabal (2 changes, generated, vendored)
@@ -40,7 +40,7 @@ Library
Hs-Source-Dirs:
src
Build-Depends:
base >= 4, base < 5, containers, ghc-prim, attoparsec, binary, bytestring >= 0.10, base64-bytestring, hashable, HTTP, text, hspec-core < 2.4.0, unordered-containers >= 0.2.6, vector >= 0.10.12.2, QuickCheck >= 2.8.2, split
base >= 4, base < 5, containers, ghc-prim, attoparsec, binary, bytestring >= 0.10, base64-bytestring, hashable, HTTP, text, hspec-core > 2.4.0, unordered-containers >= 0.2.6, vector >= 0.10.12.2, QuickCheck >= 2.8.2, split
if flag(network-uri)
build-depends: network-uri >= 2.6, network >= 2.6
else
Some files were not shown because too many files have changed in this diff.