diff --git a/api/datastore/datastore.go b/api/datastore/datastore.go
index 67f6a25c4..6b0a80242 100644
--- a/api/datastore/datastore.go
+++ b/api/datastore/datastore.go
@@ -5,11 +5,21 @@ import (
"net/url"
"github.com/Sirupsen/logrus"
+ "gitlab-odx.oracle.com/odx/functions/api/datastore/internal/datastoreutil"
"gitlab-odx.oracle.com/odx/functions/api/datastore/sql"
"gitlab-odx.oracle.com/odx/functions/api/models"
)
func New(dbURL string) (models.Datastore, error) {
+ ds, err := newds(dbURL)
+ if err != nil {
+ return nil, err
+ }
+
+ return datastoreutil.MetricDS(datastoreutil.NewValidator(ds)), nil
+}
+
+func newds(dbURL string) (models.Datastore, error) {
u, err := url.Parse(dbURL)
if err != nil {
logrus.WithError(err).WithFields(logrus.Fields{"url": dbURL}).Fatal("bad DB URL")
diff --git a/api/datastore/internal/datastoreutil/metrics.go b/api/datastore/internal/datastoreutil/metrics.go
new file mode 100644
index 000000000..057f705e6
--- /dev/null
+++ b/api/datastore/internal/datastoreutil/metrics.go
@@ -0,0 +1,122 @@
+package datastoreutil
+
+import (
+ "context"
+
+ "github.com/jmoiron/sqlx"
+ "github.com/opentracing/opentracing-go"
+ "gitlab-odx.oracle.com/odx/functions/api/models"
+)
+
+func MetricDS(ds models.Datastore) models.Datastore {
+ return &metricds{ds}
+}
+
+type metricds struct {
+ ds models.Datastore
+}
+
+func (m *metricds) GetApp(ctx context.Context, appName string) (*models.App, error) {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "ds_get_app")
+ defer span.Finish()
+ return m.ds.GetApp(ctx, appName)
+}
+
+func (m *metricds) GetApps(ctx context.Context, filter *models.AppFilter) ([]*models.App, error) {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "ds_get_apps")
+ defer span.Finish()
+ return m.ds.GetApps(ctx, filter)
+}
+
+func (m *metricds) InsertApp(ctx context.Context, app *models.App) (*models.App, error) {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "ds_insert_app")
+ defer span.Finish()
+ return m.ds.InsertApp(ctx, app)
+}
+
+func (m *metricds) UpdateApp(ctx context.Context, app *models.App) (*models.App, error) {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "ds_update_app")
+ defer span.Finish()
+ return m.ds.UpdateApp(ctx, app)
+}
+
+func (m *metricds) RemoveApp(ctx context.Context, appName string) error {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "ds_remove_app")
+ defer span.Finish()
+ return m.ds.RemoveApp(ctx, appName)
+}
+
+func (m *metricds) GetRoute(ctx context.Context, appName, routePath string) (*models.Route, error) {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "ds_get_route")
+ defer span.Finish()
+ return m.ds.GetRoute(ctx, appName, routePath)
+}
+
+func (m *metricds) GetRoutes(ctx context.Context, filter *models.RouteFilter) (routes []*models.Route, err error) {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "ds_get_routes")
+ defer span.Finish()
+ return m.ds.GetRoutes(ctx, filter)
+}
+
+func (m *metricds) GetRoutesByApp(ctx context.Context, appName string, filter *models.RouteFilter) (routes []*models.Route, err error) {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "ds_get_routes_by_app")
+ defer span.Finish()
+ return m.ds.GetRoutesByApp(ctx, appName, filter)
+}
+
+func (m *metricds) InsertRoute(ctx context.Context, route *models.Route) (*models.Route, error) {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "ds_insert_route")
+ defer span.Finish()
+ return m.ds.InsertRoute(ctx, route)
+}
+
+func (m *metricds) UpdateRoute(ctx context.Context, route *models.Route) (*models.Route, error) {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "ds_update_route")
+ defer span.Finish()
+ return m.ds.UpdateRoute(ctx, route)
+}
+
+func (m *metricds) RemoveRoute(ctx context.Context, appName, routePath string) error {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "ds_remove_route")
+ defer span.Finish()
+ return m.ds.RemoveRoute(ctx, appName, routePath)
+}
+
+func (m *metricds) InsertTask(ctx context.Context, task *models.Task) error {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "ds_insert_task")
+ defer span.Finish()
+ return m.ds.InsertTask(ctx, task)
+}
+
+func (m *metricds) GetTask(ctx context.Context, callID string) (*models.FnCall, error) {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "ds_get_task")
+ defer span.Finish()
+ return m.ds.GetTask(ctx, callID)
+}
+
+func (m *metricds) GetTasks(ctx context.Context, filter *models.CallFilter) (models.FnCalls, error) {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "ds_get_tasks")
+ defer span.Finish()
+ return m.ds.GetTasks(ctx, filter)
+}
+
+func (m *metricds) InsertLog(ctx context.Context, callID string, callLog string) error {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "ds_insert_log")
+ defer span.Finish()
+ return m.ds.InsertLog(ctx, callID, callLog)
+}
+
+func (m *metricds) GetLog(ctx context.Context, callID string) (*models.FnCallLog, error) {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "ds_get_log")
+ defer span.Finish()
+ return m.ds.GetLog(ctx, callID)
+}
+
+func (m *metricds) DeleteLog(ctx context.Context, callID string) error {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "ds_delete_log")
+ defer span.Finish()
+ return m.ds.DeleteLog(ctx, callID)
+}
+
+// GetDatabase takes no context and returns immediately, so there is no span to record.
+func (m *metricds) GetDatabase() *sqlx.DB { return m.ds.GetDatabase() }
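Note on the wrapper above: `StartSpanFromContext` opens a child of whatever span is already in the context, so each datastore call nests under the request-level span started by the HTTP middleware added later in this change. A minimal, self-contained sketch of the same decorator pattern follows; the `Store` interface and all names in it are made up for illustration and are not part of this diff.

```go
package main

import (
	"context"
	"fmt"

	opentracing "github.com/opentracing/opentracing-go"
)

// Store is a stand-in for models.Datastore.
type Store interface {
	Get(ctx context.Context, key string) (string, error)
}

type mapStore map[string]string

func (m mapStore) Get(_ context.Context, key string) (string, error) { return m[key], nil }

// tracedStore decorates Store the same way metricds decorates models.Datastore.
type tracedStore struct{ next Store }

func (t tracedStore) Get(ctx context.Context, key string) (string, error) {
	span, ctx := opentracing.StartSpanFromContext(ctx, "store_get")
	defer span.Finish()
	return t.next.Get(ctx, key)
}

func main() {
	var s Store = tracedStore{next: mapStore{"a": "1"}}
	// Without a tracer registered the global tracer is a no-op, so this runs as-is.
	root, ctx := opentracing.StartSpanFromContext(context.Background(), "request")
	defer root.Finish()
	v, _ := s.Get(ctx, "a") // "store_get" is recorded as a child of "request"
	fmt.Println(v)
}
```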
diff --git a/api/datastore/internal/datastoreutil/shared.go b/api/datastore/internal/datastoreutil/shared.go
deleted file mode 100644
index 0842eb934..000000000
--- a/api/datastore/internal/datastoreutil/shared.go
+++ /dev/null
@@ -1,169 +0,0 @@
-package datastoreutil
-
-import (
- "bytes"
- "database/sql"
- "encoding/json"
- "fmt"
- "strings"
-
- "gitlab-odx.oracle.com/odx/functions/api/models"
-)
-
-// TODO scrap for sqlx
-
-type RowScanner interface {
- Scan(dest ...interface{}) error
-}
-
-func ScanLog(scanner RowScanner, log *models.FnCallLog) error {
- return scanner.Scan(
- &log.CallID,
- &log.Log,
- )
-}
-
-func ScanRoute(scanner RowScanner, route *models.Route) error {
- var headerStr string
- var configStr string
-
- err := scanner.Scan(
- &route.AppName,
- &route.Path,
- &route.Image,
- &route.Format,
- &route.Memory,
- &route.Type,
- &route.Timeout,
- &route.IdleTimeout,
- &headerStr,
- &configStr,
- )
- if err != nil {
- return err
- }
-
- if len(headerStr) > 0 {
- err = json.Unmarshal([]byte(headerStr), &route.Headers)
- if err != nil {
- return err
- }
- }
-
- if len(configStr) > 0 {
- err = json.Unmarshal([]byte(configStr), &route.Config)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func ScanApp(scanner RowScanner, app *models.App) error {
- var configStr string
-
- err := scanner.Scan(
- &app.Name,
- &configStr,
- )
- if err != nil {
- return err
- }
- if len(configStr) > 0 {
- err = json.Unmarshal([]byte(configStr), &app.Config)
- if err != nil {
- return err
- }
- }
-
- return nil
-
-}
-
-func BuildFilterRouteQuery(filter *models.RouteFilter, whereStm, andStm string) (string, []interface{}) {
- if filter == nil {
- return "", nil
- }
- var b bytes.Buffer
- var args []interface{}
-
- where := func(colOp, val string) {
- if val != "" {
- args = append(args, val)
- if len(args) == 1 {
- fmt.Fprintf(&b, whereStm, colOp)
- } else {
- //TODO: maybe better way to detect/driver SQL dialect-specific things
- if strings.Contains(whereStm, "$") {
- // PgSQL specific
- fmt.Fprintf(&b, andStm, colOp, len(args))
- } else {
- // MySQL specific
- fmt.Fprintf(&b, andStm, colOp)
- }
- }
- }
- }
-
- where("path =", filter.Path)
- where("app_name =", filter.AppName)
- where("image =", filter.Image)
-
- return b.String(), args
-}
-
-func BuildFilterAppQuery(filter *models.AppFilter, whereStm string) (string, []interface{}) {
- if filter == nil {
- return "", nil
- }
-
- if filter.Name != "" {
- return whereStm, []interface{}{filter.Name}
- }
-
- return "", nil
-}
-
-func BuildFilterCallQuery(filter *models.CallFilter, whereStm, andStm string) (string, []interface{}) {
- if filter == nil {
- return "", nil
- }
- var b bytes.Buffer
- var args []interface{}
-
- where := func(colOp, val string) {
- if val != "" {
- args = append(args, val)
- if len(args) == 1 {
- fmt.Fprintf(&b, whereStm, colOp)
- } else {
- fmt.Fprintf(&b, andStm, colOp)
- }
- }
- }
-
- where("path =", filter.Path)
- where("app_name =", filter.AppName)
-
- return b.String(), args
-}
-
-func ScanCall(scanner RowScanner, call *models.FnCall) error {
- err := scanner.Scan(
- &call.ID,
- &call.CreatedAt,
- &call.StartedAt,
- &call.CompletedAt,
- &call.Status,
- &call.AppName,
- &call.Path,
- )
-
- if err == sql.ErrNoRows {
- return models.ErrCallNotFound
- } else if err != nil {
- return err
- }
- return nil
-}
diff --git a/api/datastore/sql/sql.go b/api/datastore/sql/sql.go
index 063e15756..089ffbf60 100644
--- a/api/datastore/sql/sql.go
+++ b/api/datastore/sql/sql.go
@@ -20,7 +20,6 @@ import (
_ "github.com/lib/pq"
"github.com/mattn/go-sqlite3"
_ "github.com/mattn/go-sqlite3"
- "gitlab-odx.oracle.com/odx/functions/api/datastore/internal/datastoreutil"
"gitlab-odx.oracle.com/odx/functions/api/models"
)
@@ -134,8 +133,7 @@ func New(url *url.URL) (models.Datastore, error) {
}
}
- sqlDatastore := &sqlStore{db: db}
- return datastoreutil.NewValidator(sqlDatastore), nil
+ return &sqlStore{db: db}, nil
}
func (ds *sqlStore) InsertApp(ctx context.Context, app *models.App) (*models.App, error) {
diff --git a/api/mqs/new.go b/api/mqs/new.go
index f2cbc5e4a..528c045bd 100644
--- a/api/mqs/new.go
+++ b/api/mqs/new.go
@@ -1,16 +1,26 @@
package mqs
import (
+ "context"
"fmt"
"net/url"
"strings"
"github.com/Sirupsen/logrus"
+ "github.com/opentracing/opentracing-go"
"gitlab-odx.oracle.com/odx/functions/api/models"
)
// New will parse the URL and return the correct MQ implementation.
func New(mqURL string) (models.MessageQueue, error) {
+ mq, err := newmq(mqURL)
+ if err != nil {
+ return nil, err
+ }
+ return &metricMQ{mq}, nil
+}
+
+func newmq(mqURL string) (models.MessageQueue, error) {
// Play with URL schemes here: https://play.golang.org/p/xWAf9SpCBW
u, err := url.Parse(mqURL)
if err != nil {
@@ -31,3 +41,25 @@ func New(mqURL string) (models.MessageQueue, error) {
return nil, fmt.Errorf("mq type not supported %v", u.Scheme)
}
+
+type metricMQ struct {
+ mq models.MessageQueue
+}
+
+func (m *metricMQ) Push(ctx context.Context, t *models.Task) (*models.Task, error) {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "mq_push")
+ defer span.Finish()
+ return m.mq.Push(ctx, t)
+}
+
+func (m *metricMQ) Reserve(ctx context.Context) (*models.Task, error) {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "mq_reserve")
+ defer span.Finish()
+ return m.mq.Reserve(ctx)
+}
+
+func (m *metricMQ) Delete(ctx context.Context, t *models.Task) error {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "mq_delete")
+ defer span.Finish()
+ return m.mq.Delete(ctx, t)
+}
diff --git a/api/runner/async_runner.go b/api/runner/async_runner.go
index eb056cd5f..dc5b6b685 100644
--- a/api/runner/async_runner.go
+++ b/api/runner/async_runner.go
@@ -5,6 +5,7 @@ import (
"context"
"encoding/json"
"errors"
+ "io"
"io/ioutil"
"net"
"net/http"
@@ -14,28 +15,32 @@ import (
"time"
"github.com/Sirupsen/logrus"
+ "github.com/opentracing/opentracing-go"
"gitlab-odx.oracle.com/odx/functions/api/models"
"gitlab-odx.oracle.com/odx/functions/api/runner/common"
"gitlab-odx.oracle.com/odx/functions/api/runner/task"
)
func getTask(ctx context.Context, url string) (*models.Task, error) {
+ // TODO shove this ctx into the request?
+ span, _ := opentracing.StartSpanFromContext(ctx, "get_task")
+ defer span.Finish()
+
+ // TODO use a dedicated http client with sane timeouts
resp, err := http.Get(url)
if err != nil {
return nil, err
}
+ defer func() {
+ io.Copy(ioutil.Discard, resp.Body)
+ resp.Body.Close()
+ }()
- body, err := ioutil.ReadAll(resp.Body)
+ var task models.Task
+ err = json.NewDecoder(resp.Body).Decode(&task)
if err != nil {
return nil, err
}
-
- var task models.Task
-
- if err := json.Unmarshal(body, &task); err != nil {
- return nil, err
- }
-
if task.ID == "" {
return nil, nil
}
@@ -65,13 +70,17 @@ func getCfg(t *models.Task) *task.Config {
return cfg
}
-func deleteTask(url string, task *models.Task) error {
+func deleteTask(ctx context.Context, url string, task *models.Task) error {
+ span, _ := opentracing.StartSpanFromContext(ctx, "delete_task")
+ defer span.Finish()
+
// Unmarshal task to be sent over as a json
body, err := json.Marshal(task)
if err != nil {
return err
}
+ // TODO use a reasonable http client..
// Send out Delete request to delete task from queue
req, err := http.NewRequest(http.MethodDelete, url, bytes.NewBuffer(body))
if err != nil {
@@ -106,55 +115,68 @@ func startAsyncRunners(ctx context.Context, url string, rnr *Runner, ds models.D
case <-ctx.Done():
wg.Wait()
return
-
default:
- if !rnr.hasAsyncAvailableMemory() {
- log.Debug("memory full")
- time.Sleep(1 * time.Second)
- continue
- }
- task, err := getTask(ctx, url)
- if err != nil {
- if err, ok := err.(net.Error); ok && err.Timeout() {
- log.WithError(err).Errorln("Could not fetch task, timeout.")
- continue
- }
- log.WithError(err).Error("Could not fetch task")
- time.Sleep(1 * time.Second)
- continue
- }
- if task == nil {
- time.Sleep(1 * time.Second)
- continue
- }
-
- ctx, log := common.LoggerWithFields(ctx, logrus.Fields{"call_id": task.ID})
- log.Info("Running task:", task.ID)
- // log.Infof("Task: %+v", task)
-
- wg.Add(1)
-
- go func() {
- defer wg.Done()
- // Process Task
- _, err := rnr.RunTrackedTask(task, ctx, getCfg(task))
- if err != nil {
- log.WithError(err).Error("Cannot run task")
- }
- log.Debug("Processed task")
- }()
-
- // Delete task from queue
- if err := deleteTask(url, task); err != nil {
- log.WithError(err).Error("Cannot delete task")
- continue
- }
-
- log.Info("Task complete")
}
+
+ if !rnr.hasAsyncAvailableMemory() { // TODO this should be a channel to subscribe to
+ log.Debug("memory full")
+ time.Sleep(1 * time.Second)
+ continue
+ }
+
+ runAsyncTask(ctx, url, rnr, ds, &wg)
}
}
+func runAsyncTask(ctx context.Context, url string, rnr *Runner, ds models.Datastore, wg *sync.WaitGroup) {
+ // start a new span altogether, unrelated to the shared global context
+ span := opentracing.GlobalTracer().StartSpan("async_task")
+ ctx = opentracing.ContextWithSpan(ctx, span)
+ defer span.Finish()
+ log := common.Logger(ctx)
+
+ task, err := getTask(ctx, url)
+ if err != nil {
+ if err, ok := err.(net.Error); ok && err.Timeout() {
+ log.WithError(err).Errorln("Could not fetch task, timeout.")
+ return
+ }
+ log.WithError(err).Error("Could not fetch task")
+ time.Sleep(1 * time.Second)
+ return
+ }
+ if task == nil {
+ time.Sleep(1 * time.Second)
+ return
+ }
+
+ ctx, log = common.LoggerWithFields(ctx, logrus.Fields{"call_id": task.ID})
+ log.Info("Running task async:", task.ID)
+
+ wg.Add(1)
+
+ go func() {
+ defer wg.Done()
+ // Process Task
+ _, err := rnr.RunTrackedTask(task, ctx, getCfg(task))
+ if err != nil {
+ log.WithError(err).Error("Cannot run task")
+ }
+ log.Debug("Processed task")
+ }()
+
+ // TODO this is so wrong... fix later+asap
+
+ // Delete task from queue
+ if err := deleteTask(ctx, url, task); err != nil {
+ log.WithError(err).Error("Cannot delete task")
+ return
+ }
+
+ // TODO even if the delete fails the task still runs; revisit this
+ log.Info("Task complete")
+}
+
func tasksrvURL(tasksrv string) string {
parsed, err := url.Parse(tasksrv)
if err != nil {
diff --git a/api/runner/async_runner_test.go b/api/runner/async_runner_test.go
index 84dfcd4a9..330f93a03 100644
--- a/api/runner/async_runner_test.go
+++ b/api/runner/async_runner_test.go
@@ -120,7 +120,7 @@ func TestGetTaskError(t *testing.T) {
{
"url": "/invalid",
"task": getMockTask(),
- "error": "invalid character 'p' after top-level value",
+ "error": "json: cannot unmarshal number into Go value of type models.Task", // TODO WTF!
},
}
@@ -150,24 +150,25 @@ func TestGetTaskError(t *testing.T) {
func TestDeleteTask(t *testing.T) {
buf := setLogBuffer()
mockTask := getMockTask()
+ ctx := context.Background()
ts := getTestServer([]*models.Task{&mockTask})
defer ts.Close()
url := ts.URL + "/tasks"
- err := deleteTask(url, &mockTask)
+ err := deleteTask(ctx, url, &mockTask)
if err == nil {
t.Log(buf.String())
t.Error("expected error 'Not reserver', got", err)
}
- _, err = getTask(context.Background(), url)
+ _, err = getTask(ctx, url)
if err != nil {
t.Log(buf.String())
t.Error("expected no error, got", err)
}
- err = deleteTask(url, &mockTask)
+ err = deleteTask(ctx, url, &mockTask)
if err != nil {
t.Log(buf.String())
t.Error("expected no error, got", err)
@@ -196,7 +197,7 @@ func testRunner(t *testing.T) (*Runner, context.CancelFunc) {
ctx, cancel := context.WithCancel(context.Background())
ds := datastore.NewMock()
fnl := logs.NewMock()
- r, err := New(ctx, NewFuncLogger(fnl), NewMetricLogger(), ds)
+ r, err := New(ctx, NewFuncLogger(fnl), ds)
if err != nil {
t.Fatal("Test: failed to create new runner")
}
diff --git a/api/runner/drivers/docker/docker.go b/api/runner/drivers/docker/docker.go
index d275799d2..441da4eea 100644
--- a/api/runner/drivers/docker/docker.go
+++ b/api/runner/drivers/docker/docker.go
@@ -17,8 +17,8 @@ import (
manifest "github.com/docker/distribution/manifest/schema1"
"github.com/fsouza/go-dockerclient"
"github.com/heroku/docker-registry-client/registry"
+ "github.com/opentracing/opentracing-go"
"gitlab-odx.oracle.com/odx/functions/api/runner/common"
- "gitlab-odx.oracle.com/odx/functions/api/runner/common/stats"
"gitlab-odx.oracle.com/odx/functions/api/runner/drivers"
)
@@ -268,9 +268,7 @@ func (drv *DockerDriver) Prepare(ctx context.Context, task drivers.ContainerTask
return nil, err
}
- createTimer := drv.NewTimer("docker", "create_container", 1.0)
_, err = drv.docker.CreateContainer(container)
- createTimer.Measure()
if err != nil {
// since we retry under the hood, if the container gets created and retry fails, we can just ignore error
if err != docker.ErrContainerAlreadyExists {
@@ -296,17 +294,15 @@ type cookie struct {
drv *DockerDriver
}
-func (c *cookie) Close() error { return c.drv.removeContainer(c.id) }
+func (c *cookie) Close(ctx context.Context) error { return c.drv.removeContainer(ctx, c.id) }
func (c *cookie) Run(ctx context.Context) (drivers.RunResult, error) {
return c.drv.run(ctx, c.id, c.task)
}
-func (drv *DockerDriver) removeContainer(container string) error {
- removeTimer := drv.NewTimer("docker", "remove_container", 1.0)
- defer removeTimer.Measure()
+func (drv *DockerDriver) removeContainer(ctx context.Context, container string) error {
err := drv.docker.RemoveContainer(docker.RemoveContainerOptions{
- ID: container, Force: true, RemoveVolumes: true})
+ ID: container, Force: true, RemoveVolumes: true, Context: ctx})
if err != nil {
logrus.WithError(err).WithFields(logrus.Fields{"container": container}).Error("error removing container")
@@ -323,7 +319,9 @@ func (drv *DockerDriver) ensureImage(ctx context.Context, task drivers.Container
var config docker.AuthConfiguration // default, tries docker hub w/o user/pass
if task, ok := task.(Auther); ok {
var err error
+ span, _ := opentracing.StartSpanFromContext(ctx, "docker_auth")
config, err = task.DockerAuth()
+ span.Finish()
if err != nil {
return err
}
@@ -334,7 +332,7 @@ func (drv *DockerDriver) ensureImage(ctx context.Context, task drivers.Container
}
// see if we already have it, if not, pull it
- _, err := drv.docker.InspectImage(task.Image())
+ _, err := drv.docker.InspectImage(ctx, task.Image())
if err == docker.ErrNoSuchImage {
err = drv.pullImage(ctx, task, config)
}
@@ -344,15 +342,8 @@ func (drv *DockerDriver) ensureImage(ctx context.Context, task drivers.Container
func (drv *DockerDriver) pullImage(ctx context.Context, task drivers.ContainerTask, config docker.AuthConfiguration) error {
log := common.Logger(ctx)
-
reg, repo, tag := drivers.ParseImage(task.Image())
globalRepo := path.Join(reg, repo)
-
- pullTimer := drv.NewTimer("docker", "pull_image", 1.0)
- defer pullTimer.Measure()
-
- drv.Inc("docker", "pull_image_count."+stats.AsStatField(task.Image()), 1, 1)
-
if reg != "" {
config.ServerAddress = reg
}
@@ -367,7 +358,6 @@ func (drv *DockerDriver) pullImage(ctx context.Context, task drivers.ContainerTa
err = drv.docker.PullImage(docker.PullImageOptions{Repository: globalRepo, Tag: tag, Context: ctx}, config)
if err != nil {
- drv.Inc("task", "error.pull."+stats.AsStatField(task.Image()), 1, 1)
log.WithFields(logrus.Fields{"registry": config.ServerAddress, "username": config.Username, "image": task.Image()}).WithError(err).Error("Failed to pull image")
// TODO need to inspect for hub or network errors and pick.
@@ -397,12 +387,10 @@ func (drv *DockerDriver) run(ctx context.Context, container string, task drivers
mwOut, mwErr := task.Logger()
- timer := drv.NewTimer("docker", "attach_container", 1)
- waiter, err := drv.docker.AttachToContainerNonBlocking(docker.AttachToContainerOptions{
+ waiter, err := drv.docker.AttachToContainerNonBlocking(ctx, docker.AttachToContainerOptions{
Container: container, OutputStream: mwOut, ErrorStream: mwErr,
Stream: true, Logs: true, Stdout: true, Stderr: true,
Stdin: true, InputStream: task.Input()})
- timer.Measure()
if err != nil && ctx.Err() == nil {
// ignore if ctx has errored, rewrite status lay below
return nil, err
@@ -416,10 +404,7 @@ func (drv *DockerDriver) run(ctx context.Context, container string, task drivers
return nil, err
}
- taskTimer := drv.NewTimer("docker", "container_runtime", 1)
-
defer func() {
- taskTimer.Measure()
waiter.Close()
waiter.Wait() // make sure we gather all logs
}()
@@ -528,10 +513,8 @@ func newContainerID(task drivers.ContainerTask) string {
func (drv *DockerDriver) startTask(ctx context.Context, container string) error {
log := common.Logger(ctx)
- startTimer := drv.NewTimer("docker", "start_container", 1.0)
log.WithFields(logrus.Fields{"container": container}).Debug("Starting container execution")
err := drv.docker.StartContainerWithContext(container, nil, ctx)
- startTimer.Measure()
if err != nil {
dockerErr, ok := err.(*docker.Error)
_, containerAlreadyRunning := err.(*docker.ContainerAlreadyRunning)
diff --git a/api/runner/drivers/docker/docker_client.go b/api/runner/drivers/docker/docker_client.go
index 7495d5910..b436fe06d 100644
--- a/api/runner/drivers/docker/docker_client.go
+++ b/api/runner/drivers/docker/docker_client.go
@@ -14,6 +14,8 @@ import (
"github.com/Sirupsen/logrus"
"github.com/fsouza/go-dockerclient"
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/log"
"gitlab-odx.oracle.com/odx/functions/api/runner/common"
)
@@ -29,13 +31,13 @@ const (
type dockerClient interface {
// Each of these are github.com/fsouza/go-dockerclient methods
- AttachToContainerNonBlocking(opts docker.AttachToContainerOptions) (docker.CloseWaiter, error)
+ AttachToContainerNonBlocking(ctx context.Context, opts docker.AttachToContainerOptions) (docker.CloseWaiter, error)
WaitContainerWithContext(id string, ctx context.Context) (int, error)
StartContainerWithContext(id string, hostConfig *docker.HostConfig, ctx context.Context) error
CreateContainer(opts docker.CreateContainerOptions) (*docker.Container, error)
RemoveContainer(opts docker.RemoveContainerOptions) error
PullImage(opts docker.PullImageOptions, auth docker.AuthConfiguration) error
- InspectImage(name string) (*docker.Image, error)
+ InspectImage(ctx context.Context, name string) (*docker.Image, error)
Stats(opts docker.StatsOptions) error
}
@@ -95,20 +97,24 @@ type dockerWrap struct {
}
func (d *dockerWrap) retry(ctx context.Context, f func() error) error {
- log := common.Logger(ctx)
+ var i int
+ span := opentracing.SpanFromContext(ctx)
+ defer func() {
+   if span != nil { // SpanFromContext may return nil if the caller set no span
+     span.LogFields(log.Int("docker_call_retries", i))
+   }
+ }()
+
+ logger := common.Logger(ctx)
var b common.Backoff
- for {
+ for ; ; i++ {
select {
case <-ctx.Done():
d.Inc("task", "fail.docker", 1, 1)
- log.WithError(ctx.Err()).Warnf("retrying on docker errors timed out, restart docker or rotate this instance?")
+ logger.WithError(ctx.Err()).Warnf("retrying on docker errors timed out, restart docker or rotate this instance?")
return ctx.Err()
default:
}
err := filter(ctx, f())
if common.IsTemporary(err) || isDocker50x(err) {
- log.WithError(err).Warn("docker temporary error, retrying")
+ logger.WithError(err).Warn("docker temporary error, retrying")
b.Sleep()
d.Inc("task", "error.docker", 1, 1)
continue
@@ -183,24 +189,11 @@ func filterNoSuchContainer(ctx context.Context, err error) error {
return err
}
-func filterNotRunning(ctx context.Context, err error) error {
- log := common.Logger(ctx)
- if err == nil {
- return nil
- }
+func (d *dockerWrap) AttachToContainerNonBlocking(ctx context.Context, opts docker.AttachToContainerOptions) (w docker.CloseWaiter, err error) {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "docker_attach_container")
+ defer span.Finish()
- _, containerNotRunning := err.(*docker.ContainerNotRunning)
- dockerErr, ok := err.(*docker.Error)
- if containerNotRunning || (ok && dockerErr.Status == 304) {
- log.WithError(err).Error("filtering error")
- return nil
- }
-
- return err
-}
-
-func (d *dockerWrap) AttachToContainerNonBlocking(opts docker.AttachToContainerOptions) (w docker.CloseWaiter, err error) {
- ctx, cancel := context.WithTimeout(context.Background(), retryTimeout)
+ ctx, cancel := context.WithTimeout(ctx, retryTimeout)
defer cancel()
err = d.retry(ctx, func() error {
w, err = d.docker.AttachToContainerNonBlocking(opts)
@@ -214,6 +207,8 @@ func (d *dockerWrap) AttachToContainerNonBlocking(opts docker.AttachToContainerO
}
func (d *dockerWrap) WaitContainerWithContext(id string, ctx context.Context) (code int, err error) {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "docker_wait_container")
+ defer span.Finish()
err = d.retry(ctx, func() error {
code, err = d.dockerNoTimeout.WaitContainerWithContext(id, ctx)
return err
@@ -222,6 +217,8 @@ func (d *dockerWrap) WaitContainerWithContext(id string, ctx context.Context) (c
}
func (d *dockerWrap) StartContainerWithContext(id string, hostConfig *docker.HostConfig, ctx context.Context) (err error) {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "docker_start_container")
+ defer span.Finish()
err = d.retry(ctx, func() error {
err = d.dockerNoTimeout.StartContainerWithContext(id, hostConfig, ctx)
if _, ok := err.(*docker.NoSuchContainer); ok {
@@ -234,7 +231,9 @@ func (d *dockerWrap) StartContainerWithContext(id string, hostConfig *docker.Hos
}
func (d *dockerWrap) CreateContainer(opts docker.CreateContainerOptions) (c *docker.Container, err error) {
- err = d.retry(opts.Context, func() error {
+ span, ctx := opentracing.StartSpanFromContext(opts.Context, "docker_create_container")
+ defer span.Finish()
+ err = d.retry(ctx, func() error {
c, err = d.dockerNoTimeout.CreateContainer(opts)
return err
})
@@ -242,7 +241,9 @@ func (d *dockerWrap) CreateContainer(opts docker.CreateContainerOptions) (c *doc
}
func (d *dockerWrap) PullImage(opts docker.PullImageOptions, auth docker.AuthConfiguration) (err error) {
- err = d.retry(opts.Context, func() error {
+ span, ctx := opentracing.StartSpanFromContext(opts.Context, "docker_pull_image")
+ defer span.Finish()
+ err = d.retry(ctx, func() error {
err = d.dockerNoTimeout.PullImage(opts, auth)
return err
})
@@ -250,7 +251,13 @@ func (d *dockerWrap) PullImage(opts docker.PullImageOptions, auth docker.AuthCon
}
func (d *dockerWrap) RemoveContainer(opts docker.RemoveContainerOptions) (err error) {
- ctx, cancel := context.WithTimeout(context.Background(), retryTimeout)
+ // extract the span, but do not keep the context, since the enclosing context
+ // may be timed out, and we still want to remove the container. TODO: consider doing this in the caller.
+ span, _ := opentracing.StartSpanFromContext(opts.Context, "docker_remove_container")
+ defer span.Finish()
+ ctx := opentracing.ContextWithSpan(context.Background(), span)
+
+ ctx, cancel := context.WithTimeout(ctx, retryTimeout)
defer cancel()
err = d.retry(ctx, func() error {
err = d.docker.RemoveContainer(opts)
@@ -259,8 +266,10 @@ func (d *dockerWrap) RemoveContainer(opts docker.RemoveContainerOptions) (err er
return filterNoSuchContainer(ctx, err)
}
-func (d *dockerWrap) InspectImage(name string) (i *docker.Image, err error) {
- ctx, cancel := context.WithTimeout(context.Background(), retryTimeout)
+func (d *dockerWrap) InspectImage(ctx context.Context, name string) (i *docker.Image, err error) {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "docker_inspect_image")
+ defer span.Finish()
+ ctx, cancel := context.WithTimeout(ctx, retryTimeout)
defer cancel()
err = d.retry(ctx, func() error {
i, err = d.docker.InspectImage(name)
diff --git a/api/runner/drivers/docker/docker_test.go b/api/runner/drivers/docker/docker_test.go
index 183ffb94c..3a8fd8ebb 100644
--- a/api/runner/drivers/docker/docker_test.go
+++ b/api/runner/drivers/docker/docker_test.go
@@ -47,7 +47,7 @@ func TestRunnerDocker(t *testing.T) {
if err != nil {
t.Fatal("Couldn't prepare task test")
}
- defer cookie.Close()
+ defer cookie.Close(ctx)
result, err := cookie.Run(ctx)
if err != nil {
@@ -73,7 +73,7 @@ func TestRunnerDockerStdin(t *testing.T) {
if err != nil {
t.Fatal("Couldn't prepare task test")
}
- defer cookie.Close()
+ defer cookie.Close(ctx)
result, err := cookie.Run(ctx)
if err != nil {
diff --git a/api/runner/drivers/driver.go b/api/runner/drivers/driver.go
index 364a85934..2ae63a11d 100644
--- a/api/runner/drivers/driver.go
+++ b/api/runner/drivers/driver.go
@@ -17,7 +17,8 @@ import (
// Clients should always call Close() on a DriverCookie after they are done
// with it.
type Cookie interface {
- io.Closer
+ // Close should clean up any resources the cookie was using, or was going to use.
+ Close(ctx context.Context) error
// Run should execute task on the implementation.
// RunResult captures the result of task execution. This means if task
diff --git a/api/runner/drivers/mock/mocker.go b/api/runner/drivers/mock/mocker.go
index 301c37852..9060d6c02 100644
--- a/api/runner/drivers/mock/mocker.go
+++ b/api/runner/drivers/mock/mocker.go
@@ -24,7 +24,7 @@ type cookie struct {
m *Mocker
}
-func (c *cookie) Close() error { return nil }
+func (c *cookie) Close(context.Context) error { return nil }
func (c *cookie) Run(ctx context.Context) (drivers.RunResult, error) {
c.m.count++
diff --git a/api/runner/metric_logger.go b/api/runner/metric_logger.go
deleted file mode 100644
index a6ab4811c..000000000
--- a/api/runner/metric_logger.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package runner
-
-import (
- "context"
- "time"
-
- "github.com/Sirupsen/logrus"
- "gitlab-odx.oracle.com/odx/functions/api/runner/common"
-)
-
-type MetricLogger interface {
- Log(context.Context, map[string]interface{})
- LogCount(context.Context, string, int)
- LogGauge(context.Context, string, int)
- LogTime(context.Context, string, time.Duration)
-}
-
-type Metric map[string]interface{}
-
-func NewMetricLogger() MetricLogger {
- return &DefaultMetricLogger{}
-}
-
-type DefaultMetricLogger struct{}
-
-func (l *DefaultMetricLogger) Log(ctx context.Context, metric map[string]interface{}) {
- log := common.Logger(ctx)
- log.WithFields(logrus.Fields(metric)).Info()
-}
-
-func (l *DefaultMetricLogger) LogCount(ctx context.Context, name string, value int) {
- l.Log(ctx, Metric{
- "name": name,
- "value": value,
- "type": "count",
- })
-}
-
-func (l *DefaultMetricLogger) LogTime(ctx context.Context, name string, value time.Duration) {
- l.Log(ctx, Metric{
- "name": name,
- "value": value,
- "type": "time",
- })
-}
-
-func (l *DefaultMetricLogger) LogGauge(ctx context.Context, name string, value int) {
- l.Log(ctx, Metric{
- "name": name,
- "value": value,
- "type": "gauge",
- })
-}
diff --git a/api/runner/runner.go b/api/runner/runner.go
index cae79cea7..aa335d459 100644
--- a/api/runner/runner.go
+++ b/api/runner/runner.go
@@ -14,6 +14,8 @@ import (
"time"
"github.com/Sirupsen/logrus"
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/log"
"gitlab-odx.oracle.com/odx/functions/api/models"
"gitlab-odx.oracle.com/odx/functions/api/runner/common"
"gitlab-odx.oracle.com/odx/functions/api/runner/drivers"
@@ -28,7 +30,6 @@ import (
type Runner struct {
driver drivers.Driver
taskQueue chan *containerTask
- mlog MetricLogger
flog FuncLogger
availableMem int64
usedMem int64
@@ -50,7 +51,7 @@ const (
DefaultIdleTimeout = 30 * time.Second
)
-func New(ctx context.Context, flog FuncLogger, mlog MetricLogger, ds models.Datastore) (*Runner, error) {
+func New(ctx context.Context, flog FuncLogger, ds models.Datastore) (*Runner, error) {
// TODO: Is this really required for the container drivers? Can we remove it?
env := common.NewEnvironment(func(e *common.Environment) {})
@@ -64,7 +65,6 @@ func New(ctx context.Context, flog FuncLogger, mlog MetricLogger, ds models.Data
driver: driver,
taskQueue: make(chan *containerTask, 100),
flog: flog,
- mlog: mlog,
availableMem: getAvailableMemory(),
usedMem: 0,
datastore: ds,
@@ -112,13 +112,8 @@ func (r *Runner) handleTask(task *containerTask) {
time.Sleep(time.Microsecond)
}
- metricBaseName := fmt.Sprintf("run.%s.", task.cfg.AppName)
- r.mlog.LogTime(task.ctx, metricBaseName+"wait_time", waitTime)
- r.mlog.LogTime(task.ctx, "run.wait_time", waitTime)
-
if timedOut {
// Send to a signal to this task saying it cannot run
- r.mlog.LogCount(task.ctx, metricBaseName+"timeout", 1)
task.canRun <- false
return
}
@@ -164,9 +159,35 @@ func (r *Runner) checkMemAndUse(req uint64) bool {
return true
}
+func (r *Runner) awaitSlot(ctask *containerTask) error {
+ span, _ := opentracing.StartSpanFromContext(ctask.ctx, "wait_mem_slot")
+ defer span.Finish()
+ // Check if has enough available memory
+ // If available, use it
+ if !r.checkMemAndUse(ctask.cfg.Memory) {
+ // If not, try add task to the queue
+ select {
+ case r.taskQueue <- ctask:
+ default:
+ span.LogFields(log.Int("queue full", 1))
+ // If queue is full, return error
+ return ErrFullQueue
+ }
+
+ // If task was added to the queue, wait for permission
+ if ok := <-ctask.canRun; !ok {
+ span.LogFields(log.Int("memory timeout", 1))
+ // This task timed out, not available memory
+ return ErrTimeOutNoMemory
+ }
+ }
+ return nil
+}
+
// run is responsible for running 1 instance of a docker container
func (r *Runner) run(ctx context.Context, cfg *task.Config) (drivers.RunResult, error) {
- var err error
+ span, ctx := opentracing.StartSpanFromContext(ctx, "run_container")
+ defer span.Finish()
if cfg.Memory == 0 {
cfg.Memory = 128
@@ -183,36 +204,19 @@ func (r *Runner) run(ctx context.Context, cfg *task.Config) (drivers.RunResult,
canRun: make(chan bool),
}
- metricBaseName := fmt.Sprintf("run.%s.", cfg.AppName)
- r.mlog.LogCount(ctx, metricBaseName+"requests", 1)
-
- // Check if has enough available memory
- // If available, use it
- if !r.checkMemAndUse(cfg.Memory) {
- // If not, try add task to the queue
- select {
- case r.taskQueue <- ctask:
- default:
- // If queue is full, return error
- r.mlog.LogCount(ctx, "queue.full", 1)
- return nil, ErrFullQueue
- }
-
- // If task was added to the queue, wait for permission
- if ok := <-ctask.canRun; !ok {
- // This task timed out, not available memory
- return nil, ErrTimeOutNoMemory
- }
- } else {
- r.mlog.LogTime(ctx, metricBaseName+"waittime", 0)
- }
- defer r.addUsedMem(-1 * int64(cfg.Memory))
-
- cookie, err := r.driver.Prepare(ctx, ctask)
+ err := r.awaitSlot(ctask)
if err != nil {
return nil, err
}
- defer cookie.Close()
+ defer r.addUsedMem(-1 * int64(cfg.Memory))
+
+ span, pctx := opentracing.StartSpanFromContext(ctx, "prepare")
+ cookie, err := r.driver.Prepare(pctx, ctask)
+ span.Finish()
+ if err != nil {
+ return nil, err
+ }
+ defer cookie.Close(ctx)
select {
case <-cfg.Ready:
@@ -220,23 +224,14 @@ func (r *Runner) run(ctx context.Context, cfg *task.Config) (drivers.RunResult,
close(cfg.Ready)
}
- metricStart := time.Now()
-
- result, err := cookie.Run(ctx)
+ span, rctx := opentracing.StartSpanFromContext(ctx, "run")
+ result, err := cookie.Run(rctx)
+ defer span.Finish() // deferred so the status logged below lands on an open span
if err != nil {
return nil, err
}
- if result.Status() == "success" {
- r.mlog.LogCount(ctx, metricBaseName+"succeeded", 1)
- } else {
- r.mlog.LogCount(ctx, metricBaseName+"error", 1)
- }
-
- metricElapsed := time.Since(metricStart)
- r.mlog.LogTime(ctx, metricBaseName+"time", metricElapsed)
- r.mlog.LogTime(ctx, "run.exec_time", metricElapsed)
-
+ span.LogFields(log.String("status", result.Status()))
return result, nil
}
diff --git a/api/runner/runner_test.go b/api/runner/runner_test.go
index 7f69fda1c..d0f408be4 100644
--- a/api/runner/runner_test.go
+++ b/api/runner/runner_test.go
@@ -23,7 +23,7 @@ func TestRunnerHello(t *testing.T) {
ds := datastore.NewMock()
fnl := logs.NewMock()
fLogger := NewFuncLogger(fnl)
- runner, err := New(ctx, fLogger, NewMetricLogger(), ds)
+ runner, err := New(ctx, fLogger, ds)
if err != nil {
t.Fatalf("Test error during New() - %s", err)
}
@@ -82,7 +82,7 @@ func TestRunnerError(t *testing.T) {
ds := datastore.NewMock()
fnl := logs.NewMock()
fLogger := NewFuncLogger(fnl)
- runner, err := New(ctx, fLogger, NewMetricLogger(), ds)
+ runner, err := New(ctx, fLogger, ds)
if err != nil {
t.Fatalf("Test error during New() - %s", err)
}
diff --git a/api/runner/worker.go b/api/runner/worker.go
index c0be3f740..f4d70d8de 100644
--- a/api/runner/worker.go
+++ b/api/runner/worker.go
@@ -10,6 +10,7 @@ import (
"github.com/Sirupsen/logrus"
"github.com/go-openapi/strfmt"
+ "github.com/opentracing/opentracing-go"
"gitlab-odx.oracle.com/odx/functions/api/id"
"gitlab-odx.oracle.com/odx/functions/api/models"
"gitlab-odx.oracle.com/odx/functions/api/runner/drivers"
@@ -279,6 +280,8 @@ func (g *ghostWriter) Write(b []byte) (int, error) {
func (g *ghostWriter) Close() error { return nil }
func (hc *htfn) serve(ctx context.Context) {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "run_hot_container")
+ defer span.Finish()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
cfg := *hc.cfg
@@ -291,6 +294,7 @@ func (hc *htfn) serve(ctx context.Context) {
stderr := &ghostWriter{inner: bwLog}
+ first := true
go func() {
for {
select {
@@ -308,6 +312,19 @@ func (hc *htfn) serve(ctx context.Context) {
logger.Info("Canceling inactive hot function")
cancel()
case t := <-hc.tasks:
+ var span opentracing.Span
+ if first {
+ // TODO this doesn't work as intended; beyond me atm, but the spans do come up.
+ // need a way to add the span from starting container to the first execution, basically.
+ spanHot := opentracing.SpanFromContext(ctx)
+ spanTask := opentracing.SpanFromContext(t.Ctx)
+ span = opentracing.StartSpan("dispatch", opentracing.ChildOf(spanTask.Context()), opentracing.FollowsFrom(spanHot.Context()))
+ ctx = opentracing.ContextWithSpan(t.Ctx, span)
+ first = false
+ } else {
+ span, ctx = opentracing.StartSpanFromContext(t.Ctx, "dispatch")
+ }
+
// swap logs to log to the task logger instead of stderr
tlog := hc.rnr.flog.Writer(ctx, cfg.AppName, cfg.Path, cfg.Image, cfg.ID)
stderr.swap(tlog)
@@ -319,6 +336,7 @@ func (hc *htfn) serve(ctx context.Context) {
status = "error"
logrus.WithField("ctx", ctx).Info("task failed")
}
+ span.Finish()
hc.once()
stderr.swap(bwLog) // swap back out before flush
@@ -338,6 +356,7 @@ func (hc *htfn) serve(ctx context.Context) {
cfg.Stdout = hc.containerOut
cfg.Stderr = stderr
+ // TODO how to tie a span from the first task into this? yikes
result, err := hc.rnr.run(ctx, &cfg)
if err != nil {
logger.WithError(err).Error("hot function failure detected")
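For readers unfamiliar with the two reference types used in the `dispatch` span above: `ChildOf` declares that the referenced span depends on the new span's result, while `FollowsFrom` only records causality without that dependency. A rough sketch of those semantics, with illustrative names that are not from this repo:

```go
package main

import (
	"context"

	opentracing "github.com/opentracing/opentracing-go"
)

func main() {
	// The span of the request that submitted the task.
	request, ctx := opentracing.StartSpanFromContext(context.Background(), "request")
	defer request.Finish()

	// The long-lived hot container has its own span, started independently.
	hot := opentracing.StartSpan("hot_container")
	defer hot.Finish()

	// Dispatching one task into the hot container: the request waits on it
	// (ChildOf), while the container span caused it but does not wait on it
	// (FollowsFrom).
	dispatch := opentracing.StartSpan("dispatch",
		opentracing.ChildOf(opentracing.SpanFromContext(ctx).Context()),
		opentracing.FollowsFrom(hot.Context()))
	defer dispatch.Finish()
}
```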
diff --git a/api/server/init.go b/api/server/init.go
index db60ddd0d..850edf6bf 100644
--- a/api/server/init.go
+++ b/api/server/init.go
@@ -26,6 +26,7 @@ func init() {
viper.SetDefault(EnvDBURL, fmt.Sprintf("sqlite3://%s/data/fn.db", cwd))
viper.SetDefault(EnvLOGDBURL, "") // default to just using DB url
viper.SetDefault(EnvPort, 8080)
+ viper.SetDefault(EnvZipkinURL, "") // off default
viper.SetDefault(EnvAPIURL, fmt.Sprintf("http://127.0.0.1:%d", viper.GetInt(EnvPort)))
viper.AutomaticEnv() // picks up env vars automatically
logLevel, err := logrus.ParseLevel(viper.GetString(EnvLogLevel))
diff --git a/api/server/runner_async_test.go b/api/server/runner_async_test.go
index ac919c2a2..2df3fd168 100644
--- a/api/server/runner_async_test.go
+++ b/api/server/runner_async_test.go
@@ -32,7 +32,7 @@ func testRouterAsync(ds models.Datastore, mq models.MessageQueue, rnr *runner.Ru
r := s.Router
r.Use(gin.Logger())
- r.Use(prepareMiddleware(ctx))
+ s.Router.Use(loggerWrap)
s.bindHandlers(ctx)
return r
}
diff --git a/api/server/runner_test.go b/api/server/runner_test.go
index edfa2f0f8..64d4f86bb 100644
--- a/api/server/runner_test.go
+++ b/api/server/runner_test.go
@@ -18,7 +18,7 @@ func testRunner(t *testing.T) (*runner.Runner, context.CancelFunc) {
ctx, cancel := context.WithCancel(context.Background())
ds := datastore.NewMock()
fnl := logs.NewMock()
- r, err := runner.New(ctx, runner.NewFuncLogger(fnl), runner.NewMetricLogger(), ds)
+ r, err := runner.New(ctx, runner.NewFuncLogger(fnl), ds)
if err != nil {
t.Fatal("Test: failed to create new runner")
}
diff --git a/api/server/server.go b/api/server/server.go
index e6b839b92..b98a82bc3 100644
--- a/api/server/server.go
+++ b/api/server/server.go
@@ -15,6 +15,9 @@ import (
"github.com/Sirupsen/logrus"
"github.com/ccirello/supervisor"
"github.com/gin-gonic/gin"
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+ "github.com/openzipkin/zipkin-go-opentracing"
"github.com/patrickmn/go-cache"
"github.com/spf13/viper"
"gitlab-odx.oracle.com/odx/functions/api"
@@ -28,12 +31,13 @@ import (
)
const (
- EnvLogLevel = "log_level"
- EnvMQURL = "mq_url"
- EnvDBURL = "db_url"
- EnvLOGDBURL = "logstore_url"
- EnvPort = "port" // be careful, Gin expects this variable to be "port"
- EnvAPIURL = "api_url"
+ EnvLogLevel = "log_level"
+ EnvMQURL = "mq_url"
+ EnvDBURL = "db_url"
+ EnvLOGDBURL = "logstore_url"
+ EnvPort = "port" // be careful, Gin expects this variable to be "port"
+ EnvAPIURL = "api_url"
+ EnvZipkinURL = "zipkin_url"
)
type Server struct {
@@ -84,10 +88,9 @@ func NewFromEnv(ctx context.Context) *Server {
// New creates a new Functions server with the passed in datastore, message queue and API URL
func New(ctx context.Context, ds models.Datastore, mq models.MessageQueue, logDB models.FnLog, apiURL string, opts ...ServerOption) *Server {
- metricLogger := runner.NewMetricLogger()
funcLogger := runner.NewFuncLogger(logDB)
- rnr, err := runner.New(ctx, funcLogger, metricLogger, ds)
+ rnr, err := runner.New(ctx, funcLogger, ds)
if err != nil {
logrus.WithError(err).Fatalln("Failed to create a runner")
return nil
@@ -105,7 +108,8 @@ func New(ctx context.Context, ds models.Datastore, mq models.MessageQueue, logDB
}
setMachineId()
- s.Router.Use(prepareMiddleware(ctx))
+ setTracer()
+ s.Router.Use(loggerWrap, traceWrap)
s.bindHandlers(ctx)
for _, opt := range opts {
@@ -117,6 +121,55 @@ func New(ctx context.Context, ds models.Datastore, mq models.MessageQueue, logDB
return s
}
+// traceWrap picks up any trace context propagated in the request headers and starts a server span for the request.
+func traceWrap(c *gin.Context) {
+ // try to grab a span from the request if made from another service, ignore err if not
+ wireContext, _ := opentracing.GlobalTracer().Extract(
+ opentracing.HTTPHeaders,
+ opentracing.HTTPHeadersCarrier(c.Request.Header))
+
+ // Create the span referring to the RPC client if available.
+ // If wireContext == nil, a root span will be created.
+ // TODO we should add more tags?
+ serverSpan := opentracing.StartSpan("serve_http", ext.RPCServerOption(wireContext), opentracing.Tag{"path", c.Request.URL.Path})
+ defer serverSpan.Finish()
+
+ ctx := opentracing.ContextWithSpan(c.Request.Context(), serverSpan)
+ c.Request = c.Request.WithContext(ctx)
+ c.Next()
+}
+
+func setTracer() {
+ var (
+ debugMode = false
+ serviceName = "fn-server"
+ serviceHostPort = "localhost:8080" // TODO make configurable
+ zipkinHTTPEndpoint = viper.GetString(EnvZipkinURL)
+ // ex: "http://zipkin:9411/api/v1/spans"
+ )
+
+ if zipkinHTTPEndpoint == "" {
+ return
+ }
+
+ logger := zipkintracer.LoggerFunc(func(i ...interface{}) error { logrus.Error(i...); return nil })
+
+ collector, err := zipkintracer.NewHTTPCollector(zipkinHTTPEndpoint, zipkintracer.HTTPLogger(logger))
+ if err != nil {
+ logrus.WithError(err).Fatalln("couldn't start trace collector")
+ }
+ tracer, err := zipkintracer.NewTracer(zipkintracer.NewRecorder(collector, debugMode, serviceHostPort, serviceName),
+ zipkintracer.ClientServerSameSpan(true),
+ zipkintracer.TraceID128Bit(true),
+ )
+ if err != nil {
+ logrus.WithError(err).Fatalln("couldn't start tracer")
+ }
+
+ opentracing.SetGlobalTracer(tracer)
+ logrus.WithFields(logrus.Fields{"url": zipkinHTTPEndpoint}).Info("started tracer")
+}
+
func setMachineId() {
port := uint16(viper.GetInt(EnvPort))
addr := whoAmI().To4()
@@ -150,22 +203,19 @@ func whoAmI() net.IP {
return nil
}
-// todo: remove this or change name
-func prepareMiddleware(ctx context.Context) gin.HandlerFunc {
- return func(c *gin.Context) {
- ctx, _ := common.LoggerWithFields(ctx, extractFields(c))
+func loggerWrap(c *gin.Context) {
+ ctx, _ := common.LoggerWithFields(c.Request.Context(), extractFields(c))
- if appName := c.Param(api.CApp); appName != "" {
- c.Set(api.AppName, appName)
- }
-
- if routePath := c.Param(api.CRoute); routePath != "" {
- c.Set(api.Path, routePath)
- }
-
- c.Request = c.Request.WithContext(ctx)
- c.Next()
+ if appName := c.Param(api.CApp); appName != "" {
+ c.Set(api.AppName, appName)
}
+
+ if routePath := c.Param(api.CRoute); routePath != "" {
+ c.Set(api.Path, routePath)
+ }
+
+ c.Request = c.Request.WithContext(ctx)
+ c.Next()
}
func DefaultEnqueue(ctx context.Context, mq models.MessageQueue, task *models.Task) (*models.Task, error) {
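traceWrap above tries to `Extract` a propagated span context from the incoming headers, so a caller that wants its trace continued inside the functions server has to `Inject` one. A client-side sketch under that assumption; the URL and function names are placeholders and not part of this change:

```go
package main

import (
	"net/http"

	opentracing "github.com/opentracing/opentracing-go"
)

// callFunctions shows the client half of the propagation: serialize the
// current span context into the request headers so the server-side
// Extract(opentracing.HTTPHeaders, ...) in traceWrap joins the same trace.
func callFunctions(span opentracing.Span) error {
	req, err := http.NewRequest("GET", "http://localhost:8080/v1/apps", nil)
	if err != nil {
		return err
	}
	if err := opentracing.GlobalTracer().Inject(
		span.Context(),
		opentracing.HTTPHeaders,
		opentracing.HTTPHeadersCarrier(req.Header),
	); err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}

func main() {
	span := opentracing.StartSpan("client_request")
	defer span.Finish()
	_ = callFunctions(span)
}
```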
diff --git a/api/server/server_test.go b/api/server/server_test.go
index d0806329d..a2ff1c74e 100644
--- a/api/server/server_test.go
+++ b/api/server/server_test.go
@@ -38,7 +38,7 @@ func testServer(ds models.Datastore, mq models.MessageQueue, logDB models.FnLog,
r := s.Router
r.Use(gin.Logger())
- s.Router.Use(prepareMiddleware(ctx))
+ s.Router.Use(loggerWrap)
s.bindHandlers(ctx)
return s
}
diff --git a/docs/operating/metrics.md b/docs/operating/metrics.md
index 1cc2bb3e5..0fb60cc91 100644
--- a/docs/operating/metrics.md
+++ b/docs/operating/metrics.md
@@ -1,23 +1,16 @@
# Metrics
-Metrics are emitted via the logs for few couple of reasons:
+You can use Zipkin to gather traces and timing data from the functions server.
-1. Everything supports STDERR.
-2. User can optionally use them, if not, they just end up in the logs.
-3. No particular metrics system required, in other words, all metrics systems can be used via adapters (see below).
+Getting a Zipkin node running is easy; the quickstart provides a Docker image:
-## Metrics
+[zipkin page](http://zipkin.io/pages/quickstart.html)
-The metrics format follows logfmt format and looks like this:
+With Zipkin running, point the functions server at it with an environment variable:
-```
-metric=someevent value=1 type=count
-metric=somegauge value=50 type=gauge
-```
+`ZIPKIN_URL=http://zipkin:9411/api/v1/spans`
-It's a very simple format that can be easily parsed by any logfmt parser and passed on to another stats service.
-
-TODO: List all metrics we emit to logs.
+TODO: also forward trace data to logs/statsd/other backends
## Statsd
diff --git a/glide.lock b/glide.lock
index a721b910d..3aaf0ed01 100644
--- a/glide.lock
+++ b/glide.lock
@@ -1,5 +1,5 @@
-hash: ed88f1a46f149bac3eea6052d409a2a619f762ee51f2655b3fc22e8b2fa806ad
-updated: 2017-07-19T22:11:29.697513445-07:00
+hash: 68fe5d3130a8346f3e38c0924b70369592ccdf0ffe503858056694a71a19acd2
+updated: 2017-07-21T18:13:30.488739267-07:00
imports:
- name: code.cloudfoundry.org/bytefmt
version: f4415fafc5619dd75599a54a7c91fb3948ad58bd
@@ -7,6 +7,10 @@ imports:
version: 1ccc43bfb9c93cb401a4025e49c64ba71e5e668b
subpackages:
- proto
+- name: github.com/apache/thrift
+ version: 0dd823580c78a79ae9696eb9b3650e400fff140f
+ subpackages:
+ - lib/go/thrift
- name: github.com/asaskevich/govalidator
version: aa5cce4a76edb1a5acecab1870c17abbffb5419e
- name: github.com/Azure/go-ansiterm
@@ -23,6 +27,10 @@ imports:
version: 230eff6403e22b43f5fba7b28466dae4718934dd
- name: github.com/cenkalti/backoff
version: 5d150e7eec023ce7a124856b37c68e54b4050ac7
+- name: github.com/davecgh/go-spew
+ version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
+ subpackages:
+ - spew
- name: github.com/dchest/siphash
version: 4ebf1de738443ea7f45f02dc394c4df1942a126d
- name: github.com/dghubble/go-twitter
@@ -85,6 +93,14 @@ imports:
version: 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
- name: github.com/docker/libtrust
version: fa567046d9b14f6aa788882a950d69651d230b21
+- name: github.com/eapache/go-resiliency
+ version: b1fe83b5b03f624450823b751b662259ffc6af70
+ subpackages:
+ - breaker
+- name: github.com/eapache/go-xerial-snappy
+ version: bb955e01b9346ac19dc29eb16586c90ded99a98c
+- name: github.com/eapache/queue
+ version: 44cc805cf13205b55f69e14bcb69867d1ae92f98
- name: github.com/fsnotify/fsnotify
version: 4da3e2cfbabc9f751898f250b49f2439785783a1
- name: github.com/fsouza/go-dockerclient
@@ -92,6 +108,13 @@ imports:
- name: github.com/funcy/functions_go
version: 5d9948e8b1292c5421b5dd98bb6a9b5535d5e1ba
subpackages:
+ - client
+ - client/apps
+ - client/call
+ - client/operations
+ - client/routes
+ - client/tasks
+ - client/version
- models
- name: github.com/garyburd/redigo
version: 95d11dba2d44531bdb8022752b98912baafae03a
@@ -103,6 +126,8 @@ imports:
subpackages:
- binding
- render
+- name: github.com/go-logfmt/logfmt
+ version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5
- name: github.com/go-openapi/analysis
version: 0473cb67199f68b8b7d90e641afd9e79ad36b851
- name: github.com/go-openapi/errors
@@ -129,10 +154,16 @@ imports:
version: 035dcd74f1f61e83debe1c22950dc53556e7e4b2
- name: github.com/go-sql-driver/mysql
version: 56226343bd543f91a3930ed73ebdd03cfd633e85
+- name: github.com/gogo/protobuf
+ version: 83c564581ed68caafcec877c710d7ac243232c93
+ subpackages:
+ - proto
- name: github.com/golang/protobuf
version: 2402d76f3d41f928c7902a765dfc872356dd3aad
subpackages:
- proto
+- name: github.com/golang/snappy
+ version: 553a641470496b2327abcac10b36396bd98e45c9
- name: github.com/google/btree
version: 316fb6d3f031ae8f4d457c6c5186b9e3ded70435
- name: github.com/google/go-querystring
@@ -172,6 +203,8 @@ imports:
version: d9bd385d68c068f1fabb5057e3dedcbcbb039d0f
subpackages:
- reflectx
+- name: github.com/kr/logfmt
+ version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0
- name: github.com/lib/pq
version: 8837942c3e09574accbc5f150e2c5e057189cace
subpackages:
@@ -201,18 +234,43 @@ imports:
subpackages:
- libcontainer/system
- libcontainer/user
+- name: github.com/opentracing-contrib/go-observer
+ version: a52f2342449246d5bcc273e65cbdcfa5f7d6c63c
+- name: github.com/opentracing/opentracing-go
+ version: 06f47b42c792fef2796e9681353e1d908c417827
+ subpackages:
+ - ext
+ - log
+- name: github.com/openzipkin/zipkin-go-opentracing
+ version: e6b1ad87c0787de9cb033d4f680fe69cd69e19fe
+ subpackages:
+ - flag
+ - thrift/gen-go/scribe
+ - thrift/gen-go/zipkincore
+ - types
+ - wire
- name: github.com/patrickmn/go-cache
version: 7ac151875ffb48b9f3ccce9ea20f020b0c1596c8
- name: github.com/pelletier/go-buffruneio
version: c37440a7cf42ac63b919c752ca73a85067e05992
- name: github.com/pelletier/go-toml
version: fe7536c3dee2596cdd23ee9976a17c22bdaae286
+- name: github.com/pierrec/lz4
+ version: 5a3d2245f97fc249850e7802e3c01fad02a1c316
+- name: github.com/pierrec/xxHash
+ version: a0006b13c722f7f12368c00a3d3c2ae8a999a0c6
+ subpackages:
+ - xxHash32
- name: github.com/pkg/errors
version: c605e284fe17294bda444b34710735b29d1a9d90
- name: github.com/PuerkitoBio/purell
version: b938d81255b5473c57635324295cb0fe398c7a58
- name: github.com/PuerkitoBio/urlesc
version: bbf7a2afc14f93e1e0a5c06df524fbd75e5031e5
+- name: github.com/rcrowley/go-metrics
+ version: 1f30fe9094a513ce4c700b9a54458bbb0c96996c
+- name: github.com/Shopify/sarama
+ version: 2fd980e23bdcbb8edeb78fc704de0c39a6567ffc
- name: github.com/Sirupsen/logrus
version: ba1b36c82c5e05c4f912a88eab0dcd91a171688f
repo: https://github.com/sirupsen/logrus.git
diff --git a/glide.yaml b/glide.yaml
index bfe11924f..7776b5e92 100644
--- a/glide.yaml
+++ b/glide.yaml
@@ -68,10 +68,11 @@ import:
- bson
- package: github.com/jmoiron/sqlx
- package: github.com/mattn/go-sqlite3
+- package: github.com/opentracing/opentracing-go
+- package: github.com/openzipkin/zipkin-go-opentracing
testImport:
- package: github.com/vrischmann/envconfig
- package: github.com/opencontainers/go-digest
branch: master
- package: github.com/patrickmn/go-cache
branch: master
-
\ No newline at end of file
diff --git a/vendor/github.com/Shopify/sarama/.github/CONTRIBUTING.md b/vendor/github.com/Shopify/sarama/.github/CONTRIBUTING.md
new file mode 100644
index 000000000..b0f107cbc
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/.github/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing
+
+Contributions are always welcome, both reporting issues and submitting pull requests!
+
+### Reporting issues
+
+Please make sure to include any potentially useful information in the issue, so we can pinpoint the issue faster without going back and forth.
+
+- What SHA of Sarama are you running? If this is not the latest SHA on the master branch, please try if the problem persists with the latest version.
+- You can set `sarama.Logger` to a [log.Logger](http://golang.org/pkg/log/#Logger) instance to capture debug output. Please include it in your issue description.
+- Also look at the logs of the Kafka broker you are connected to. If you see anything out of the ordinary, please include it.
+
+Also, please include the following information about your environment, so we can help you faster:
+
+- What version of Kafka are you using?
+- What version of Go are you using?
+- What are the values of your Producer/Consumer/Client configuration?
+
+
+### Submitting pull requests
+
+We will gladly accept bug fixes, or additions to this library. Please fork this library, commit & push your changes, and open a pull request. Because this library is in production use by many people and applications, we code review all additions. To make the review process go as smooth as possible, please consider the following.
+
+- If you plan to work on something major, please open an issue to discuss the design first.
+- Don't break backwards compatibility. If you really have to, open an issue to discuss this first.
+- Make sure to use the `go fmt` command to format your code according to the standards. Even better, set up your editor to do this for you when saving.
+- Run [go vet](https://godoc.org/golang.org/x/tools/cmd/vet) to detect any suspicious constructs in your code that could be bugs.
+- Explicitly handle all error return values. If you really want to ignore an error value, you can assign it to `_`.You can use [errcheck](https://github.com/kisielk/errcheck) to verify whether you have handled all errors.
+- You may also want to run [golint](https://github.com/golang/lint) as well to detect style problems.
+- Add tests that cover the changes you made. Make sure to run `go test` with the `-race` argument to test for race conditions.
+- Make sure your code is supported by all the Go versions we support. You can rely on [Travis CI](https://travis-ci.org/Shopify/sarama) for testing older Go versions
diff --git a/vendor/github.com/Shopify/sarama/.github/ISSUE_TEMPLATE.md b/vendor/github.com/Shopify/sarama/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 000000000..7ccafb624
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,20 @@
+##### Versions
+
+*Please specify real version numbers or git SHAs, not just "Latest" since that changes fairly regularly.*
+Sarama Version:
+Kafka Version:
+Go Version:
+
+##### Configuration
+
+What configuration values are you using for Sarama and Kafka?
+
+##### Logs
+
+When filing an issue please provide logs from Sarama and Kafka if at all
+possible. You can set `sarama.Logger` to a `log.Logger` to capture Sarama debug
+output.
+
+##### Problem Description
+
+
diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore
new file mode 100644
index 000000000..3591f9ff3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+*.test
+
+# Folders
+_obj
+_test
+.vagrant
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
diff --git a/vendor/github.com/Shopify/sarama/.travis.yml b/vendor/github.com/Shopify/sarama/.travis.yml
new file mode 100644
index 000000000..04d399ece
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/.travis.yml
@@ -0,0 +1,32 @@
+language: go
+go:
+- 1.7.3
+- 1.8
+
+env:
+ global:
+ - KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095
+ - TOXIPROXY_ADDR=http://localhost:8474
+ - KAFKA_INSTALL_ROOT=/home/travis/kafka
+ - KAFKA_HOSTNAME=localhost
+ - DEBUG=true
+ matrix:
+ - KAFKA_VERSION=0.9.0.1
+ - KAFKA_VERSION=0.10.2.0
+
+before_install:
+- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}
+- vagrant/install_cluster.sh
+- vagrant/boot_cluster.sh
+- vagrant/create_topics.sh
+
+install:
+- make install_dependencies
+
+script:
+- make test
+- make vet
+- make errcheck
+- make fmt
+
+sudo: false
diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md
new file mode 100644
index 000000000..0a0082df7
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/CHANGELOG.md
@@ -0,0 +1,389 @@
+# Changelog
+
+#### Version 1.12.0 (2017-05-08)
+
+New Features:
+ - Added support for the `ApiVersions` request and response pair, and Kafka
+ version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note
+ that you still need to specify the Kafka version in the Sarama configuration
+ for the time being.
+ - Added a `Brokers` method to the Client which returns the complete set of
+ active brokers ([#813](https://github.com/Shopify/sarama/pull/813)).
+ - Added an `InSyncReplicas` method to the Client which returns the set of all
+ in-sync broker IDs for the given partition, now that the Kafka versions for
+ which this was misleading are no longer in our supported set
+ ([#872](https://github.com/Shopify/sarama/pull/872)).
+ - Added a `NewCustomHashPartitioner` method which allows constructing a hash
+ partitioner with a custom hash method in case the default (FNV-1a) is not
+ suitable
+ ([#837](https://github.com/Shopify/sarama/pull/837),
+ [#841](https://github.com/Shopify/sarama/pull/841)).
+
+Improvements:
+ - Recognize more Kafka error codes
+ ([#859](https://github.com/Shopify/sarama/pull/859)).
+
+Bug Fixes:
+ - Fix an issue where decoding a malformed FetchRequest would not return the
+ correct error ([#818](https://github.com/Shopify/sarama/pull/818)).
+ - Respect ordering of group protocols in JoinGroupRequests. This fix is
+ transparent if you're using the `AddGroupProtocol` or
+ `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from
+   the `GroupProtocols` field (now deprecated) to `OrderedGroupProtocols`
+ ([#812](https://github.com/Shopify/sarama/issues/812)).
+ - Fix an alignment-related issue with atomics on 32-bit architectures
+ ([#859](https://github.com/Shopify/sarama/pull/859)).
+
+#### Version 1.11.0 (2016-12-20)
+
+_Important:_ As of Sarama 1.11 it is necessary to set the config value of
+`Producer.Return.Successes` to true in order to use the SyncProducer. Previous
+versions would silently override this value when instantiating a SyncProducer
+which led to unexpected values and data races.
+
+New Features:
+ - Metrics! Thanks to Sébastien Launay for all his work on this feature
+ ([#701](https://github.com/Shopify/sarama/pull/701),
+ [#746](https://github.com/Shopify/sarama/pull/746),
+ [#766](https://github.com/Shopify/sarama/pull/766)).
+ - Add support for LZ4 compression
+ ([#786](https://github.com/Shopify/sarama/pull/786)).
+ - Add support for ListOffsetRequest v1 and Kafka 0.10.1
+ ([#775](https://github.com/Shopify/sarama/pull/775)).
+ - Added a `HighWaterMarks` method to the Consumer which aggregates the
+ `HighWaterMarkOffset` values of its child topic/partitions
+ ([#769](https://github.com/Shopify/sarama/pull/769)).
+
+Bug Fixes:
+ - Fixed producing when using timestamps, compression and Kafka 0.10
+ ([#759](https://github.com/Shopify/sarama/pull/759)).
+ - Added missing decoder methods to DescribeGroups response
+ ([#756](https://github.com/Shopify/sarama/pull/756)).
+ - Fix producer shutdown when `Return.Errors` is disabled
+ ([#787](https://github.com/Shopify/sarama/pull/787)).
+ - Don't mutate configuration in SyncProducer
+ ([#790](https://github.com/Shopify/sarama/pull/790)).
+ - Fix crash on SASL initialization failure
+ ([#795](https://github.com/Shopify/sarama/pull/795)).
+
+#### Version 1.10.1 (2016-08-30)
+
+Bug Fixes:
+ - Fix the documentation for `HashPartitioner` which was incorrect
+ ([#717](https://github.com/Shopify/sarama/pull/717)).
+ - Permit client creation even when it is limited by ACLs
+ ([#722](https://github.com/Shopify/sarama/pull/722)).
+ - Several fixes to the consumer timer optimization code, regressions introduced
+ in v1.10.0. Go's timers are finicky
+ ([#730](https://github.com/Shopify/sarama/pull/730),
+ [#733](https://github.com/Shopify/sarama/pull/733),
+ [#734](https://github.com/Shopify/sarama/pull/734)).
+ - Handle consuming compressed relative offsets with Kafka 0.10
+ ([#735](https://github.com/Shopify/sarama/pull/735)).
+
+#### Version 1.10.0 (2016-08-02)
+
+_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of
+Kafka you are running against (via the `config.Version` value) in order to use
+features that may not be compatible with old Kafka versions. If you don't
+specify this value it will default to 0.8.2 (the minimum supported), and trying
+to use more recent features (like the offset manager) will fail with an error.
+
+_Also:_ The offset-manager's behaviour has been changed to match the upstream
+java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and
+[#713](https://github.com/Shopify/sarama/pull/713)). If you use the
+offset-manager, please ensure that you are committing one *greater* than the
+last consumed message offset or else you may end up consuming duplicate
+messages.
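+
+As an illustrative sketch of both changes (the broker address, group, topic, and
+offset value are placeholders, and error handling is kept minimal):
+
+```go
+config := sarama.NewConfig()
+config.Version = sarama.V0_10_0_0 // tell Sarama which Kafka version you run against
+
+client, err := sarama.NewClient([]string{"localhost:9092"}, config)
+if err != nil {
+	panic(err)
+}
+om, err := sarama.NewOffsetManagerFromClient("my_group", client)
+if err != nil {
+	panic(err)
+}
+pom, err := om.ManagePartition("my_topic", 0)
+if err != nil {
+	panic(err)
+}
+
+var lastConsumedOffset int64 = 41           // e.g. msg.Offset from your consumer
+pom.MarkOffset(lastConsumedOffset+1, "")    // commit one *greater* than the last consumed offset
+```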
+
+New Features:
+ - Support for Kafka 0.10
+ ([#672](https://github.com/Shopify/sarama/pull/672),
+ [#678](https://github.com/Shopify/sarama/pull/678),
+ [#681](https://github.com/Shopify/sarama/pull/681), and others).
+ - Support for configuring the target Kafka version
+ ([#676](https://github.com/Shopify/sarama/pull/676)).
+ - Batch producing support in the SyncProducer
+ ([#677](https://github.com/Shopify/sarama/pull/677)).
+ - Extend producer mock to allow setting expectations on message contents
+ ([#667](https://github.com/Shopify/sarama/pull/667)).
+
+Improvements:
+ - Support `nil` compressed messages for deleting in compacted topics
+ ([#634](https://github.com/Shopify/sarama/pull/634)).
+ - Pre-allocate decoding errors, greatly reducing heap usage and GC time against
+ misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)).
+ - Re-use consumer expiry timers, removing one allocation per consumed message
+ ([#707](https://github.com/Shopify/sarama/pull/707)).
+
+Bug Fixes:
+ - Actually default the client ID to "sarama" like we say we do
+ ([#664](https://github.com/Shopify/sarama/pull/664)).
+ - Fix a rare issue where `Client.Leader` could return the wrong error
+ ([#685](https://github.com/Shopify/sarama/pull/685)).
+ - Fix a possible tight loop in the consumer
+ ([#693](https://github.com/Shopify/sarama/pull/693)).
+ - Match upstream's offset-tracking behaviour
+ ([#705](https://github.com/Shopify/sarama/pull/705)).
+ - Report UnknownTopicOrPartition errors from the offset manager
+ ([#706](https://github.com/Shopify/sarama/pull/706)).
+ - Fix possible negative partition value from the HashPartitioner
+ ([#709](https://github.com/Shopify/sarama/pull/709)).
+
+#### Version 1.9.0 (2016-05-16)
+
+New Features:
+ - Add support for custom offset manager retention durations
+ ([#602](https://github.com/Shopify/sarama/pull/602)).
+ - Publish low-level mocks to enable testing of third-party producer/consumer
+ implementations ([#570](https://github.com/Shopify/sarama/pull/570)).
+ - Declare support for Golang 1.6
+ ([#611](https://github.com/Shopify/sarama/pull/611)).
+ - Support for SASL plain-text auth
+ ([#648](https://github.com/Shopify/sarama/pull/648)).
+
+Improvements:
+ - Simplified broker locking scheme slightly
+ ([#604](https://github.com/Shopify/sarama/pull/604)).
+ - Documentation cleanup
+ ([#605](https://github.com/Shopify/sarama/pull/605),
+ [#621](https://github.com/Shopify/sarama/pull/621),
+ [#654](https://github.com/Shopify/sarama/pull/654)).
+
+Bug Fixes:
+ - Fix race condition shutting down the OffsetManager
+ ([#658](https://github.com/Shopify/sarama/pull/658)).
+
+#### Version 1.8.0 (2016-02-01)
+
+New Features:
+ - Full support for Kafka 0.9:
+ - All protocol messages and fields
+ ([#586](https://github.com/Shopify/sarama/pull/586),
+ [#588](https://github.com/Shopify/sarama/pull/588),
+ [#590](https://github.com/Shopify/sarama/pull/590)).
+ - Verified that TLS support works
+ ([#581](https://github.com/Shopify/sarama/pull/581)).
+ - Fixed the OffsetManager compatibility
+ ([#585](https://github.com/Shopify/sarama/pull/585)).
+
+Improvements:
+ - Optimize for fewer system calls when reading from the network
+ ([#584](https://github.com/Shopify/sarama/pull/584)).
+ - Automatically retry `InvalidMessage` errors to match upstream behaviour
+ ([#589](https://github.com/Shopify/sarama/pull/589)).
+
+#### Version 1.7.0 (2015-12-11)
+
+New Features:
+ - Preliminary support for Kafka 0.9
+ ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several
+ caveats:
+ - Protocol-layer support is mostly in place
+ ([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9
+     renamed some messages and fields, which we did not rename in order to preserve API
+ compatibility.
+ - The producer and consumer work against 0.9, but the offset manager does
+ not ([#573](https://github.com/Shopify/sarama/pull/573)).
+ - TLS support may or may not work
+ ([#581](https://github.com/Shopify/sarama/pull/581)).
+
+Improvements:
+ - Don't wait for request timeouts on dead brokers, greatly speeding recovery
+ when the TCP connection is left hanging
+ ([#548](https://github.com/Shopify/sarama/pull/548)).
+ - Refactored part of the producer. The new version provides a much more elegant
+ solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also
+ slightly more efficient, and much more precise in calculating batch sizes
+ when compression is used
+ ([#549](https://github.com/Shopify/sarama/pull/549),
+ [#550](https://github.com/Shopify/sarama/pull/550),
+ [#551](https://github.com/Shopify/sarama/pull/551)).
+
+Bug Fixes:
+ - Fix race condition in consumer test mock
+ ([#553](https://github.com/Shopify/sarama/pull/553)).
+
+#### Version 1.6.1 (2015-09-25)
+
+Bug Fixes:
+ - Fix panic that could occur if a user-supplied message value failed to encode
+ ([#449](https://github.com/Shopify/sarama/pull/449)).
+
+#### Version 1.6.0 (2015-09-04)
+
+New Features:
+ - Implementation of a consumer offset manager using the APIs introduced in
+ Kafka 0.8.2. The API is designed mainly for integration into a future
+ high-level consumer, not for direct use, although it is *possible* to use it
+ directly.
+ ([#461](https://github.com/Shopify/sarama/pull/461)).
+
+Improvements:
+ - CRC32 calculation is much faster on machines with SSE4.2 instructions,
+ removing a major hotspot from most profiles
+ ([#255](https://github.com/Shopify/sarama/pull/255)).
+
+Bug Fixes:
+ - Make protocol decoding more robust against some malformed packets generated
+ by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523),
+ [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways
+ ([#528](https://github.com/Shopify/sarama/pull/528)).
+ - Fix a potential race condition panic in the consumer on shutdown
+ ([#529](https://github.com/Shopify/sarama/pull/529)).
+
+#### Version 1.5.0 (2015-08-17)
+
+New Features:
+ - TLS-encrypted network connections are now supported. This feature is subject
+ to change when Kafka releases built-in TLS support, but for now this is
+ enough to work with TLS-terminating proxies
+ ([#154](https://github.com/Shopify/sarama/pull/154)).
+
+Improvements:
+ - The consumer will not block if a single partition is not drained by the user;
+ all other partitions will continue to consume normally
+ ([#485](https://github.com/Shopify/sarama/pull/485)).
+ - Formatting of error strings has been much improved
+ ([#495](https://github.com/Shopify/sarama/pull/495)).
+ - Internal refactoring of the producer for code cleanliness and to enable
+ future work ([#300](https://github.com/Shopify/sarama/pull/300)).
+
+Bug Fixes:
+ - Fix a potential deadlock in the consumer on shutdown
+ ([#475](https://github.com/Shopify/sarama/pull/475)).
+
+#### Version 1.4.3 (2015-07-21)
+
+Bug Fixes:
+ - Don't include the partitioner in the producer's "fetch partitions"
+ circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)).
+ - Don't retry messages until the broker is closed when abandoning a broker in
+ the producer ([#468](https://github.com/Shopify/sarama/pull/468)).
+ - Update the import path for snappy-go, it has moved again and the API has
+ changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)).
+
+#### Version 1.4.2 (2015-05-27)
+
+Bug Fixes:
+ - Update the import path for snappy-go, it has moved from google code to github
+ ([#456](https://github.com/Shopify/sarama/pull/456)).
+
+#### Version 1.4.1 (2015-05-25)
+
+Improvements:
+ - Optimizations when decoding snappy messages, thanks to John Potocny
+ ([#446](https://github.com/Shopify/sarama/pull/446)).
+
+Bug Fixes:
+ - Fix hypothetical race conditions on producer shutdown
+ ([#450](https://github.com/Shopify/sarama/pull/450),
+ [#451](https://github.com/Shopify/sarama/pull/451)).
+
+#### Version 1.4.0 (2015-05-01)
+
+New Features:
+ - The consumer now implements `Topics()` and `Partitions()` methods to enable
+ users to dynamically choose what topics/partitions to consume without
+ instantiating a full client
+ ([#431](https://github.com/Shopify/sarama/pull/431)).
+ - The partition-consumer now exposes the high water mark offset value returned
+ by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)).
+ - Added a `kafka-console-consumer` tool capable of handling multiple
+ partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
+ ([#439](https://github.com/Shopify/sarama/pull/439),
+ [#442](https://github.com/Shopify/sarama/pull/442)).
+
+Improvements:
+ - The producer's logging during retry scenarios is more consistent, more
+ useful, and slightly less verbose
+ ([#429](https://github.com/Shopify/sarama/pull/429)).
+ - The client now shuffles its initial list of seed brokers in order to prevent
+ thundering herd on the first broker in the list
+ ([#441](https://github.com/Shopify/sarama/pull/441)).
+
+Bug Fixes:
+ - The producer now correctly manages its state if retries occur when it is
+ shutting down, fixing several instances of confusing behaviour and at least
+ one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)).
+ - The consumer now handles messages for different partitions asynchronously,
+ making it much more resilient to specific user code ordering
+ ([#325](https://github.com/Shopify/sarama/pull/325)).
+
+#### Version 1.3.0 (2015-04-16)
+
+New Features:
+ - The client now tracks consumer group coordinators using
+ ConsumerMetadataRequests similar to how it tracks partition leadership using
+ regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)).
+ This adds two methods to the client API:
+ - `Coordinator(consumerGroup string) (*Broker, error)`
+ - `RefreshCoordinator(consumerGroup string) error`
+
+Improvements:
+ - ConsumerMetadataResponses now automatically create a Broker object out of the
+ ID/address/port combination for the Coordinator; accessing the fields
+ individually has been deprecated
+ ([#413](https://github.com/Shopify/sarama/pull/413)).
+ - Much improved handling of `OffsetOutOfRange` errors in the consumer.
+ Consumers will fail to start if the provided offset is out of range
+ ([#418](https://github.com/Shopify/sarama/pull/418))
+ and they will automatically shut down if the offset falls out of range
+ ([#424](https://github.com/Shopify/sarama/pull/424)).
+ - Small performance improvement in encoding and decoding protocol messages
+ ([#427](https://github.com/Shopify/sarama/pull/427)).
+
+Bug Fixes:
+ - Fix a rare race condition in the client's background metadata refresher if
+ it happens to be activated while the client is being closed
+ ([#422](https://github.com/Shopify/sarama/pull/422)).
+
+#### Version 1.2.0 (2015-04-07)
+
+Improvements:
+ - The producer's behaviour when `Flush.Frequency` is set is now more intuitive
+ ([#389](https://github.com/Shopify/sarama/pull/389)).
+ - The producer is now somewhat more memory-efficient during and after retrying
+ messages due to an improved queue implementation
+ ([#396](https://github.com/Shopify/sarama/pull/396)).
+ - The consumer produces much more useful logging output when leadership
+ changes ([#385](https://github.com/Shopify/sarama/pull/385)).
+ - The client's `GetOffset` method will now automatically refresh metadata and
+ retry once in the event of stale information or similar
+ ([#394](https://github.com/Shopify/sarama/pull/394)).
+ - Broker connections now have support for using TCP keepalives
+ ([#407](https://github.com/Shopify/sarama/issues/407)).
+
+Bug Fixes:
+ - The OffsetCommitRequest message now correctly implements all three possible
+ API versions ([#390](https://github.com/Shopify/sarama/pull/390),
+ [#400](https://github.com/Shopify/sarama/pull/400)).
+
+#### Version 1.1.0 (2015-03-20)
+
+Improvements:
+ - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
+ broken topics don't choke throughput
+ ([#373](https://github.com/Shopify/sarama/pull/373)).
+
+Bug Fixes:
+ - Fix the producer's internal reference counting in certain unusual scenarios
+ ([#367](https://github.com/Shopify/sarama/pull/367)).
+ - Fix the consumer's internal reference counting in certain unusual scenarios
+ ([#369](https://github.com/Shopify/sarama/pull/369)).
+ - Fix a condition where the producer's internal control messages could have
+ gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)).
+ - Fix an issue where invalid partition lists would be cached when asking for
+   metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)).
+
+
+#### Version 1.0.0 (2015-03-17)
+
+Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:
+
+- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
+- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
+- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package.
+- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
+- All the configuration values have been unified in the `Config` struct.
+- Much improved test suite.
diff --git a/vendor/github.com/Shopify/sarama/LICENSE b/vendor/github.com/Shopify/sarama/LICENSE
new file mode 100644
index 000000000..8121b63b1
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2013 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile
new file mode 100644
index 000000000..626b09a54
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/Makefile
@@ -0,0 +1,21 @@
+default: fmt vet errcheck test
+
+test:
+ go test -v -timeout 60s -race ./...
+
+vet:
+ go vet ./...
+
+errcheck:
+ errcheck github.com/Shopify/sarama/...
+
+fmt:
+ @if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi
+
+install_dependencies: install_errcheck get
+
+install_errcheck:
+ go get github.com/kisielk/errcheck
+
+get:
+ go get -t
diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md
new file mode 100644
index 000000000..6e12a07ae
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/README.md
@@ -0,0 +1,38 @@
+sarama
+======
+
+[GoDoc](https://godoc.org/github.com/Shopify/sarama)
+[Build Status](https://travis-ci.org/Shopify/sarama)
+
+Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).
+
+### Getting started
+
+- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama).
+- Mocks for testing are available in the [mocks](./mocks) subpackage.
+- The [examples](./examples) directory contains more elaborate example applications.
+- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
+
+You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions).
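+
+As a quick, illustrative starting point (the broker address and topic below are
+placeholders, not defaults):
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/Shopify/sarama"
+)
+
+func main() {
+	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, sarama.NewConfig())
+	if err != nil {
+		log.Fatalln(err)
+	}
+	defer consumer.Close()
+
+	pc, err := consumer.ConsumePartition("my_topic", 0, sarama.OffsetNewest)
+	if err != nil {
+		log.Fatalln(err)
+	}
+	defer pc.Close()
+
+	for msg := range pc.Messages() {
+		fmt.Printf("offset %d: %s\n", msg.Offset, string(msg.Value))
+	}
+}
+```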
+
+### Compatibility and API stability
+
+Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
+the two latest stable releases of Kafka and Go, and we provide a two month
+grace period for older releases. This means we currently officially support
+Go 1.8 and 1.7, and Kafka 0.10 and 0.9, although older releases are
+still likely to work.
+
+Sarama follows semantic versioning and provides API stability via the gopkg.in service.
+You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
+A changelog is available [here](CHANGELOG.md).
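+
+For example, a pinned import looks like:
+
+```go
+import "gopkg.in/Shopify/sarama.v1"
+```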
+
+### Contributing
+
+* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
+* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more
+ technical and design details.
+* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol)
+ contains a wealth of useful information.
+* For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
+* If you have any questions, just ask!
diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/Shopify/sarama/Vagrantfile
new file mode 100644
index 000000000..f4b848a30
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/Vagrantfile
@@ -0,0 +1,20 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
+VAGRANTFILE_API_VERSION = "2"
+
+# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB
+MEMORY = 3072
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+ config.vm.box = "ubuntu/trusty64"
+
+ config.vm.provision :shell, path: "vagrant/provision.sh"
+
+ config.vm.network "private_network", ip: "192.168.100.67"
+
+ config.vm.provider "virtualbox" do |v|
+ v.memory = MEMORY
+ end
+end
diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go
new file mode 100644
index 000000000..ab65f01cc
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/api_versions_request.go
@@ -0,0 +1,24 @@
+package sarama
+
+type ApiVersionsRequest struct {
+}
+
+func (r *ApiVersionsRequest) encode(pe packetEncoder) error {
+ return nil
+}
+
+func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
+ return nil
+}
+
+func (r *ApiVersionsRequest) key() int16 {
+ return 18
+}
+
+func (r *ApiVersionsRequest) version() int16 {
+ return 0
+}
+
+func (r *ApiVersionsRequest) requiredVersion() KafkaVersion {
+ return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/api_versions_request_test.go b/vendor/github.com/Shopify/sarama/api_versions_request_test.go
new file mode 100644
index 000000000..5ab4fa71c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/api_versions_request_test.go
@@ -0,0 +1,14 @@
+package sarama
+
+import "testing"
+
+var (
+ apiVersionRequest = []byte{}
+)
+
+func TestApiVersionsRequest(t *testing.T) {
+ var request *ApiVersionsRequest
+
+ request = new(ApiVersionsRequest)
+ testRequest(t, "basic", request, apiVersionRequest)
+}
diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go
new file mode 100644
index 000000000..23bc326e1
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/api_versions_response.go
@@ -0,0 +1,87 @@
+package sarama
+
+type ApiVersionsResponseBlock struct {
+ ApiKey int16
+ MinVersion int16
+ MaxVersion int16
+}
+
+func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error {
+ pe.putInt16(b.ApiKey)
+ pe.putInt16(b.MinVersion)
+ pe.putInt16(b.MaxVersion)
+ return nil
+}
+
+func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
+ var err error
+
+ if b.ApiKey, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ if b.MinVersion, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ if b.MaxVersion, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type ApiVersionsResponse struct {
+ Err KError
+ ApiVersions []*ApiVersionsResponseBlock
+}
+
+func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ if err := pe.putArrayLength(len(r.ApiVersions)); err != nil {
+ return err
+ }
+ for _, apiVersion := range r.ApiVersions {
+ if err := apiVersion.encode(pe); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.Err = KError(kerr)
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks)
+ for i := 0; i < numBlocks; i++ {
+ block := new(ApiVersionsResponseBlock)
+ if err := block.decode(pd); err != nil {
+ return err
+ }
+ r.ApiVersions[i] = block
+ }
+
+ return nil
+}
+
+func (r *ApiVersionsResponse) key() int16 {
+ return 18
+}
+
+func (r *ApiVersionsResponse) version() int16 {
+ return 0
+}
+
+func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
+ return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/api_versions_response_test.go b/vendor/github.com/Shopify/sarama/api_versions_response_test.go
new file mode 100644
index 000000000..675a65a7d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/api_versions_response_test.go
@@ -0,0 +1,32 @@
+package sarama
+
+import "testing"
+
+var (
+ apiVersionResponse = []byte{
+ 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x03,
+ 0x00, 0x02,
+ 0x00, 0x01,
+ }
+)
+
+func TestApiVersionsResponse(t *testing.T) {
+ var response *ApiVersionsResponse
+
+ response = new(ApiVersionsResponse)
+ testVersionDecodable(t, "no error", response, apiVersionResponse, 0)
+ if response.Err != ErrNoError {
+ t.Error("Decoding error failed: no error expected but found", response.Err)
+ }
+ if response.ApiVersions[0].ApiKey != 0x03 {
+ t.Error("Decoding error: expected 0x03 but got", response.ApiVersions[0].ApiKey)
+ }
+ if response.ApiVersions[0].MinVersion != 0x02 {
+ t.Error("Decoding error: expected 0x02 but got", response.ApiVersions[0].MinVersion)
+ }
+ if response.ApiVersions[0].MaxVersion != 0x01 {
+ t.Error("Decoding error: expected 0x01 but got", response.ApiVersions[0].MaxVersion)
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go
new file mode 100644
index 000000000..6d71a6d8f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/async_producer.go
@@ -0,0 +1,904 @@
+package sarama
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/eapache/go-resiliency/breaker"
+ "github.com/eapache/queue"
+)
+
+// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
+// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
+// and parses responses for errors. You must read from the Errors() channel or the
+// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
+// leaks: it will not be garbage-collected automatically when it passes out of
+// scope.
+type AsyncProducer interface {
+
+ // AsyncClose triggers a shutdown of the producer. The shutdown has completed
+ // when both the Errors and Successes channels have been closed. When calling
+ // AsyncClose, you *must* continue to read from those channels in order to
+ // drain the results of any messages in flight.
+ AsyncClose()
+
+ // Close shuts down the producer and waits for any buffered messages to be
+ // flushed. You must call this function before a producer object passes out of
+ // scope, as it may otherwise leak memory. You must call this before calling
+ // Close on the underlying client.
+ Close() error
+
+ // Input is the input channel for the user to write messages to that they
+ // wish to send.
+ Input() chan<- *ProducerMessage
+
+ // Successes is the success output channel back to the user when Return.Successes is
+ // enabled. If Return.Successes is true, you MUST read from this channel or the
+ // Producer will deadlock. It is suggested that you send and read messages
+ // together in a single select statement.
+ Successes() <-chan *ProducerMessage
+
+ // Errors is the error output channel back to the user. You MUST read from this
+ // channel or the Producer will deadlock when the channel is full. Alternatively,
+ // you can set Producer.Return.Errors in your config to false, which prevents
+	// errors from being returned.
+ Errors() <-chan *ProducerError
+}
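+
+// A minimal usage sketch (illustrative only: the broker address and topic are
+// placeholders, and Producer.Return.Errors is assumed to be left at its default
+// of true):
+//
+//	producer, err := NewAsyncProducer([]string{"localhost:9092"}, NewConfig())
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	for i := 0; i < 10; i++ {
+//		select {
+//		case producer.Input() <- &ProducerMessage{Topic: "my_topic", Value: StringEncoder("hello")}:
+//		case err := <-producer.Errors():
+//			Logger.Println("failed to produce:", err)
+//		}
+//	}
+//
+//	if err := producer.Close(); err != nil {
+//		Logger.Println(err)
+//	}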
+
+type asyncProducer struct {
+ client Client
+ conf *Config
+ ownClient bool
+
+ errors chan *ProducerError
+ input, successes, retries chan *ProducerMessage
+ inFlight sync.WaitGroup
+
+ brokers map[*Broker]chan<- *ProducerMessage
+ brokerRefs map[chan<- *ProducerMessage]int
+ brokerLock sync.Mutex
+}
+
+// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
+func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
+ client, err := NewClient(addrs, conf)
+ if err != nil {
+ return nil, err
+ }
+
+ p, err := NewAsyncProducerFromClient(client)
+ if err != nil {
+ return nil, err
+ }
+ p.(*asyncProducer).ownClient = true
+ return p, nil
+}
+
+// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
+ // Check that we are not dealing with a closed Client before processing any other arguments
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ p := &asyncProducer{
+ client: client,
+ conf: client.Config(),
+ errors: make(chan *ProducerError),
+ input: make(chan *ProducerMessage),
+ successes: make(chan *ProducerMessage),
+ retries: make(chan *ProducerMessage),
+ brokers: make(map[*Broker]chan<- *ProducerMessage),
+ brokerRefs: make(map[chan<- *ProducerMessage]int),
+ }
+
+ // launch our singleton dispatchers
+ go withRecover(p.dispatcher)
+ go withRecover(p.retryHandler)
+
+ return p, nil
+}
+
+type flagSet int8
+
+const (
+ syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer
+ fin // final message from partitionProducer to brokerProducer and back
+ shutdown // start the shutdown process
+)
+
+// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
+type ProducerMessage struct {
+ Topic string // The Kafka topic for this message.
+ // The partitioning key for this message. Pre-existing Encoders include
+ // StringEncoder and ByteEncoder.
+ Key Encoder
+ // The actual message to store in Kafka. Pre-existing Encoders include
+ // StringEncoder and ByteEncoder.
+ Value Encoder
+
+ // This field is used to hold arbitrary data you wish to include so it
+ // will be available when receiving on the Successes and Errors channels.
+	// Sarama completely ignores this field; it is only intended for
+	// pass-through data.
+ Metadata interface{}
+
+ // Below this point are filled in by the producer as the message is processed
+
+ // Offset is the offset of the message stored on the broker. This is only
+ // guaranteed to be defined if the message was successfully delivered and
+ // RequiredAcks is not NoResponse.
+ Offset int64
+ // Partition is the partition that the message was sent to. This is only
+ // guaranteed to be defined if the message was successfully delivered.
+ Partition int32
+ // Timestamp is the timestamp assigned to the message by the broker. This
+ // is only guaranteed to be defined if the message was successfully
+ // delivered, RequiredAcks is not NoResponse, and the Kafka broker is at
+ // least version 0.10.0.
+ Timestamp time.Time
+
+ retries int
+ flags flagSet
+}
+
+const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
+
+func (m *ProducerMessage) byteSize() int {
+ size := producerMessageOverhead
+ if m.Key != nil {
+ size += m.Key.Length()
+ }
+ if m.Value != nil {
+ size += m.Value.Length()
+ }
+ return size
+}
+
+func (m *ProducerMessage) clear() {
+ m.flags = 0
+ m.retries = 0
+}
+
+// ProducerError is the type of error generated when the producer fails to deliver a message.
+// It contains the original ProducerMessage as well as the actual error value.
+type ProducerError struct {
+ Msg *ProducerMessage
+ Err error
+}
+
+func (pe ProducerError) Error() string {
+ return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
+}
+
+// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
+// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
+// when closing a producer.
+type ProducerErrors []*ProducerError
+
+func (pe ProducerErrors) Error() string {
+ return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
+}
+
+func (p *asyncProducer) Errors() <-chan *ProducerError {
+ return p.errors
+}
+
+func (p *asyncProducer) Successes() <-chan *ProducerMessage {
+ return p.successes
+}
+
+func (p *asyncProducer) Input() chan<- *ProducerMessage {
+ return p.input
+}
+
+func (p *asyncProducer) Close() error {
+ p.AsyncClose()
+
+ if p.conf.Producer.Return.Successes {
+ go withRecover(func() {
+ for range p.successes {
+ }
+ })
+ }
+
+ var errors ProducerErrors
+ if p.conf.Producer.Return.Errors {
+ for event := range p.errors {
+ errors = append(errors, event)
+ }
+ } else {
+ <-p.errors
+ }
+
+ if len(errors) > 0 {
+ return errors
+ }
+ return nil
+}
+
+func (p *asyncProducer) AsyncClose() {
+ go withRecover(p.shutdown)
+}
+
+// singleton
+// dispatches messages by topic
+func (p *asyncProducer) dispatcher() {
+ handlers := make(map[string]chan<- *ProducerMessage)
+ shuttingDown := false
+
+ for msg := range p.input {
+ if msg == nil {
+ Logger.Println("Something tried to send a nil message, it was ignored.")
+ continue
+ }
+
+ if msg.flags&shutdown != 0 {
+ shuttingDown = true
+ p.inFlight.Done()
+ continue
+ } else if msg.retries == 0 {
+ if shuttingDown {
+ // we can't just call returnError here because that decrements the wait group,
+ // which hasn't been incremented yet for this message, and shouldn't be
+ pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
+ if p.conf.Producer.Return.Errors {
+ p.errors <- pErr
+ } else {
+ Logger.Println(pErr)
+ }
+ continue
+ }
+ p.inFlight.Add(1)
+ }
+
+ if msg.byteSize() > p.conf.Producer.MaxMessageBytes {
+ p.returnError(msg, ErrMessageSizeTooLarge)
+ continue
+ }
+
+ handler := handlers[msg.Topic]
+ if handler == nil {
+ handler = p.newTopicProducer(msg.Topic)
+ handlers[msg.Topic] = handler
+ }
+
+ handler <- msg
+ }
+
+ for _, handler := range handlers {
+ close(handler)
+ }
+}
+
+// one per topic
+// partitions messages, then dispatches them by partition
+type topicProducer struct {
+ parent *asyncProducer
+ topic string
+ input <-chan *ProducerMessage
+
+ breaker *breaker.Breaker
+ handlers map[int32]chan<- *ProducerMessage
+ partitioner Partitioner
+}
+
+func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
+ input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+ tp := &topicProducer{
+ parent: p,
+ topic: topic,
+ input: input,
+ breaker: breaker.New(3, 1, 10*time.Second),
+ handlers: make(map[int32]chan<- *ProducerMessage),
+ partitioner: p.conf.Producer.Partitioner(topic),
+ }
+ go withRecover(tp.dispatch)
+ return input
+}
+
+func (tp *topicProducer) dispatch() {
+ for msg := range tp.input {
+ if msg.retries == 0 {
+ if err := tp.partitionMessage(msg); err != nil {
+ tp.parent.returnError(msg, err)
+ continue
+ }
+ }
+
+ handler := tp.handlers[msg.Partition]
+ if handler == nil {
+ handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
+ tp.handlers[msg.Partition] = handler
+ }
+
+ handler <- msg
+ }
+
+ for _, handler := range tp.handlers {
+ close(handler)
+ }
+}
+
+func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
+ var partitions []int32
+
+ err := tp.breaker.Run(func() (err error) {
+ if tp.partitioner.RequiresConsistency() {
+ partitions, err = tp.parent.client.Partitions(msg.Topic)
+ } else {
+ partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
+ }
+ return
+ })
+
+ if err != nil {
+ return err
+ }
+
+ numPartitions := int32(len(partitions))
+
+ if numPartitions == 0 {
+ return ErrLeaderNotAvailable
+ }
+
+ choice, err := tp.partitioner.Partition(msg, numPartitions)
+
+ if err != nil {
+ return err
+ } else if choice < 0 || choice >= numPartitions {
+ return ErrInvalidPartition
+ }
+
+ msg.Partition = partitions[choice]
+
+ return nil
+}
+
+// one per partition per topic
+// dispatches messages to the appropriate broker
+// also responsible for maintaining message order during retries
+type partitionProducer struct {
+ parent *asyncProducer
+ topic string
+ partition int32
+ input <-chan *ProducerMessage
+
+ leader *Broker
+ breaker *breaker.Breaker
+ output chan<- *ProducerMessage
+
+	// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through;
+	// all other messages get buffered in retryState[msg.retries].buf to preserve ordering.
+ // retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
+ // therefore whether our buffer is complete and safe to flush)
+ highWatermark int
+ retryState []partitionRetryState
+}
+
+type partitionRetryState struct {
+ buf []*ProducerMessage
+ expectChaser bool
+}
+
+func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
+ input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+ pp := &partitionProducer{
+ parent: p,
+ topic: topic,
+ partition: partition,
+ input: input,
+
+ breaker: breaker.New(3, 1, 10*time.Second),
+ retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
+ }
+ go withRecover(pp.dispatch)
+ return input
+}
+
+func (pp *partitionProducer) dispatch() {
+ // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
+ // on the first message
+ pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
+ if pp.leader != nil {
+ pp.output = pp.parent.getBrokerProducer(pp.leader)
+ pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+ pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+ }
+
+ for msg := range pp.input {
+ if msg.retries > pp.highWatermark {
+ // a new, higher, retry level; handle it and then back off
+ pp.newHighWatermark(msg.retries)
+ time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
+ } else if pp.highWatermark > 0 {
+ // we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
+ if msg.retries < pp.highWatermark {
+				// in fact this message is not even the current retry level, so buffer it for now (unless it's just a fin)
+ if msg.flags&fin == fin {
+ pp.retryState[msg.retries].expectChaser = false
+ pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+ } else {
+ pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
+ }
+ continue
+ } else if msg.flags&fin == fin {
+ // this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
+ // meaning this retry level is done and we can go down (at least) one level and flush that
+ pp.retryState[pp.highWatermark].expectChaser = false
+ pp.flushRetryBuffers()
+ pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+ continue
+ }
+ }
+
+ // if we made it this far then the current msg contains real data, and can be sent to the next goroutine
+ // without breaking any of our ordering guarantees
+
+ if pp.output == nil {
+ if err := pp.updateLeader(); err != nil {
+ pp.parent.returnError(msg, err)
+ time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
+ continue
+ }
+ Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+ }
+
+ pp.output <- msg
+ }
+
+ if pp.output != nil {
+ pp.parent.unrefBrokerProducer(pp.leader, pp.output)
+ }
+}
+
+func (pp *partitionProducer) newHighWatermark(hwm int) {
+ Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
+ pp.highWatermark = hwm
+
+ // send off a fin so that we know when everything "in between" has made it
+ // back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
+ pp.retryState[pp.highWatermark].expectChaser = true
+ pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
+ pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
+
+ // a new HWM means that our current broker selection is out of date
+ Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+ pp.parent.unrefBrokerProducer(pp.leader, pp.output)
+ pp.output = nil
+}
+
+func (pp *partitionProducer) flushRetryBuffers() {
+ Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+ for {
+ pp.highWatermark--
+
+ if pp.output == nil {
+ if err := pp.updateLeader(); err != nil {
+ pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
+ goto flushDone
+ }
+ Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+ }
+
+ for _, msg := range pp.retryState[pp.highWatermark].buf {
+ pp.output <- msg
+ }
+
+ flushDone:
+ pp.retryState[pp.highWatermark].buf = nil
+ if pp.retryState[pp.highWatermark].expectChaser {
+ Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+ break
+ } else if pp.highWatermark == 0 {
+ Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
+ break
+ }
+ }
+}
+
+func (pp *partitionProducer) updateLeader() error {
+ return pp.breaker.Run(func() (err error) {
+ if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
+ return err
+ }
+
+ if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
+ return err
+ }
+
+ pp.output = pp.parent.getBrokerProducer(pp.leader)
+ pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+ pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+
+ return nil
+ })
+}
+
+// one per broker; also constructs an associated flusher
+func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage {
+ var (
+ input = make(chan *ProducerMessage)
+ bridge = make(chan *produceSet)
+ responses = make(chan *brokerProducerResponse)
+ )
+
+ bp := &brokerProducer{
+ parent: p,
+ broker: broker,
+ input: input,
+ output: bridge,
+ responses: responses,
+ buffer: newProduceSet(p),
+ currentRetries: make(map[string]map[int32]error),
+ }
+ go withRecover(bp.run)
+
+ // minimal bridge to make the network response `select`able
+ go withRecover(func() {
+ for set := range bridge {
+ request := set.buildRequest()
+
+ response, err := broker.Produce(request)
+
+ responses <- &brokerProducerResponse{
+ set: set,
+ err: err,
+ res: response,
+ }
+ }
+ close(responses)
+ })
+
+ return input
+}
+
+type brokerProducerResponse struct {
+ set *produceSet
+ err error
+ res *ProduceResponse
+}
+
+// groups messages together into appropriately-sized batches for sending to the broker
+// handles state related to retries etc
+type brokerProducer struct {
+ parent *asyncProducer
+ broker *Broker
+
+ input <-chan *ProducerMessage
+ output chan<- *produceSet
+ responses <-chan *brokerProducerResponse
+
+ buffer *produceSet
+ timer <-chan time.Time
+ timerFired bool
+
+ closing error
+ currentRetries map[string]map[int32]error
+}
+
+func (bp *brokerProducer) run() {
+ var output chan<- *produceSet
+ Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())
+
+ for {
+ select {
+ case msg := <-bp.input:
+ if msg == nil {
+ bp.shutdown()
+ return
+ }
+
+ if msg.flags&syn == syn {
+ Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
+ bp.broker.ID(), msg.Topic, msg.Partition)
+ if bp.currentRetries[msg.Topic] == nil {
+ bp.currentRetries[msg.Topic] = make(map[int32]error)
+ }
+ bp.currentRetries[msg.Topic][msg.Partition] = nil
+ bp.parent.inFlight.Done()
+ continue
+ }
+
+ if reason := bp.needsRetry(msg); reason != nil {
+ bp.parent.retryMessage(msg, reason)
+
+ if bp.closing == nil && msg.flags&fin == fin {
+ // we were retrying this partition but we can start processing again
+ delete(bp.currentRetries[msg.Topic], msg.Partition)
+ Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
+ bp.broker.ID(), msg.Topic, msg.Partition)
+ }
+
+ continue
+ }
+
+ if bp.buffer.wouldOverflow(msg) {
+ if err := bp.waitForSpace(msg); err != nil {
+ bp.parent.retryMessage(msg, err)
+ continue
+ }
+ }
+
+ if err := bp.buffer.add(msg); err != nil {
+ bp.parent.returnError(msg, err)
+ continue
+ }
+
+ if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
+ bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency)
+ }
+ case <-bp.timer:
+ bp.timerFired = true
+ case output <- bp.buffer:
+ bp.rollOver()
+ case response := <-bp.responses:
+ bp.handleResponse(response)
+ }
+
+ if bp.timerFired || bp.buffer.readyToFlush() {
+ output = bp.output
+ } else {
+ output = nil
+ }
+ }
+}
+
+func (bp *brokerProducer) shutdown() {
+ for !bp.buffer.empty() {
+ select {
+ case response := <-bp.responses:
+ bp.handleResponse(response)
+ case bp.output <- bp.buffer:
+ bp.rollOver()
+ }
+ }
+ close(bp.output)
+ for response := range bp.responses {
+ bp.handleResponse(response)
+ }
+
+ Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
+}
+
+func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
+ if bp.closing != nil {
+ return bp.closing
+ }
+
+ return bp.currentRetries[msg.Topic][msg.Partition]
+}
+
+func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
+ Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
+
+ for {
+ select {
+ case response := <-bp.responses:
+ bp.handleResponse(response)
+ // handling a response can change our state, so re-check some things
+ if reason := bp.needsRetry(msg); reason != nil {
+ return reason
+ } else if !bp.buffer.wouldOverflow(msg) {
+ return nil
+ }
+ case bp.output <- bp.buffer:
+ bp.rollOver()
+ return nil
+ }
+ }
+}
+
+func (bp *brokerProducer) rollOver() {
+ bp.timer = nil
+ bp.timerFired = false
+ bp.buffer = newProduceSet(bp.parent)
+}
+
+func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
+ if response.err != nil {
+ bp.handleError(response.set, response.err)
+ } else {
+ bp.handleSuccess(response.set, response.res)
+ }
+
+ if bp.buffer.empty() {
+ bp.rollOver() // this can happen if the response invalidated our buffer
+ }
+}
+
+func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
+ // we iterate through the blocks in the request set, not the response, so that we notice
+ // if the response is missing a block completely
+ sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
+ if response == nil {
+ // this only happens when RequiredAcks is NoResponse, so we have to assume success
+ bp.parent.returnSuccesses(msgs)
+ return
+ }
+
+ block := response.GetBlock(topic, partition)
+ if block == nil {
+ bp.parent.returnErrors(msgs, ErrIncompleteResponse)
+ return
+ }
+
+ switch block.Err {
+ // Success
+ case ErrNoError:
+ if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
+ for _, msg := range msgs {
+ msg.Timestamp = block.Timestamp
+ }
+ }
+ for i, msg := range msgs {
+ msg.Offset = block.Offset + int64(i)
+ }
+ bp.parent.returnSuccesses(msgs)
+ // Retriable errors
+ case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
+ ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
+ Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
+ bp.broker.ID(), topic, partition, block.Err)
+ bp.currentRetries[topic][partition] = block.Err
+ bp.parent.retryMessages(msgs, block.Err)
+ bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
+ // Other non-retriable errors
+ default:
+ bp.parent.returnErrors(msgs, block.Err)
+ }
+ })
+}
+
+func (bp *brokerProducer) handleError(sent *produceSet, err error) {
+ switch err.(type) {
+ case PacketEncodingError:
+ sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
+ bp.parent.returnErrors(msgs, err)
+ })
+ default:
+ Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
+ bp.parent.abandonBrokerConnection(bp.broker)
+ _ = bp.broker.Close()
+ bp.closing = err
+ sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
+ bp.parent.retryMessages(msgs, err)
+ })
+ bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
+ bp.parent.retryMessages(msgs, err)
+ })
+ bp.rollOver()
+ }
+}
+
+// singleton
+// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
+// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
+func (p *asyncProducer) retryHandler() {
+ var msg *ProducerMessage
+ buf := queue.New()
+
+ for {
+ if buf.Length() == 0 {
+ msg = <-p.retries
+ } else {
+ select {
+ case msg = <-p.retries:
+ case p.input <- buf.Peek().(*ProducerMessage):
+ buf.Remove()
+ continue
+ }
+ }
+
+ if msg == nil {
+ return
+ }
+
+ buf.Add(msg)
+ }
+}
+
+// utility functions
+
+func (p *asyncProducer) shutdown() {
+ Logger.Println("Producer shutting down.")
+ p.inFlight.Add(1)
+ p.input <- &ProducerMessage{flags: shutdown}
+
+ p.inFlight.Wait()
+
+ if p.ownClient {
+ err := p.client.Close()
+ if err != nil {
+ Logger.Println("producer/shutdown failed to close the embedded client:", err)
+ }
+ }
+
+ close(p.input)
+ close(p.retries)
+ close(p.errors)
+ close(p.successes)
+}
+
+func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
+ msg.clear()
+ pErr := &ProducerError{Msg: msg, Err: err}
+ if p.conf.Producer.Return.Errors {
+ p.errors <- pErr
+ } else {
+ Logger.Println(pErr)
+ }
+ p.inFlight.Done()
+}
+
+func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
+ for _, msg := range batch {
+ p.returnError(msg, err)
+ }
+}
+
+func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
+ for _, msg := range batch {
+ if p.conf.Producer.Return.Successes {
+ msg.clear()
+ p.successes <- msg
+ }
+ p.inFlight.Done()
+ }
+}
+
+func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
+ if msg.retries >= p.conf.Producer.Retry.Max {
+ p.returnError(msg, err)
+ } else {
+ msg.retries++
+ p.retries <- msg
+ }
+}
+
+func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
+ for _, msg := range batch {
+ p.retryMessage(msg, err)
+ }
+}
+
+func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage {
+ p.brokerLock.Lock()
+ defer p.brokerLock.Unlock()
+
+ bp := p.brokers[broker]
+
+ if bp == nil {
+ bp = p.newBrokerProducer(broker)
+ p.brokers[broker] = bp
+ p.brokerRefs[bp] = 0
+ }
+
+ p.brokerRefs[bp]++
+
+ return bp
+}
+
+func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) {
+ p.brokerLock.Lock()
+ defer p.brokerLock.Unlock()
+
+ p.brokerRefs[bp]--
+ if p.brokerRefs[bp] == 0 {
+ close(bp)
+ delete(p.brokerRefs, bp)
+
+ if p.brokers[broker] == bp {
+ delete(p.brokers, broker)
+ }
+ }
+}
+
+func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
+ p.brokerLock.Lock()
+ defer p.brokerLock.Unlock()
+
+ delete(p.brokers, broker)
+}
diff --git a/vendor/github.com/Shopify/sarama/async_producer_test.go b/vendor/github.com/Shopify/sarama/async_producer_test.go
new file mode 100644
index 000000000..07d23533b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/async_producer_test.go
@@ -0,0 +1,841 @@
+package sarama
+
+import (
+ "errors"
+ "log"
+ "os"
+ "os/signal"
+ "sync"
+ "testing"
+ "time"
+)
+
+const TestMessage = "ABC THE MESSAGE"
+
+func closeProducer(t *testing.T, p AsyncProducer) {
+ var wg sync.WaitGroup
+ p.AsyncClose()
+
+ wg.Add(2)
+ go func() {
+ for range p.Successes() {
+ t.Error("Unexpected message on Successes()")
+ }
+ wg.Done()
+ }()
+ go func() {
+ for msg := range p.Errors() {
+ t.Error(msg.Err)
+ }
+ wg.Done()
+ }()
+ wg.Wait()
+}
+
+func expectResults(t *testing.T, p AsyncProducer, successes, errors int) {
+ expect := successes + errors
+ for expect > 0 {
+ select {
+ case msg := <-p.Errors():
+ if msg.Msg.flags != 0 {
+ t.Error("Message had flags set")
+ }
+ errors--
+ expect--
+ if errors < 0 {
+ t.Error(msg.Err)
+ }
+ case msg := <-p.Successes():
+ if msg.flags != 0 {
+ t.Error("Message had flags set")
+ }
+ successes--
+ expect--
+ if successes < 0 {
+ t.Error("Too many successes")
+ }
+ }
+ }
+ if successes != 0 || errors != 0 {
+ t.Error("Unexpected successes", successes, "or errors", errors)
+ }
+}
+
+type testPartitioner chan *int32
+
+func (p testPartitioner) Partition(msg *ProducerMessage, numPartitions int32) (int32, error) {
+ part := <-p
+ if part == nil {
+ return 0, errors.New("BOOM")
+ }
+
+ return *part, nil
+}
+
+func (p testPartitioner) RequiresConsistency() bool {
+ return true
+}
+
+func (p testPartitioner) feed(partition int32) {
+ p <- &partition
+}
+
+type flakyEncoder bool
+
+func (f flakyEncoder) Length() int {
+ return len(TestMessage)
+}
+
+func (f flakyEncoder) Encode() ([]byte, error) {
+ if !bool(f) {
+ return nil, errors.New("flaky encoding error")
+ }
+ return []byte(TestMessage), nil
+}
+
+func TestAsyncProducer(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+ leader := NewMockBroker(t, 2)
+
+ metadataResponse := new(MetadataResponse)
+ metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
+ metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
+ seedBroker.Returns(metadataResponse)
+
+ prodSuccess := new(ProduceResponse)
+ prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
+ leader.Returns(prodSuccess)
+
+ config := NewConfig()
+ config.Producer.Flush.Messages = 10
+ config.Producer.Return.Successes = true
+ producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for i := 0; i < 10; i++ {
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Metadata: i}
+ }
+ for i := 0; i < 10; i++ {
+ select {
+ case msg := <-producer.Errors():
+ t.Error(msg.Err)
+ if msg.Msg.flags != 0 {
+ t.Error("Message had flags set")
+ }
+ case msg := <-producer.Successes():
+ if msg.flags != 0 {
+ t.Error("Message had flags set")
+ }
+ if msg.Metadata.(int) != i {
+ t.Error("Message metadata did not match")
+ }
+ }
+ }
+
+ closeProducer(t, producer)
+ leader.Close()
+ seedBroker.Close()
+}
+
+func TestAsyncProducerMultipleFlushes(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+ leader := NewMockBroker(t, 2)
+
+ metadataResponse := new(MetadataResponse)
+ metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
+ metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
+ seedBroker.Returns(metadataResponse)
+
+ prodSuccess := new(ProduceResponse)
+ prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
+ leader.Returns(prodSuccess)
+ leader.Returns(prodSuccess)
+ leader.Returns(prodSuccess)
+
+ config := NewConfig()
+ config.Producer.Flush.Messages = 5
+ config.Producer.Return.Successes = true
+ producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for flush := 0; flush < 3; flush++ {
+ for i := 0; i < 5; i++ {
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
+ }
+ expectResults(t, producer, 5, 0)
+ }
+
+ closeProducer(t, producer)
+ leader.Close()
+ seedBroker.Close()
+}
+
+func TestAsyncProducerMultipleBrokers(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+ leader0 := NewMockBroker(t, 2)
+ leader1 := NewMockBroker(t, 3)
+
+ metadataResponse := new(MetadataResponse)
+ metadataResponse.AddBroker(leader0.Addr(), leader0.BrokerID())
+ metadataResponse.AddBroker(leader1.Addr(), leader1.BrokerID())
+ metadataResponse.AddTopicPartition("my_topic", 0, leader0.BrokerID(), nil, nil, ErrNoError)
+ metadataResponse.AddTopicPartition("my_topic", 1, leader1.BrokerID(), nil, nil, ErrNoError)
+ seedBroker.Returns(metadataResponse)
+
+ prodResponse0 := new(ProduceResponse)
+ prodResponse0.AddTopicPartition("my_topic", 0, ErrNoError)
+ leader0.Returns(prodResponse0)
+
+ prodResponse1 := new(ProduceResponse)
+ prodResponse1.AddTopicPartition("my_topic", 1, ErrNoError)
+ leader1.Returns(prodResponse1)
+
+ config := NewConfig()
+ config.Producer.Flush.Messages = 5
+ config.Producer.Return.Successes = true
+ config.Producer.Partitioner = NewRoundRobinPartitioner
+ producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for i := 0; i < 10; i++ {
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
+ }
+ expectResults(t, producer, 10, 0)
+
+ closeProducer(t, producer)
+ leader1.Close()
+ leader0.Close()
+ seedBroker.Close()
+}
+
+func TestAsyncProducerCustomPartitioner(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+ leader := NewMockBroker(t, 2)
+
+ metadataResponse := new(MetadataResponse)
+ metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
+ metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
+ seedBroker.Returns(metadataResponse)
+
+ prodResponse := new(ProduceResponse)
+ prodResponse.AddTopicPartition("my_topic", 0, ErrNoError)
+ leader.Returns(prodResponse)
+
+ config := NewConfig()
+ config.Producer.Flush.Messages = 2
+ config.Producer.Return.Successes = true
+ config.Producer.Partitioner = func(topic string) Partitioner {
+ p := make(testPartitioner)
+ go func() {
+ p.feed(0)
+ p <- nil
+ p <- nil
+ p <- nil
+ p.feed(0)
+ }()
+ return p
+ }
+ producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for i := 0; i < 5; i++ {
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
+ }
+ expectResults(t, producer, 2, 3)
+
+ closeProducer(t, producer)
+ leader.Close()
+ seedBroker.Close()
+}
+
+func TestAsyncProducerFailureRetry(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+ leader1 := NewMockBroker(t, 2)
+ leader2 := NewMockBroker(t, 3)
+
+ metadataLeader1 := new(MetadataResponse)
+ metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID())
+ metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
+ seedBroker.Returns(metadataLeader1)
+
+ config := NewConfig()
+ config.Producer.Flush.Messages = 10
+ config.Producer.Return.Successes = true
+ config.Producer.Retry.Backoff = 0
+ producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ seedBroker.Close()
+
+ for i := 0; i < 10; i++ {
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
+ }
+ prodNotLeader := new(ProduceResponse)
+ prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
+ leader1.Returns(prodNotLeader)
+
+ metadataLeader2 := new(MetadataResponse)
+ metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID())
+ metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError)
+ leader1.Returns(metadataLeader2)
+
+ prodSuccess := new(ProduceResponse)
+ prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
+ leader2.Returns(prodSuccess)
+ expectResults(t, producer, 10, 0)
+ leader1.Close()
+
+ for i := 0; i < 10; i++ {
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
+ }
+ leader2.Returns(prodSuccess)
+ expectResults(t, producer, 10, 0)
+
+ leader2.Close()
+ closeProducer(t, producer)
+}
+
+func TestAsyncProducerEncoderFailures(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+ leader := NewMockBroker(t, 2)
+
+ metadataResponse := new(MetadataResponse)
+ metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
+ metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
+ seedBroker.Returns(metadataResponse)
+
+ prodSuccess := new(ProduceResponse)
+ prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
+ leader.Returns(prodSuccess)
+ leader.Returns(prodSuccess)
+ leader.Returns(prodSuccess)
+
+ config := NewConfig()
+ config.Producer.Flush.Messages = 1
+ config.Producer.Return.Successes = true
+ config.Producer.Partitioner = NewManualPartitioner
+ producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for flush := 0; flush < 3; flush++ {
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(true), Value: flakyEncoder(false)}
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(false), Value: flakyEncoder(true)}
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(true), Value: flakyEncoder(true)}
+ expectResults(t, producer, 1, 2)
+ }
+
+ closeProducer(t, producer)
+ leader.Close()
+ seedBroker.Close()
+}
+
+// If a Kafka broker becomes unavailable and then comes back into service, the
+// producer reconnects to it and continues sending messages.
+func TestAsyncProducerBrokerBounce(t *testing.T) {
+ // Given
+ seedBroker := NewMockBroker(t, 1)
+ leader := NewMockBroker(t, 2)
+ leaderAddr := leader.Addr()
+
+ metadataResponse := new(MetadataResponse)
+ metadataResponse.AddBroker(leaderAddr, leader.BrokerID())
+ metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
+ seedBroker.Returns(metadataResponse)
+
+ prodSuccess := new(ProduceResponse)
+ prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
+
+ config := NewConfig()
+ config.Producer.Flush.Messages = 1
+ config.Producer.Return.Successes = true
+ config.Producer.Retry.Backoff = 0
+ producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
+ leader.Returns(prodSuccess)
+ expectResults(t, producer, 1, 0)
+
+ // When: a broker connection gets reset by a broker (network glitch, restart, you name it).
+ leader.Close() // producer should get EOF
+ leader = NewMockBrokerAddr(t, 2, leaderAddr) // start it up again right away for giggles
+ seedBroker.Returns(metadataResponse) // tell it to go to broker 2 again
+
+ // Then: a produced message goes through the new broker connection.
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
+ leader.Returns(prodSuccess)
+ expectResults(t, producer, 1, 0)
+
+ closeProducer(t, producer)
+ seedBroker.Close()
+ leader.Close()
+}
+
+func TestAsyncProducerBrokerBounceWithStaleMetadata(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+ leader1 := NewMockBroker(t, 2)
+ leader2 := NewMockBroker(t, 3)
+
+ metadataLeader1 := new(MetadataResponse)
+ metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID())
+ metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
+ seedBroker.Returns(metadataLeader1)
+
+ config := NewConfig()
+ config.Producer.Flush.Messages = 10
+ config.Producer.Return.Successes = true
+ config.Producer.Retry.Max = 3
+ config.Producer.Retry.Backoff = 0
+ producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for i := 0; i < 10; i++ {
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
+ }
+ leader1.Close() // producer should get EOF
+ seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down
+ seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down
+
+ // ok fine, tell it to go to leader2 finally
+ metadataLeader2 := new(MetadataResponse)
+ metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID())
+ metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError)
+ seedBroker.Returns(metadataLeader2)
+
+ prodSuccess := new(ProduceResponse)
+ prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
+ leader2.Returns(prodSuccess)
+ expectResults(t, producer, 10, 0)
+ seedBroker.Close()
+ leader2.Close()
+
+ closeProducer(t, producer)
+}
+
+func TestAsyncProducerMultipleRetries(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+ leader1 := NewMockBroker(t, 2)
+ leader2 := NewMockBroker(t, 3)
+
+ metadataLeader1 := new(MetadataResponse)
+ metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID())
+ metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
+ seedBroker.Returns(metadataLeader1)
+
+ config := NewConfig()
+ config.Producer.Flush.Messages = 10
+ config.Producer.Return.Successes = true
+ config.Producer.Retry.Max = 4
+ config.Producer.Retry.Backoff = 0
+ producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for i := 0; i < 10; i++ {
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
+ }
+ prodNotLeader := new(ProduceResponse)
+ prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
+ leader1.Returns(prodNotLeader)
+
+ metadataLeader2 := new(MetadataResponse)
+ metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID())
+ metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError)
+ seedBroker.Returns(metadataLeader2)
+ leader2.Returns(prodNotLeader)
+ seedBroker.Returns(metadataLeader1)
+ leader1.Returns(prodNotLeader)
+ seedBroker.Returns(metadataLeader1)
+ leader1.Returns(prodNotLeader)
+ seedBroker.Returns(metadataLeader2)
+
+ prodSuccess := new(ProduceResponse)
+ prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
+ leader2.Returns(prodSuccess)
+ expectResults(t, producer, 10, 0)
+
+ for i := 0; i < 10; i++ {
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
+ }
+ leader2.Returns(prodSuccess)
+ expectResults(t, producer, 10, 0)
+
+ seedBroker.Close()
+ leader1.Close()
+ leader2.Close()
+ closeProducer(t, producer)
+}
+
+func TestAsyncProducerOutOfRetries(t *testing.T) {
+ t.Skip("Enable once bug #294 is fixed.")
+
+ seedBroker := NewMockBroker(t, 1)
+ leader := NewMockBroker(t, 2)
+
+ metadataResponse := new(MetadataResponse)
+ metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
+ metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
+ seedBroker.Returns(metadataResponse)
+
+ config := NewConfig()
+ config.Producer.Flush.Messages = 10
+ config.Producer.Return.Successes = true
+ config.Producer.Retry.Backoff = 0
+ config.Producer.Retry.Max = 0
+ producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for i := 0; i < 10; i++ {
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
+ }
+
+ prodNotLeader := new(ProduceResponse)
+ prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
+ leader.Returns(prodNotLeader)
+
+ for i := 0; i < 10; i++ {
+ select {
+ case msg := <-producer.Errors():
+ if msg.Err != ErrNotLeaderForPartition {
+ t.Error(msg.Err)
+ }
+ case <-producer.Successes():
+ t.Error("Unexpected success")
+ }
+ }
+
+ seedBroker.Returns(metadataResponse)
+
+ for i := 0; i < 10; i++ {
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
+ }
+
+ prodSuccess := new(ProduceResponse)
+ prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
+ leader.Returns(prodSuccess)
+
+ expectResults(t, producer, 10, 0)
+
+ leader.Close()
+ seedBroker.Close()
+ safeClose(t, producer)
+}
+
+func TestAsyncProducerRetryWithReferenceOpen(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+ leader := NewMockBroker(t, 2)
+ leaderAddr := leader.Addr()
+
+ metadataResponse := new(MetadataResponse)
+ metadataResponse.AddBroker(leaderAddr, leader.BrokerID())
+ metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
+ metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError)
+ seedBroker.Returns(metadataResponse)
+
+ config := NewConfig()
+ config.Producer.Return.Successes = true
+ config.Producer.Retry.Backoff = 0
+ config.Producer.Retry.Max = 1
+ config.Producer.Partitioner = NewRoundRobinPartitioner
+ producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // prime partition 0
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
+ prodSuccess := new(ProduceResponse)
+ prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
+ leader.Returns(prodSuccess)
+ expectResults(t, producer, 1, 0)
+
+ // prime partition 1
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
+ prodSuccess = new(ProduceResponse)
+ prodSuccess.AddTopicPartition("my_topic", 1, ErrNoError)
+ leader.Returns(prodSuccess)
+ expectResults(t, producer, 1, 0)
+
+ // reboot the broker (the producer will get EOF on its existing connection)
+ leader.Close()
+ leader = NewMockBrokerAddr(t, 2, leaderAddr)
+
+ // send another message on partition 0 to trigger the EOF and retry
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
+
+ // tell partition 0 to go to that broker again
+ seedBroker.Returns(metadataResponse)
+
+ // succeed this time
+ prodSuccess = new(ProduceResponse)
+ prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
+ leader.Returns(prodSuccess)
+ expectResults(t, producer, 1, 0)
+
+ // shutdown
+ closeProducer(t, producer)
+ seedBroker.Close()
+ leader.Close()
+}
+
+func TestAsyncProducerFlusherRetryCondition(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+ leader := NewMockBroker(t, 2)
+
+ metadataResponse := new(MetadataResponse)
+ metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
+ metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
+ metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError)
+ seedBroker.Returns(metadataResponse)
+
+ config := NewConfig()
+ config.Producer.Flush.Messages = 5
+ config.Producer.Return.Successes = true
+ config.Producer.Retry.Backoff = 0
+ config.Producer.Retry.Max = 1
+ config.Producer.Partitioner = NewManualPartitioner
+ producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // prime partitions
+ for p := int32(0); p < 2; p++ {
+ for i := 0; i < 5; i++ {
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: p}
+ }
+ prodSuccess := new(ProduceResponse)
+ prodSuccess.AddTopicPartition("my_topic", p, ErrNoError)
+ leader.Returns(prodSuccess)
+ expectResults(t, producer, 5, 0)
+ }
+
+ // send more messages on partition 0
+ for i := 0; i < 5; i++ {
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0}
+ }
+ prodNotLeader := new(ProduceResponse)
+ prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
+ leader.Returns(prodNotLeader)
+
+ time.Sleep(50 * time.Millisecond)
+
+ leader.SetHandlerByMap(map[string]MockResponse{
+ "ProduceRequest": NewMockProduceResponse(t).
+ SetError("my_topic", 0, ErrNoError),
+ })
+
+ // tell partition 0 to go to that broker again
+ seedBroker.Returns(metadataResponse)
+
+ // succeed this time
+ expectResults(t, producer, 5, 0)
+
+ // put five more through
+ for i := 0; i < 5; i++ {
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0}
+ }
+ expectResults(t, producer, 5, 0)
+
+ // shutdown
+ closeProducer(t, producer)
+ seedBroker.Close()
+ leader.Close()
+}
+
+func TestAsyncProducerRetryShutdown(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+ leader := NewMockBroker(t, 2)
+
+ metadataLeader := new(MetadataResponse)
+ metadataLeader.AddBroker(leader.Addr(), leader.BrokerID())
+ metadataLeader.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
+ seedBroker.Returns(metadataLeader)
+
+ config := NewConfig()
+ config.Producer.Flush.Messages = 10
+ config.Producer.Return.Successes = true
+ config.Producer.Retry.Backoff = 0
+ producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for i := 0; i < 10; i++ {
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
+ }
+ producer.AsyncClose()
+ time.Sleep(5 * time.Millisecond) // let the shutdown goroutine kick in
+
+ producer.Input() <- &ProducerMessage{Topic: "FOO"}
+ if err := <-producer.Errors(); err.Err != ErrShuttingDown {
+ t.Error(err)
+ }
+
+ prodNotLeader := new(ProduceResponse)
+ prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
+ leader.Returns(prodNotLeader)
+
+ seedBroker.Returns(metadataLeader)
+
+ prodSuccess := new(ProduceResponse)
+ prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
+ leader.Returns(prodSuccess)
+ expectResults(t, producer, 10, 0)
+
+ seedBroker.Close()
+ leader.Close()
+
+ // wait for the async-closed producer to shut down fully
+ for err := range producer.Errors() {
+ t.Error(err)
+ }
+}
+
+func TestAsyncProducerNoReturns(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+ leader := NewMockBroker(t, 2)
+
+ metadataLeader := new(MetadataResponse)
+ metadataLeader.AddBroker(leader.Addr(), leader.BrokerID())
+ metadataLeader.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
+ seedBroker.Returns(metadataLeader)
+
+ config := NewConfig()
+ config.Producer.Flush.Messages = 10
+ config.Producer.Return.Successes = false
+ config.Producer.Return.Errors = false
+ config.Producer.Retry.Backoff = 0
+ producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for i := 0; i < 10; i++ {
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
+ }
+
+ wait := make(chan bool)
+ go func() {
+ if err := producer.Close(); err != nil {
+ t.Error(err)
+ }
+ close(wait)
+ }()
+
+ prodSuccess := new(ProduceResponse)
+ prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
+ leader.Returns(prodSuccess)
+
+ <-wait
+ seedBroker.Close()
+ leader.Close()
+}
+
+// This example shows how to use the producer while simultaneously
+// reading the Errors channel to know about any failures.
+func ExampleAsyncProducer_select() {
+ producer, err := NewAsyncProducer([]string{"localhost:9092"}, nil)
+ if err != nil {
+ panic(err)
+ }
+
+ defer func() {
+ if err := producer.Close(); err != nil {
+ log.Fatalln(err)
+ }
+ }()
+
+ // Trap SIGINT to trigger a shutdown.
+ signals := make(chan os.Signal, 1)
+ signal.Notify(signals, os.Interrupt)
+
+ var enqueued, errors int
+ProducerLoop:
+ for {
+ select {
+ case producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder("testing 123")}:
+ enqueued++
+ case err := <-producer.Errors():
+ log.Println("Failed to produce message", err)
+ errors++
+ case <-signals:
+ break ProducerLoop
+ }
+ }
+
+ log.Printf("Enqueued: %d; errors: %d\n", enqueued, errors)
+}
+
+// This example shows how to use the producer with separate goroutines
+// reading from the Successes and Errors channels. Note that in order
+// for the Successes channel to be populated, you have to set
+// config.Producer.Return.Successes to true.
+func ExampleAsyncProducer_goroutines() {
+ config := NewConfig()
+ config.Producer.Return.Successes = true
+ producer, err := NewAsyncProducer([]string{"localhost:9092"}, config)
+ if err != nil {
+ panic(err)
+ }
+
+ // Trap SIGINT to trigger a graceful shutdown.
+ signals := make(chan os.Signal, 1)
+ signal.Notify(signals, os.Interrupt)
+
+ var (
+ wg sync.WaitGroup
+ enqueued, successes, errors int
+ )
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for range producer.Successes() {
+ successes++
+ }
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for err := range producer.Errors() {
+ log.Println(err)
+ errors++
+ }
+ }()
+
+ProducerLoop:
+ for {
+ message := &ProducerMessage{Topic: "my_topic", Value: StringEncoder("testing 123")}
+ select {
+ case producer.Input() <- message:
+ enqueued++
+
+ case <-signals:
+ producer.AsyncClose() // Trigger a shutdown of the producer.
+ break ProducerLoop
+ }
+ }
+
+ wg.Wait()
+
+ log.Printf("Successfully produced: %d; errors: %d\n", successes, errors)
+}
diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go
new file mode 100644
index 000000000..f57a69094
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/broker.go
@@ -0,0 +1,685 @@
+package sarama
+
+import (
+ "crypto/tls"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "net"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
+type Broker struct {
+ id int32
+ addr string
+
+ conf *Config
+ correlationID int32
+ conn net.Conn
+ connErr error
+ lock sync.Mutex
+ opened int32
+
+ responses chan responsePromise
+ done chan bool
+
+ incomingByteRate metrics.Meter
+ requestRate metrics.Meter
+ requestSize metrics.Histogram
+ requestLatency metrics.Histogram
+ outgoingByteRate metrics.Meter
+ responseRate metrics.Meter
+ responseSize metrics.Histogram
+ brokerIncomingByteRate metrics.Meter
+ brokerRequestRate metrics.Meter
+ brokerRequestSize metrics.Histogram
+ brokerRequestLatency metrics.Histogram
+ brokerOutgoingByteRate metrics.Meter
+ brokerResponseRate metrics.Meter
+ brokerResponseSize metrics.Histogram
+}
+
+type responsePromise struct {
+ requestTime time.Time
+ correlationID int32
+ packets chan []byte
+ errors chan error
+}
+
+// NewBroker creates and returns a Broker targeting the given host:port address.
+// This does not attempt to actually connect; you have to call Open() for that.
+func NewBroker(addr string) *Broker {
+ return &Broker{id: -1, addr: addr}
+}
+
+// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
+// waiting for the connection to complete. This means that any subsequent operations on the broker will
+// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
+// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
+// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
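+//
+// A minimal, illustrative sketch (the broker address is an assumption):
+//
+//	b := NewBroker("localhost:9092")
+//	if err := b.Open(nil); err != nil {
+//		panic(err) // ErrAlreadyConnected or a configuration error
+//	}
+//	if ok, err := b.Connected(); !ok {
+//		panic(err) // the asynchronous dial failed
+//	}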
+func (b *Broker) Open(conf *Config) error {
+ if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) {
+ return ErrAlreadyConnected
+ }
+
+ if conf == nil {
+ conf = NewConfig()
+ }
+
+ err := conf.Validate()
+ if err != nil {
+ return err
+ }
+
+ b.lock.Lock()
+
+ go withRecover(func() {
+ defer b.lock.Unlock()
+
+ dialer := net.Dialer{
+ Timeout: conf.Net.DialTimeout,
+ KeepAlive: conf.Net.KeepAlive,
+ }
+
+ if conf.Net.TLS.Enable {
+ b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config)
+ } else {
+ b.conn, b.connErr = dialer.Dial("tcp", b.addr)
+ }
+ if b.connErr != nil {
+ Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
+ b.conn = nil
+ atomic.StoreInt32(&b.opened, 0)
+ return
+ }
+ b.conn = newBufConn(b.conn)
+
+ b.conf = conf
+
+ // Create or reuse the global metrics shared between brokers
+ b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry)
+ b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry)
+ b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry)
+ b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry)
+ b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry)
+ b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry)
+ b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry)
+ // Do not gather per-broker metrics for seed brokers (only used during bootstrap) because
+ // they all share the same id (-1) and are already exposed through the global metrics above.
+ if b.id >= 0 {
+ b.brokerIncomingByteRate = getOrRegisterBrokerMeter("incoming-byte-rate", b, conf.MetricRegistry)
+ b.brokerRequestRate = getOrRegisterBrokerMeter("request-rate", b, conf.MetricRegistry)
+ b.brokerRequestSize = getOrRegisterBrokerHistogram("request-size", b, conf.MetricRegistry)
+ b.brokerRequestLatency = getOrRegisterBrokerHistogram("request-latency-in-ms", b, conf.MetricRegistry)
+ b.brokerOutgoingByteRate = getOrRegisterBrokerMeter("outgoing-byte-rate", b, conf.MetricRegistry)
+ b.brokerResponseRate = getOrRegisterBrokerMeter("response-rate", b, conf.MetricRegistry)
+ b.brokerResponseSize = getOrRegisterBrokerHistogram("response-size", b, conf.MetricRegistry)
+ }
+
+ if conf.Net.SASL.Enable {
+ b.connErr = b.sendAndReceiveSASLPlainAuth()
+ if b.connErr != nil {
+ err = b.conn.Close()
+ if err == nil {
+ Logger.Printf("Closed connection to broker %s\n", b.addr)
+ } else {
+ Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
+ }
+ b.conn = nil
+ atomic.StoreInt32(&b.opened, 0)
+ return
+ }
+ }
+
+ b.done = make(chan bool)
+ b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1)
+
+ if b.id >= 0 {
+ Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
+ } else {
+ Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
+ }
+ go withRecover(b.responseReceiver)
+ })
+
+ return nil
+}
+
+// Connected returns true if the broker is connected and false otherwise. If the broker is not
+// connected but it had tried to connect, the error from that connection attempt is also returned.
+func (b *Broker) Connected() (bool, error) {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ return b.conn != nil, b.connErr
+}
+
+func (b *Broker) Close() error {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ if b.conn == nil {
+ return ErrNotConnected
+ }
+
+ close(b.responses)
+ <-b.done
+
+ err := b.conn.Close()
+
+ b.conn = nil
+ b.connErr = nil
+ b.done = nil
+ b.responses = nil
+
+ if err == nil {
+ Logger.Printf("Closed connection to broker %s\n", b.addr)
+ } else {
+ Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
+ }
+
+ atomic.StoreInt32(&b.opened, 0)
+
+ return err
+}
+
+// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
+func (b *Broker) ID() int32 {
+ return b.id
+}
+
+// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
+func (b *Broker) Addr() string {
+ return b.addr
+}
+
+func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
+ response := new(MetadataResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
+ response := new(ConsumerMetadataResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
+ response := new(OffsetResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
+ var response *ProduceResponse
+ var err error
+
+ if request.RequiredAcks == NoResponse {
+ err = b.sendAndReceive(request, nil)
+ } else {
+ response = new(ProduceResponse)
+ err = b.sendAndReceive(request, response)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
+ response := new(FetchResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
+ response := new(OffsetCommitResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
+ response := new(OffsetFetchResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
+ response := new(JoinGroupResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
+ response := new(SyncGroupResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
+ response := new(LeaveGroupResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
+ response := new(HeartbeatResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
+ response := new(ListGroupsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
+ response := new(DescribeGroupsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
+ response := new(ApiVersionsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
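+// send encodes the request and writes it to the broker connection under the lock.
+// When a response is expected it registers a responsePromise for responseReceiver
+// to fulfil; otherwise it only records the request latency and returns nil.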
+func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ if b.conn == nil {
+ if b.connErr != nil {
+ return nil, b.connErr
+ }
+ return nil, ErrNotConnected
+ }
+
+ if !b.conf.Version.IsAtLeast(rb.requiredVersion()) {
+ return nil, ErrUnsupportedVersion
+ }
+
+ req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+ buf, err := encode(req, b.conf.MetricRegistry)
+ if err != nil {
+ return nil, err
+ }
+
+ err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
+ if err != nil {
+ return nil, err
+ }
+
+ requestTime := time.Now()
+ bytes, err := b.conn.Write(buf)
+ b.updateOutgoingCommunicationMetrics(bytes)
+ if err != nil {
+ return nil, err
+ }
+ b.correlationID++
+
+ if !promiseResponse {
+ // Record request latency without the response
+ b.updateRequestLatencyMetrics(time.Since(requestTime))
+ return nil, nil
+ }
+
+ promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)}
+ b.responses <- promise
+
+ return &promise, nil
+}
+
+func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error {
+ promise, err := b.send(req, res != nil)
+
+ if err != nil {
+ return err
+ }
+
+ if promise == nil {
+ return nil
+ }
+
+ select {
+ case buf := <-promise.packets:
+ return versionedDecode(buf, res, req.version())
+ case err = <-promise.errors:
+ return err
+ }
+}
+
+func (b *Broker) decode(pd packetDecoder) (err error) {
+ b.id, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ host, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ port, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ b.addr = net.JoinHostPort(host, fmt.Sprint(port))
+ if _, _, err := net.SplitHostPort(b.addr); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (b *Broker) encode(pe packetEncoder) (err error) {
+
+ host, portstr, err := net.SplitHostPort(b.addr)
+ if err != nil {
+ return err
+ }
+ port, err := strconv.Atoi(portstr)
+ if err != nil {
+ return err
+ }
+
+ pe.putInt32(b.id)
+
+ err = pe.putString(host)
+ if err != nil {
+ return err
+ }
+
+ pe.putInt32(int32(port))
+
+ return nil
+}
+
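+// responseReceiver runs in its own goroutine. It reads response headers and bodies
+// off the connection in request order, verifies the correlation ID against the
+// pending promise, and delivers the payload or error on the promise's channels.
+// After the first read error, every remaining promise receives that same error.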
+func (b *Broker) responseReceiver() {
+ var dead error
+ header := make([]byte, 8)
+ for response := range b.responses {
+ if dead != nil {
+ response.errors <- dead
+ continue
+ }
+
+ err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout))
+ if err != nil {
+ dead = err
+ response.errors <- err
+ continue
+ }
+
+ bytesReadHeader, err := io.ReadFull(b.conn, header)
+ requestLatency := time.Since(response.requestTime)
+ if err != nil {
+ b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+ dead = err
+ response.errors <- err
+ continue
+ }
+
+ decodedHeader := responseHeader{}
+ err = decode(header, &decodedHeader)
+ if err != nil {
+ b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+ dead = err
+ response.errors <- err
+ continue
+ }
+ if decodedHeader.correlationID != response.correlationID {
+ b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+ // TODO if decoded ID < cur ID, discard until we catch up
+ // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
+ dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)}
+ response.errors <- dead
+ continue
+ }
+
+ buf := make([]byte, decodedHeader.length-4)
+ bytesReadBody, err := io.ReadFull(b.conn, buf)
+ b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
+ if err != nil {
+ dead = err
+ response.errors <- err
+ continue
+ }
+
+ response.packets <- buf
+ }
+ close(b.done)
+}
+
+func (b *Broker) sendAndReceiveSASLPlainHandshake() error {
+ rb := &SaslHandshakeRequest{"PLAIN"}
+ req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+ buf, err := encode(req, b.conf.MetricRegistry)
+ if err != nil {
+ return err
+ }
+
+ err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
+ if err != nil {
+ return err
+ }
+
+ requestTime := time.Now()
+ bytes, err := b.conn.Write(buf)
+ b.updateOutgoingCommunicationMetrics(bytes)
+ if err != nil {
+ Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
+ return err
+ }
+ b.correlationID++
+ // wait for the response
+ header := make([]byte, 8) // response header
+ _, err = io.ReadFull(b.conn, header)
+ if err != nil {
+ Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
+ return err
+ }
+ length := binary.BigEndian.Uint32(header[:4])
+ payload := make([]byte, length-4)
+ n, err := io.ReadFull(b.conn, payload)
+ if err != nil {
+ Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
+ return err
+ }
+ b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
+ res := &SaslHandshakeResponse{}
+ err = versionedDecode(payload, res, 0)
+ if err != nil {
+ Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error())
+ return err
+ }
+ if res.Err != ErrNoError {
+ Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error())
+ return res.Err
+ }
+ Logger.Print("Successful SASL handshake")
+ return nil
+}
+
+// Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149)
+// Some hosted kafka services such as IBM Message Hub already offer SASL/PLAIN auth with Kafka 0.9
+//
+// In SASL Plain, Kafka expects the auth header to be in the following format
+// Message format (from https://tools.ietf.org/html/rfc4616):
+//
+// message = [authzid] UTF8NUL authcid UTF8NUL passwd
+// authcid = 1*SAFE ; MUST accept up to 255 octets
+// authzid = 1*SAFE ; MUST accept up to 255 octets
+// passwd = 1*SAFE ; MUST accept up to 255 octets
+// UTF8NUL = %x00 ; UTF-8 encoded NUL character
+//
+// SAFE = UTF1 / UTF2 / UTF3 / UTF4
+// ;; any UTF-8 encoded Unicode character except NUL
+//
+// When credentials are valid, Kafka returns a 4 byte array of null characters.
+// When credentials are invalid, Kafka closes the connection. This does not seem to be the ideal way
+// of responding to bad credentials, but that's how it's done today.
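+//
+// For example (hypothetical credentials), the user "alice" with password "secret"
+// is sent as the 13 bytes "\x00alice\x00secret", preceded by a 4-byte big-endian
+// length prefix, which is exactly what the code below builds.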
+func (b *Broker) sendAndReceiveSASLPlainAuth() error {
+ if b.conf.Net.SASL.Handshake {
+ handshakeErr := b.sendAndReceiveSASLPlainHandshake()
+ if handshakeErr != nil {
+ Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
+ return handshakeErr
+ }
+ }
+ length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
+ authBytes := make([]byte, length+4) // 4 byte length header + auth data
+ binary.BigEndian.PutUint32(authBytes, uint32(length))
+ copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password))
+
+ err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
+ if err != nil {
+ Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error())
+ return err
+ }
+
+ requestTime := time.Now()
+ bytesWritten, err := b.conn.Write(authBytes)
+ b.updateOutgoingCommunicationMetrics(bytesWritten)
+ if err != nil {
+ Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
+ return err
+ }
+
+ header := make([]byte, 4)
+ n, err := io.ReadFull(b.conn, header)
+ b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
+ // If the credentials are valid, we would get a 4 byte response filled with null characters.
+ // Otherwise, the broker closes the connection and we get an EOF
+ if err != nil {
+ Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
+ return err
+ }
+
+ Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
+ return nil
+}
+
+func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
+ b.updateRequestLatencyMetrics(requestLatency)
+ b.responseRate.Mark(1)
+ if b.brokerResponseRate != nil {
+ b.brokerResponseRate.Mark(1)
+ }
+ responseSize := int64(bytes)
+ b.incomingByteRate.Mark(responseSize)
+ if b.brokerIncomingByteRate != nil {
+ b.brokerIncomingByteRate.Mark(responseSize)
+ }
+ b.responseSize.Update(responseSize)
+ if b.brokerResponseSize != nil {
+ b.brokerResponseSize.Update(responseSize)
+ }
+}
+
+func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) {
+ requestLatencyInMs := int64(requestLatency / time.Millisecond)
+ b.requestLatency.Update(requestLatencyInMs)
+ if b.brokerRequestLatency != nil {
+ b.brokerRequestLatency.Update(requestLatencyInMs)
+ }
+}
+
+func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
+ b.requestRate.Mark(1)
+ if b.brokerRequestRate != nil {
+ b.brokerRequestRate.Mark(1)
+ }
+ requestSize := int64(bytes)
+ b.outgoingByteRate.Mark(requestSize)
+ if b.brokerOutgoingByteRate != nil {
+ b.brokerOutgoingByteRate.Mark(requestSize)
+ }
+ b.requestSize.Update(requestSize)
+ if b.brokerRequestSize != nil {
+ b.brokerRequestSize.Update(requestSize)
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/broker_test.go b/vendor/github.com/Shopify/sarama/broker_test.go
new file mode 100644
index 000000000..fcbe627fa
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/broker_test.go
@@ -0,0 +1,328 @@
+package sarama
+
+import (
+ "fmt"
+ "testing"
+ "time"
+)
+
+func ExampleBroker() {
+ broker := NewBroker("localhost:9092")
+ err := broker.Open(nil)
+ if err != nil {
+ panic(err)
+ }
+
+ request := MetadataRequest{Topics: []string{"myTopic"}}
+ response, err := broker.GetMetadata(&request)
+ if err != nil {
+ _ = broker.Close()
+ panic(err)
+ }
+
+ fmt.Println("There are", len(response.Topics), "topics active in the cluster.")
+
+ if err = broker.Close(); err != nil {
+ panic(err)
+ }
+}
+
+type mockEncoder struct {
+ bytes []byte
+}
+
+func (m mockEncoder) encode(pe packetEncoder) error {
+ return pe.putRawBytes(m.bytes)
+}
+
+type brokerMetrics struct {
+ bytesRead int
+ bytesWritten int
+}
+
+func TestBrokerAccessors(t *testing.T) {
+ broker := NewBroker("abc:123")
+
+ if broker.ID() != -1 {
+ t.Error("New broker didn't have an ID of -1.")
+ }
+
+ if broker.Addr() != "abc:123" {
+ t.Error("New broker didn't have the correct address")
+ }
+
+ broker.id = 34
+ if broker.ID() != 34 {
+ t.Error("Manually setting broker ID did not take effect.")
+ }
+}
+
+func TestSimpleBrokerCommunication(t *testing.T) {
+ for _, tt := range brokerTestTable {
+ Logger.Printf("Testing broker communication for %s", tt.name)
+ mb := NewMockBroker(t, 0)
+ mb.Returns(&mockEncoder{tt.response})
+ pendingNotify := make(chan brokerMetrics)
+ // Register a callback to be notified about successful requests
+ mb.SetNotifier(func(bytesRead, bytesWritten int) {
+ pendingNotify <- brokerMetrics{bytesRead, bytesWritten}
+ })
+ broker := NewBroker(mb.Addr())
+ // Set the broker id in order to validate local broker metrics
+ broker.id = 0
+ conf := NewConfig()
+ conf.Version = V0_10_0_0
+ err := broker.Open(conf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ tt.runner(t, broker)
+ err = broker.Close()
+ if err != nil {
+ t.Error(err)
+ }
+ // Wait up to 500 ms for the remote broker to process the request and
+ // notify us about the metrics
+ timeout := 500 * time.Millisecond
+ select {
+ case mockBrokerMetrics := <-pendingNotify:
+ validateBrokerMetrics(t, broker, mockBrokerMetrics)
+ case <-time.After(timeout):
+ t.Errorf("No request received for: %s after waiting for %v", tt.name, timeout)
+ }
+ mb.Close()
+ }
+
+}
+
+// We're not testing encoding/decoding here, so most of the requests/responses will be empty for simplicity's sake
+var brokerTestTable = []struct {
+ name string
+ response []byte
+ runner func(*testing.T, *Broker)
+}{
+ {"MetadataRequest",
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ func(t *testing.T, broker *Broker) {
+ request := MetadataRequest{}
+ response, err := broker.GetMetadata(&request)
+ if err != nil {
+ t.Error(err)
+ }
+ if response == nil {
+ t.Error("Metadata request got no response!")
+ }
+ }},
+
+ {"ConsumerMetadataRequest",
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 't', 0x00, 0x00, 0x00, 0x00},
+ func(t *testing.T, broker *Broker) {
+ request := ConsumerMetadataRequest{}
+ response, err := broker.GetConsumerMetadata(&request)
+ if err != nil {
+ t.Error(err)
+ }
+ if response == nil {
+ t.Error("Consumer Metadata request got no response!")
+ }
+ }},
+
+ {"ProduceRequest (NoResponse)",
+ []byte{},
+ func(t *testing.T, broker *Broker) {
+ request := ProduceRequest{}
+ request.RequiredAcks = NoResponse
+ response, err := broker.Produce(&request)
+ if err != nil {
+ t.Error(err)
+ }
+ if response != nil {
+ t.Error("Produce request with NoResponse got a response!")
+ }
+ }},
+
+ {"ProduceRequest (WaitForLocal)",
+ []byte{0x00, 0x00, 0x00, 0x00},
+ func(t *testing.T, broker *Broker) {
+ request := ProduceRequest{}
+ request.RequiredAcks = WaitForLocal
+ response, err := broker.Produce(&request)
+ if err != nil {
+ t.Error(err)
+ }
+ if response == nil {
+ t.Error("Produce request without NoResponse got no response!")
+ }
+ }},
+
+ {"FetchRequest",
+ []byte{0x00, 0x00, 0x00, 0x00},
+ func(t *testing.T, broker *Broker) {
+ request := FetchRequest{}
+ response, err := broker.Fetch(&request)
+ if err != nil {
+ t.Error(err)
+ }
+ if response == nil {
+ t.Error("Fetch request got no response!")
+ }
+ }},
+
+ {"OffsetFetchRequest",
+ []byte{0x00, 0x00, 0x00, 0x00},
+ func(t *testing.T, broker *Broker) {
+ request := OffsetFetchRequest{}
+ response, err := broker.FetchOffset(&request)
+ if err != nil {
+ t.Error(err)
+ }
+ if response == nil {
+ t.Error("OffsetFetch request got no response!")
+ }
+ }},
+
+ {"OffsetCommitRequest",
+ []byte{0x00, 0x00, 0x00, 0x00},
+ func(t *testing.T, broker *Broker) {
+ request := OffsetCommitRequest{}
+ response, err := broker.CommitOffset(&request)
+ if err != nil {
+ t.Error(err)
+ }
+ if response == nil {
+ t.Error("OffsetCommit request got no response!")
+ }
+ }},
+
+ {"OffsetRequest",
+ []byte{0x00, 0x00, 0x00, 0x00},
+ func(t *testing.T, broker *Broker) {
+ request := OffsetRequest{}
+ response, err := broker.GetAvailableOffsets(&request)
+ if err != nil {
+ t.Error(err)
+ }
+ if response == nil {
+ t.Error("Offset request got no response!")
+ }
+ }},
+
+ {"JoinGroupRequest",
+ []byte{0x00, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ func(t *testing.T, broker *Broker) {
+ request := JoinGroupRequest{}
+ response, err := broker.JoinGroup(&request)
+ if err != nil {
+ t.Error(err)
+ }
+ if response == nil {
+ t.Error("JoinGroup request got no response!")
+ }
+ }},
+
+ {"SyncGroupRequest",
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ func(t *testing.T, broker *Broker) {
+ request := SyncGroupRequest{}
+ response, err := broker.SyncGroup(&request)
+ if err != nil {
+ t.Error(err)
+ }
+ if response == nil {
+ t.Error("SyncGroup request got no response!")
+ }
+ }},
+
+ {"LeaveGroupRequest",
+ []byte{0x00, 0x00},
+ func(t *testing.T, broker *Broker) {
+ request := LeaveGroupRequest{}
+ response, err := broker.LeaveGroup(&request)
+ if err != nil {
+ t.Error(err)
+ }
+ if response == nil {
+ t.Error("LeaveGroup request got no response!")
+ }
+ }},
+
+ {"HeartbeatRequest",
+ []byte{0x00, 0x00},
+ func(t *testing.T, broker *Broker) {
+ request := HeartbeatRequest{}
+ response, err := broker.Heartbeat(&request)
+ if err != nil {
+ t.Error(err)
+ }
+ if response == nil {
+ t.Error("Heartbeat request got no response!")
+ }
+ }},
+
+ {"ListGroupsRequest",
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ func(t *testing.T, broker *Broker) {
+ request := ListGroupsRequest{}
+ response, err := broker.ListGroups(&request)
+ if err != nil {
+ t.Error(err)
+ }
+ if response == nil {
+ t.Error("ListGroups request got no response!")
+ }
+ }},
+
+ {"DescribeGroupsRequest",
+ []byte{0x00, 0x00, 0x00, 0x00},
+ func(t *testing.T, broker *Broker) {
+ request := DescribeGroupsRequest{}
+ response, err := broker.DescribeGroups(&request)
+ if err != nil {
+ t.Error(err)
+ }
+ if response == nil {
+ t.Error("DescribeGroups request got no response!")
+ }
+ }},
+
+ {"ApiVersionsRequest",
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ func(t *testing.T, broker *Broker) {
+ request := ApiVersionsRequest{}
+ response, err := broker.ApiVersions(&request)
+ if err != nil {
+ t.Error(err)
+ }
+ if response == nil {
+ t.Error("ApiVersions request got no response!")
+ }
+ }},
+}
+
+func validateBrokerMetrics(t *testing.T, broker *Broker, mockBrokerMetrics brokerMetrics) {
+ metricValidators := newMetricValidators()
+ mockBrokerBytesRead := mockBrokerMetrics.bytesRead
+ mockBrokerBytesWritten := mockBrokerMetrics.bytesWritten
+
+ // Check that the number of bytes sent corresponds to what the mock broker received
+ metricValidators.registerForAllBrokers(broker, countMeterValidator("incoming-byte-rate", mockBrokerBytesWritten))
+ if mockBrokerBytesWritten == 0 {
+ // This is a ProduceRequest with NoResponse
+ metricValidators.registerForAllBrokers(broker, countMeterValidator("response-rate", 0))
+ metricValidators.registerForAllBrokers(broker, countHistogramValidator("response-size", 0))
+ metricValidators.registerForAllBrokers(broker, minMaxHistogramValidator("response-size", 0, 0))
+ } else {
+ metricValidators.registerForAllBrokers(broker, countMeterValidator("response-rate", 1))
+ metricValidators.registerForAllBrokers(broker, countHistogramValidator("response-size", 1))
+ metricValidators.registerForAllBrokers(broker, minMaxHistogramValidator("response-size", mockBrokerBytesWritten, mockBrokerBytesWritten))
+ }
+
+ // Check that the number of bytes received corresponds to what the mock broker sent
+ metricValidators.registerForAllBrokers(broker, countMeterValidator("outgoing-byte-rate", mockBrokerBytesRead))
+ metricValidators.registerForAllBrokers(broker, countMeterValidator("request-rate", 1))
+ metricValidators.registerForAllBrokers(broker, countHistogramValidator("request-size", 1))
+ metricValidators.registerForAllBrokers(broker, minMaxHistogramValidator("request-size", mockBrokerBytesRead, mockBrokerBytesRead))
+
+ // Run the validators
+ metricValidators.run(t, broker.conf.MetricRegistry)
+}
diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go
new file mode 100644
index 000000000..45de3973d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/client.go
@@ -0,0 +1,779 @@
+package sarama
+
+import (
+ "math/rand"
+ "sort"
+ "sync"
+ "time"
+)
+
+// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
+// You MUST call Close() on a client to avoid leaks; it will not be garbage-collected
+// automatically when it passes out of scope. It is safe to share a client amongst many
+// users; however, Kafka will process requests from a single client strictly in serial,
+// so it is generally more efficient to use the default of one client per producer/consumer.
+type Client interface {
+ // Config returns the Config struct of the client. This struct should not be
+ // altered after it has been created.
+ Config() *Config
+
+ // Brokers returns the current set of active brokers as retrieved from cluster metadata.
+ Brokers() []*Broker
+
+ // Topics returns the set of available topics as retrieved from cluster metadata.
+ Topics() ([]string, error)
+
+ // Partitions returns the sorted list of all partition IDs for the given topic.
+ Partitions(topic string) ([]int32, error)
+
+ // WritablePartitions returns the sorted list of all writable partition IDs for
+ // the given topic, where "writable" means "having a valid leader accepting
+ // writes".
+ WritablePartitions(topic string) ([]int32, error)
+
+ // Leader returns the broker object that is the leader of the current
+ // topic/partition, as determined by querying the cluster metadata.
+ Leader(topic string, partitionID int32) (*Broker, error)
+
+ // Replicas returns the set of all replica IDs for the given partition.
+ Replicas(topic string, partitionID int32) ([]int32, error)
+
+ // InSyncReplicas returns the set of all in-sync replica IDs for the given
+ // partition. In-sync replicas are replicas which are fully caught up with
+ // the partition leader.
+ InSyncReplicas(topic string, partitionID int32) ([]int32, error)
+
+ // RefreshMetadata takes a list of topics and queries the cluster to refresh the
+ // available metadata for those topics. If no topics are provided, it will refresh
+ // metadata for all topics.
+ RefreshMetadata(topics ...string) error
+
+ // GetOffset queries the cluster to get the most recent available offset at the
+ // given time on the topic/partition combination. Time should be OffsetOldest for
+ // the earliest available offset, OffsetNewest for the offset of the message that
+ // will be produced next, or a time.
+ GetOffset(topic string, partitionID int32, time int64) (int64, error)
+
+ // Coordinator returns the coordinating broker for a consumer group. It will
+ // return a locally cached value if it's available. You can call
+ // RefreshCoordinator to update the cached value. This function only works on
+ // Kafka 0.8.2 and higher.
+ Coordinator(consumerGroup string) (*Broker, error)
+
+ // RefreshCoordinator retrieves the coordinator for a consumer group and stores it
+ // in local cache. This function only works on Kafka 0.8.2 and higher.
+ RefreshCoordinator(consumerGroup string) error
+
+ // Close shuts down all broker connections managed by this client. It is required
+ // to call this function before a client object passes out of scope, as it will
+ // otherwise leak memory. You must close any Producers or Consumers using a client
+ // before you close the client.
+ Close() error
+
+ // Closed returns true if the client has already had Close called on it
+ Closed() bool
+}
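+
+// A minimal, illustrative lifecycle sketch (the broker address is an assumption):
+//
+//	client, err := NewClient([]string{"localhost:9092"}, nil)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer func() { _ = client.Close() }()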
+
+const (
+ // OffsetNewest stands for the log head offset, i.e. the offset that will be
+ // assigned to the next message that will be produced to the partition. You
+ // can send this to a client's GetOffset method to get this offset, or when
+ // calling ConsumePartition to start consuming new messages.
+ OffsetNewest int64 = -1
+ // OffsetOldest stands for the oldest offset available on the broker for a
+ // partition. You can send this to a client's GetOffset method to get this
+ // offset, or when calling ConsumePartition to start consuming from the
+ // oldest offset that is still available on the broker.
+ OffsetOldest int64 = -2
+)
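+
+// For example (assuming an existing client), the offset that will be assigned to
+// the next message produced to partition 0 of "my_topic" can be queried with:
+//
+//	offset, err := client.GetOffset("my_topic", 0, OffsetNewest)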
+
+type client struct {
+ conf *Config
+ closer, closed chan none // for shutting down background metadata updater
+
+ // the broker addresses given to us through the constructor are not guaranteed to be returned in
+ // the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
+ // so we store them separately
+ seedBrokers []*Broker
+ deadSeeds []*Broker
+
+ brokers map[int32]*Broker // maps broker ids to brokers
+ metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
+ coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs
+
+ // If the number of partitions is large, we can get some churn calling cachedPartitions,
+ // so the result is cached. It is important to update this value whenever metadata is changed
+ cachedPartitionsResults map[string][maxPartitionIndex][]int32
+
+ lock sync.RWMutex // protects access to the maps that hold cluster state.
+}
+
+// NewClient creates a new Client. It connects to one of the given broker addresses
+// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
+// be retrieved from any of the given broker addresses, the client is not created.
+func NewClient(addrs []string, conf *Config) (Client, error) {
+ Logger.Println("Initializing new client")
+
+ if conf == nil {
+ conf = NewConfig()
+ }
+
+ if err := conf.Validate(); err != nil {
+ return nil, err
+ }
+
+ if len(addrs) < 1 {
+ return nil, ConfigurationError("You must provide at least one broker address")
+ }
+
+ client := &client{
+ conf: conf,
+ closer: make(chan none),
+ closed: make(chan none),
+ brokers: make(map[int32]*Broker),
+ metadata: make(map[string]map[int32]*PartitionMetadata),
+ cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
+ coordinators: make(map[string]int32),
+ }
+
+ random := rand.New(rand.NewSource(time.Now().UnixNano()))
+ for _, index := range random.Perm(len(addrs)) {
+ client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
+ }
+
+ // do an initial fetch of all cluster metadata by specifying an empty list of topics
+ err := client.RefreshMetadata()
+ switch err {
+ case nil:
+ break
+ case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
+ // indicates that maybe part of the cluster is down, but is not fatal to creating the client
+ Logger.Println(err)
+ default:
+ close(client.closed) // we haven't started the background updater yet, so we have to do this manually
+ _ = client.Close()
+ return nil, err
+ }
+ go withRecover(client.backgroundMetadataUpdater)
+
+ Logger.Println("Successfully initialized new client")
+
+ return client, nil
+}
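+
+// exampleClientLifecycle is an illustrative sketch (not part of the upstream
+// sarama source) of the basic lifecycle described in the NewClient documentation
+// above. The broker address "localhost:9092" is an assumption for illustration only.
+func exampleClientLifecycle() error {
+	conf := NewConfig()
+	c, err := NewClient([]string{"localhost:9092"}, conf)
+	if err != nil {
+		return err
+	}
+	// Clients must always be closed, or they leak memory and connections.
+	defer func() {
+		if err := c.Close(); err != nil {
+			Logger.Println("error closing client:", err)
+		}
+	}()
+
+	topics, err := c.Topics()
+	if err != nil {
+		return err
+	}
+	Logger.Println("cluster topics:", topics)
+	return nil
+}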
+
+func (client *client) Config() *Config {
+ return client.conf
+}
+
+func (client *client) Brokers() []*Broker {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+ brokers := make([]*Broker, 0)
+ for _, broker := range client.brokers {
+ brokers = append(brokers, broker)
+ }
+ return brokers
+}
+
+func (client *client) Close() error {
+ if client.Closed() {
+ // Chances are this is being called from a defer() and the error will go unobserved
+ // so we go ahead and log the event in this case.
+ Logger.Printf("Close() called on already closed client")
+ return ErrClosedClient
+ }
+
+ // shutdown and wait for the background thread before we take the lock, to avoid races
+ close(client.closer)
+ <-client.closed
+
+ client.lock.Lock()
+ defer client.lock.Unlock()
+ Logger.Println("Closing Client")
+
+ for _, broker := range client.brokers {
+ safeAsyncClose(broker)
+ }
+
+ for _, broker := range client.seedBrokers {
+ safeAsyncClose(broker)
+ }
+
+ client.brokers = nil
+ client.metadata = nil
+
+ return nil
+}
+
+func (client *client) Closed() bool {
+ return client.brokers == nil
+}
+
+func (client *client) Topics() ([]string, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ ret := make([]string, 0, len(client.metadata))
+ for topic := range client.metadata {
+ ret = append(ret, topic)
+ }
+
+ return ret, nil
+}
+
+func (client *client) Partitions(topic string) ([]int32, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ partitions := client.cachedPartitions(topic, allPartitions)
+
+ if len(partitions) == 0 {
+ err := client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ partitions = client.cachedPartitions(topic, allPartitions)
+ }
+
+ if partitions == nil {
+ return nil, ErrUnknownTopicOrPartition
+ }
+
+ return partitions, nil
+}
+
+func (client *client) WritablePartitions(topic string) ([]int32, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ partitions := client.cachedPartitions(topic, writablePartitions)
+
+ // len==0 catches when it's nil (no such topic) and the odd case when every single
+ // partition is undergoing leader election simultaneously. Callers have to be able to handle
+ // this function returning an empty slice (which is a valid return value) but catching it
+ // here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers
+ // a metadata refresh as a nicety so callers can just try again and don't have to manually
+ // trigger a refresh (otherwise they'd just keep getting a stale cached copy).
+ if len(partitions) == 0 {
+ err := client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ partitions = client.cachedPartitions(topic, writablePartitions)
+ }
+
+ if partitions == nil {
+ return nil, ErrUnknownTopicOrPartition
+ }
+
+ return partitions, nil
+}
+
+func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ metadata := client.cachedMetadata(topic, partitionID)
+
+ if metadata == nil {
+ err := client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ metadata = client.cachedMetadata(topic, partitionID)
+ }
+
+ if metadata == nil {
+ return nil, ErrUnknownTopicOrPartition
+ }
+
+ if metadata.Err == ErrReplicaNotAvailable {
+ return nil, metadata.Err
+ }
+ return dupeAndSort(metadata.Replicas), nil
+}
+
+func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ metadata := client.cachedMetadata(topic, partitionID)
+
+ if metadata == nil {
+ err := client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ metadata = client.cachedMetadata(topic, partitionID)
+ }
+
+ if metadata == nil {
+ return nil, ErrUnknownTopicOrPartition
+ }
+
+ if metadata.Err == ErrReplicaNotAvailable {
+ return nil, metadata.Err
+ }
+ return dupeAndSort(metadata.Isr), nil
+}
+
+func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ leader, err := client.cachedLeader(topic, partitionID)
+
+ if leader == nil {
+ err = client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ leader, err = client.cachedLeader(topic, partitionID)
+ }
+
+ return leader, err
+}
+
+func (client *client) RefreshMetadata(topics ...string) error {
+ if client.Closed() {
+ return ErrClosedClient
+ }
+
+ // Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper
+ // error. This handles the case by returning an error instead of sending it
+ // off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
+ for _, topic := range topics {
+ if len(topic) == 0 {
+ return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
+ }
+ }
+
+ return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max)
+}
+
+func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
+ if client.Closed() {
+ return -1, ErrClosedClient
+ }
+
+ offset, err := client.getOffset(topic, partitionID, time)
+
+ if err != nil {
+ if err := client.RefreshMetadata(topic); err != nil {
+ return -1, err
+ }
+ return client.getOffset(topic, partitionID, time)
+ }
+
+ return offset, err
+}
+
+func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ coordinator := client.cachedCoordinator(consumerGroup)
+
+ if coordinator == nil {
+ if err := client.RefreshCoordinator(consumerGroup); err != nil {
+ return nil, err
+ }
+ coordinator = client.cachedCoordinator(consumerGroup)
+ }
+
+ if coordinator == nil {
+ return nil, ErrConsumerCoordinatorNotAvailable
+ }
+
+ _ = coordinator.Open(client.conf)
+ return coordinator, nil
+}
+
+func (client *client) RefreshCoordinator(consumerGroup string) error {
+ if client.Closed() {
+ return ErrClosedClient
+ }
+
+ response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
+ if err != nil {
+ return err
+ }
+
+ client.lock.Lock()
+ defer client.lock.Unlock()
+ client.registerBroker(response.Coordinator)
+ client.coordinators[consumerGroup] = response.Coordinator.ID()
+ return nil
+}
+
+// private broker management helpers
+
+// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
+// in the brokers map. If a broker with the same ID is already registered at a different address,
+// the stale entry is closed and replaced. You must hold the write lock before calling this function.
+func (client *client) registerBroker(broker *Broker) {
+ if client.brokers[broker.ID()] == nil {
+ client.brokers[broker.ID()] = broker
+ Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
+ } else if broker.Addr() != client.brokers[broker.ID()].Addr() {
+ safeAsyncClose(client.brokers[broker.ID()])
+ client.brokers[broker.ID()] = broker
+ Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
+ }
+}
+
+// deregisterBroker removes a broker from the seedBrokers list, and if it's
+// not a seed broker, removes it from the brokers map completely.
+func (client *client) deregisterBroker(broker *Broker) {
+ client.lock.Lock()
+ defer client.lock.Unlock()
+
+ if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
+ client.deadSeeds = append(client.deadSeeds, broker)
+ client.seedBrokers = client.seedBrokers[1:]
+ } else {
+ // we do this so that our loop in `tryRefreshMetadata` doesn't go on forever,
+ // but we really shouldn't have to; once that loop is made better this case can be
+ // removed, and the function generally can be renamed from `deregisterBroker` to
+ // `nextSeedBroker` or something
+ Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
+ delete(client.brokers, broker.ID())
+ }
+}
+
+func (client *client) resurrectDeadBrokers() {
+ client.lock.Lock()
+ defer client.lock.Unlock()
+
+ Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
+ client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
+ client.deadSeeds = nil
+}
+
+func (client *client) any() *Broker {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ if len(client.seedBrokers) > 0 {
+ _ = client.seedBrokers[0].Open(client.conf)
+ return client.seedBrokers[0]
+ }
+
+ // not guaranteed to be random *or* deterministic
+ for _, broker := range client.brokers {
+ _ = broker.Open(client.conf)
+ return broker
+ }
+
+ return nil
+}
+
+// private caching/lazy metadata helpers
+
+type partitionType int
+
+const (
+ allPartitions partitionType = iota
+ writablePartitions
+ // If you add any more types, update the partition cache in updateMetadata()
+
+ // Ensure this is the last partition type value
+ maxPartitionIndex
+)
+
+func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ partitions := client.metadata[topic]
+ if partitions != nil {
+ return partitions[partitionID]
+ }
+
+ return nil
+}
+
+func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ partitions, exists := client.cachedPartitionsResults[topic]
+
+ if !exists {
+ return nil
+ }
+ return partitions[partitionSet]
+}
+
+func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
+ partitions := client.metadata[topic]
+
+ if partitions == nil {
+ return nil
+ }
+
+ ret := make([]int32, 0, len(partitions))
+ for _, partition := range partitions {
+ if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable {
+ continue
+ }
+ ret = append(ret, partition.ID)
+ }
+
+ sort.Sort(int32Slice(ret))
+ return ret
+}
+
+func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ partitions := client.metadata[topic]
+ if partitions != nil {
+ metadata, ok := partitions[partitionID]
+ if ok {
+ if metadata.Err == ErrLeaderNotAvailable {
+ return nil, ErrLeaderNotAvailable
+ }
+ b := client.brokers[metadata.Leader]
+ if b == nil {
+ return nil, ErrLeaderNotAvailable
+ }
+ _ = b.Open(client.conf)
+ return b, nil
+ }
+ }
+
+ return nil, ErrUnknownTopicOrPartition
+}
+
+func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) {
+ broker, err := client.Leader(topic, partitionID)
+ if err != nil {
+ return -1, err
+ }
+
+ request := &OffsetRequest{}
+ if client.conf.Version.IsAtLeast(V0_10_1_0) {
+ request.Version = 1
+ }
+ request.AddBlock(topic, partitionID, time, 1)
+
+ response, err := broker.GetAvailableOffsets(request)
+ if err != nil {
+ _ = broker.Close()
+ return -1, err
+ }
+
+ block := response.GetBlock(topic, partitionID)
+ if block == nil {
+ _ = broker.Close()
+ return -1, ErrIncompleteResponse
+ }
+ if block.Err != ErrNoError {
+ return -1, block.Err
+ }
+ if len(block.Offsets) != 1 {
+ return -1, ErrOffsetOutOfRange
+ }
+
+ return block.Offsets[0], nil
+}
+
+// core metadata update logic
+
+func (client *client) backgroundMetadataUpdater() {
+ defer close(client.closed)
+
+ if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
+ return
+ }
+
+ ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ if err := client.RefreshMetadata(); err != nil {
+ Logger.Println("Client background metadata update:", err)
+ }
+ case <-client.closer:
+ return
+ }
+ }
+}
+
+func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error {
+ retry := func(err error) error {
+ if attemptsRemaining > 0 {
+ Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
+ time.Sleep(client.conf.Metadata.Retry.Backoff)
+ return client.tryRefreshMetadata(topics, attemptsRemaining-1)
+ }
+ return err
+ }
+
+ for broker := client.any(); broker != nil; broker = client.any() {
+ if len(topics) > 0 {
+ Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
+ } else {
+ Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
+ }
+ response, err := broker.GetMetadata(&MetadataRequest{Topics: topics})
+
+ switch err.(type) {
+ case nil:
+ // valid response, use it
+ shouldRetry, err := client.updateMetadata(response)
+ if shouldRetry {
+ Logger.Println("client/metadata found some partitions to be leaderless")
+ return retry(err) // note: err can be nil
+ }
+ return err
+
+ case PacketEncodingError:
+ // didn't even send, return the error
+ return err
+ default:
+ // some other error, remove that broker and try again
+ Logger.Println("client/metadata got error from broker while fetching metadata:", err)
+ _ = broker.Close()
+ client.deregisterBroker(broker)
+ }
+ }
+
+ Logger.Println("client/metadata no available broker to send metadata request to")
+ client.resurrectDeadBrokers()
+ return retry(ErrOutOfBrokers)
+}
+
+// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable
+func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err error) {
+ client.lock.Lock()
+ defer client.lock.Unlock()
+
+ // For all the brokers we received:
+ // - if it is a new ID, save it
+ // - if it is an existing ID, but the address we have is stale, discard the old one and save it
+ // - otherwise ignore it, replacing our existing one would just bounce the connection
+ for _, broker := range data.Brokers {
+ client.registerBroker(broker)
+ }
+
+ for _, topic := range data.Topics {
+ delete(client.metadata, topic.Name)
+ delete(client.cachedPartitionsResults, topic.Name)
+
+ switch topic.Err {
+ case ErrNoError:
+ break
+ case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
+ err = topic.Err
+ continue
+ case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
+ err = topic.Err
+ retry = true
+ continue
+ case ErrLeaderNotAvailable: // retry, but store partial partition results
+ retry = true
+ break
+ default: // don't retry, don't store partial results
+ Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
+ err = topic.Err
+ continue
+ }
+
+ client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
+ for _, partition := range topic.Partitions {
+ client.metadata[topic.Name][partition.ID] = partition
+ if partition.Err == ErrLeaderNotAvailable {
+ retry = true
+ }
+ }
+
+ var partitionCache [maxPartitionIndex][]int32
+ partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
+ partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
+ client.cachedPartitionsResults[topic.Name] = partitionCache
+ }
+
+ return
+}
+
+func (client *client) cachedCoordinator(consumerGroup string) *Broker {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+ if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
+ return client.brokers[coordinatorID]
+ }
+ return nil
+}
+
+func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*ConsumerMetadataResponse, error) {
+ retry := func(err error) (*ConsumerMetadataResponse, error) {
+ if attemptsRemaining > 0 {
+ Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
+ time.Sleep(client.conf.Metadata.Retry.Backoff)
+ return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
+ }
+ return nil, err
+ }
+
+ for broker := client.any(); broker != nil; broker = client.any() {
+ Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())
+
+ request := new(ConsumerMetadataRequest)
+ request.ConsumerGroup = consumerGroup
+
+ response, err := broker.GetConsumerMetadata(request)
+
+ if err != nil {
+ Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
+
+ switch err.(type) {
+ case PacketEncodingError:
+ return nil, err
+ default:
+ _ = broker.Close()
+ client.deregisterBroker(broker)
+ continue
+ }
+ }
+
+ switch response.Err {
+ case ErrNoError:
+ Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
+ return response, nil
+
+ case ErrConsumerCoordinatorNotAvailable:
+ Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
+
+ // This is very ugly, but this scenario will only happen once per cluster.
+ // The __consumer_offsets topic only has to be created one time.
+ // The number of partitions is not configurable, but partition 0 should always exist.
+ if _, err := client.Leader("__consumer_offsets", 0); err != nil {
+ Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
+ time.Sleep(2 * time.Second)
+ }
+
+ return retry(ErrConsumerCoordinatorNotAvailable)
+ default:
+ return nil, response.Err
+ }
+ }
+
+ Logger.Println("client/coordinator no available broker to send consumer metadata request to")
+ client.resurrectDeadBrokers()
+ return retry(ErrOutOfBrokers)
+}
diff --git a/vendor/github.com/Shopify/sarama/client_test.go b/vendor/github.com/Shopify/sarama/client_test.go
new file mode 100644
index 000000000..0bac1b405
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/client_test.go
@@ -0,0 +1,619 @@
+package sarama
+
+import (
+ "io"
+ "sync"
+ "testing"
+ "time"
+)
+
+func safeClose(t testing.TB, c io.Closer) {
+ err := c.Close()
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestSimpleClient(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+
+ seedBroker.Returns(new(MetadataResponse))
+
+ client, err := NewClient([]string{seedBroker.Addr()}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ seedBroker.Close()
+ safeClose(t, client)
+}
+
+func TestCachedPartitions(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+
+ replicas := []int32{3, 1, 5}
+ isr := []int32{5, 1}
+
+ metadataResponse := new(MetadataResponse)
+ metadataResponse.AddBroker("localhost:12345", 2)
+ metadataResponse.AddTopicPartition("my_topic", 0, 2, replicas, isr, ErrNoError)
+ metadataResponse.AddTopicPartition("my_topic", 1, 2, replicas, isr, ErrLeaderNotAvailable)
+ seedBroker.Returns(metadataResponse)
+
+ config := NewConfig()
+ config.Metadata.Retry.Max = 0
+ c, err := NewClient([]string{seedBroker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ client := c.(*client)
+
+ // Verify they aren't cached the same
+ allP := client.cachedPartitionsResults["my_topic"][allPartitions]
+ writeP := client.cachedPartitionsResults["my_topic"][writablePartitions]
+ if len(allP) == len(writeP) {
+ t.Fatal("Invalid lengths!")
+ }
+
+ tmp := client.cachedPartitionsResults["my_topic"]
+ // Verify we actually use the cache at all!
+ tmp[allPartitions] = []int32{1, 2, 3, 4}
+ client.cachedPartitionsResults["my_topic"] = tmp
+ if 4 != len(client.cachedPartitions("my_topic", allPartitions)) {
+ t.Fatal("Not using the cache!")
+ }
+
+ seedBroker.Close()
+ safeClose(t, client)
+}
+
+func TestClientDoesntCachePartitionsForTopicsWithErrors(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+
+ replicas := []int32{seedBroker.BrokerID()}
+
+ metadataResponse := new(MetadataResponse)
+ metadataResponse.AddBroker(seedBroker.Addr(), seedBroker.BrokerID())
+ metadataResponse.AddTopicPartition("my_topic", 1, replicas[0], replicas, replicas, ErrNoError)
+ metadataResponse.AddTopicPartition("my_topic", 2, replicas[0], replicas, replicas, ErrNoError)
+ seedBroker.Returns(metadataResponse)
+
+ config := NewConfig()
+ config.Metadata.Retry.Max = 0
+ client, err := NewClient([]string{seedBroker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ metadataResponse = new(MetadataResponse)
+ metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition)
+ seedBroker.Returns(metadataResponse)
+
+ partitions, err := client.Partitions("unknown")
+
+ if err != ErrUnknownTopicOrPartition {
+ t.Error("Expected ErrUnknownTopicOrPartition, found", err)
+ }
+ if partitions != nil {
+ t.Errorf("Should return nil as partition list, found %v", partitions)
+ }
+
+ // Should still use the cache of a known topic
+ partitions, err = client.Partitions("my_topic")
+ if err != nil {
+ t.Errorf("Expected no error, found %v", err)
+ }
+
+ metadataResponse = new(MetadataResponse)
+ metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition)
+ seedBroker.Returns(metadataResponse)
+
+ // Should not use cache for unknown topic
+ partitions, err = client.Partitions("unknown")
+ if err != ErrUnknownTopicOrPartition {
+ t.Error("Expected ErrUnknownTopicOrPartition, found", err)
+ }
+ if partitions != nil {
+ t.Errorf("Should return nil as partition list, found %v", partitions)
+ }
+
+ seedBroker.Close()
+ safeClose(t, client)
+}
+
+func TestClientSeedBrokers(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+
+ metadataResponse := new(MetadataResponse)
+ metadataResponse.AddBroker("localhost:12345", 2)
+ seedBroker.Returns(metadataResponse)
+
+ client, err := NewClient([]string{seedBroker.Addr()}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ seedBroker.Close()
+ safeClose(t, client)
+}
+
+func TestClientMetadata(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+ leader := NewMockBroker(t, 5)
+
+ replicas := []int32{3, 1, 5}
+ isr := []int32{5, 1}
+
+ metadataResponse := new(MetadataResponse)
+ metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
+ metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), replicas, isr, ErrNoError)
+ metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), replicas, isr, ErrLeaderNotAvailable)
+ seedBroker.Returns(metadataResponse)
+
+ config := NewConfig()
+ config.Metadata.Retry.Max = 0
+ client, err := NewClient([]string{seedBroker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ topics, err := client.Topics()
+ if err != nil {
+ t.Error(err)
+ } else if len(topics) != 1 || topics[0] != "my_topic" {
+ t.Error("Client returned incorrect topics:", topics)
+ }
+
+ parts, err := client.Partitions("my_topic")
+ if err != nil {
+ t.Error(err)
+ } else if len(parts) != 2 || parts[0] != 0 || parts[1] != 1 {
+ t.Error("Client returned incorrect partitions for my_topic:", parts)
+ }
+
+ parts, err = client.WritablePartitions("my_topic")
+ if err != nil {
+ t.Error(err)
+ } else if len(parts) != 1 || parts[0] != 0 {
+ t.Error("Client returned incorrect writable partitions for my_topic:", parts)
+ }
+
+ tst, err := client.Leader("my_topic", 0)
+ if err != nil {
+ t.Error(err)
+ } else if tst.ID() != 5 {
+ t.Error("Leader for my_topic had incorrect ID.")
+ }
+
+ replicas, err = client.Replicas("my_topic", 0)
+ if err != nil {
+ t.Error(err)
+ } else if replicas[0] != 1 {
+ t.Error("Incorrect (or unsorted) replica")
+ } else if replicas[1] != 3 {
+ t.Error("Incorrect (or unsorted) replica")
+ } else if replicas[2] != 5 {
+ t.Error("Incorrect (or unsorted) replica")
+ }
+
+ isr, err = client.InSyncReplicas("my_topic", 0)
+ if err != nil {
+ t.Error(err)
+ } else if len(isr) != 2 {
+ t.Error("Client returned incorrect ISRs for partition:", isr)
+ } else if isr[0] != 1 {
+ t.Error("Incorrect (or unsorted) ISR:", isr)
+ } else if isr[1] != 5 {
+ t.Error("Incorrect (or unsorted) ISR:", isr)
+ }
+
+ leader.Close()
+ seedBroker.Close()
+ safeClose(t, client)
+}
+
+func TestClientGetOffset(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+ leader := NewMockBroker(t, 2)
+ leaderAddr := leader.Addr()
+
+ metadata := new(MetadataResponse)
+ metadata.AddTopicPartition("foo", 0, leader.BrokerID(), nil, nil, ErrNoError)
+ metadata.AddBroker(leaderAddr, leader.BrokerID())
+ seedBroker.Returns(metadata)
+
+ client, err := NewClient([]string{seedBroker.Addr()}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ offsetResponse := new(OffsetResponse)
+ offsetResponse.AddTopicPartition("foo", 0, 123)
+ leader.Returns(offsetResponse)
+
+ offset, err := client.GetOffset("foo", 0, OffsetNewest)
+ if err != nil {
+ t.Error(err)
+ }
+ if offset != 123 {
+ t.Error("Unexpected offset, got ", offset)
+ }
+
+ leader.Close()
+ seedBroker.Returns(metadata)
+
+ leader = NewMockBrokerAddr(t, 2, leaderAddr)
+ offsetResponse = new(OffsetResponse)
+ offsetResponse.AddTopicPartition("foo", 0, 456)
+ leader.Returns(offsetResponse)
+
+ offset, err = client.GetOffset("foo", 0, OffsetNewest)
+ if err != nil {
+ t.Error(err)
+ }
+ if offset != 456 {
+ t.Error("Unexpected offset, got ", offset)
+ }
+
+ seedBroker.Close()
+ leader.Close()
+ safeClose(t, client)
+}
+
+func TestClientReceivingUnknownTopic(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+
+ metadataResponse1 := new(MetadataResponse)
+ seedBroker.Returns(metadataResponse1)
+
+ config := NewConfig()
+ config.Metadata.Retry.Max = 1
+ config.Metadata.Retry.Backoff = 0
+ client, err := NewClient([]string{seedBroker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ metadataUnknownTopic := new(MetadataResponse)
+ metadataUnknownTopic.AddTopic("new_topic", ErrUnknownTopicOrPartition)
+ seedBroker.Returns(metadataUnknownTopic)
+ seedBroker.Returns(metadataUnknownTopic)
+
+ if err := client.RefreshMetadata("new_topic"); err != ErrUnknownTopicOrPartition {
+ t.Error("ErrUnknownTopicOrPartition expected, got", err)
+ }
+
+ // If we are asking for the leader of a partition of the non-existent topic,
+ // we will request metadata again.
+ seedBroker.Returns(metadataUnknownTopic)
+ seedBroker.Returns(metadataUnknownTopic)
+
+ if _, err = client.Leader("new_topic", 1); err != ErrUnknownTopicOrPartition {
+ t.Error("Expected ErrUnknownTopicOrPartition, got", err)
+ }
+
+ safeClose(t, client)
+ seedBroker.Close()
+}
+
+func TestClientReceivingPartialMetadata(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+ leader := NewMockBroker(t, 5)
+
+ metadataResponse1 := new(MetadataResponse)
+ metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID())
+ seedBroker.Returns(metadataResponse1)
+
+ config := NewConfig()
+ config.Metadata.Retry.Max = 0
+ client, err := NewClient([]string{seedBroker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ replicas := []int32{leader.BrokerID(), seedBroker.BrokerID()}
+
+ metadataPartial := new(MetadataResponse)
+ metadataPartial.AddTopic("new_topic", ErrLeaderNotAvailable)
+ metadataPartial.AddTopicPartition("new_topic", 0, leader.BrokerID(), replicas, replicas, ErrNoError)
+ metadataPartial.AddTopicPartition("new_topic", 1, -1, replicas, []int32{}, ErrLeaderNotAvailable)
+ seedBroker.Returns(metadataPartial)
+
+ if err := client.RefreshMetadata("new_topic"); err != nil {
+ t.Error("ErrLeaderNotAvailable should not make RefreshMetadata respond with an error")
+ }
+
+ // Even though the metadata was incomplete, we should be able to get the leader of a partition
+ // for which we did get a useful response, without doing additional requests.
+
+ partition0Leader, err := client.Leader("new_topic", 0)
+ if err != nil {
+ t.Error(err)
+ } else if partition0Leader.Addr() != leader.Addr() {
+ t.Error("Unexpected leader returned", partition0Leader.Addr())
+ }
+
+ // If we are asking for the leader of a partition that didn't have a leader before,
+ // we will do another metadata request.
+
+ seedBroker.Returns(metadataPartial)
+
+ // Still no leader for the partition, so asking for it should return an error.
+ _, err = client.Leader("new_topic", 1)
+ if err != ErrLeaderNotAvailable {
+ t.Error("Expected ErrLeaderNotAvailable, got", err)
+ }
+
+ safeClose(t, client)
+ seedBroker.Close()
+ leader.Close()
+}
+
+func TestClientRefreshBehaviour(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+ leader := NewMockBroker(t, 5)
+
+ metadataResponse1 := new(MetadataResponse)
+ metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID())
+ seedBroker.Returns(metadataResponse1)
+
+ metadataResponse2 := new(MetadataResponse)
+ metadataResponse2.AddTopicPartition("my_topic", 0xb, leader.BrokerID(), nil, nil, ErrNoError)
+ seedBroker.Returns(metadataResponse2)
+
+ client, err := NewClient([]string{seedBroker.Addr()}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ parts, err := client.Partitions("my_topic")
+ if err != nil {
+ t.Error(err)
+ } else if len(parts) != 1 || parts[0] != 0xb {
+ t.Error("Client returned incorrect partitions for my_topic:", parts)
+ }
+
+ tst, err := client.Leader("my_topic", 0xb)
+ if err != nil {
+ t.Error(err)
+ } else if tst.ID() != 5 {
+ t.Error("Leader for my_topic had incorrect ID.")
+ }
+
+ leader.Close()
+ seedBroker.Close()
+ safeClose(t, client)
+}
+
+func TestClientResurrectDeadSeeds(t *testing.T) {
+ initialSeed := NewMockBroker(t, 0)
+ emptyMetadata := new(MetadataResponse)
+ initialSeed.Returns(emptyMetadata)
+
+ conf := NewConfig()
+ conf.Metadata.Retry.Backoff = 0
+ conf.Metadata.RefreshFrequency = 0
+ c, err := NewClient([]string{initialSeed.Addr()}, conf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ initialSeed.Close()
+
+ client := c.(*client)
+
+ seed1 := NewMockBroker(t, 1)
+ seed2 := NewMockBroker(t, 2)
+ seed3 := NewMockBroker(t, 3)
+ addr1 := seed1.Addr()
+ addr2 := seed2.Addr()
+ addr3 := seed3.Addr()
+
+ // Overwrite the seed brokers with a fixed ordering to make this test deterministic.
+ safeClose(t, client.seedBrokers[0])
+ client.seedBrokers = []*Broker{NewBroker(addr1), NewBroker(addr2), NewBroker(addr3)}
+ client.deadSeeds = []*Broker{}
+
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+ go func() {
+ if err := client.RefreshMetadata(); err != nil {
+ t.Error(err)
+ }
+ wg.Done()
+ }()
+ seed1.Close()
+ seed2.Close()
+
+ seed1 = NewMockBrokerAddr(t, 1, addr1)
+ seed2 = NewMockBrokerAddr(t, 2, addr2)
+
+ seed3.Close()
+
+ seed1.Close()
+ seed2.Returns(emptyMetadata)
+
+ wg.Wait()
+
+ if len(client.seedBrokers) != 2 {
+ t.Error("incorrect number of live seeds")
+ }
+ if len(client.deadSeeds) != 1 {
+ t.Error("incorrect number of dead seeds")
+ }
+
+ safeClose(t, c)
+}
+
+func TestClientCoordinatorWithConsumerOffsetsTopic(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+ staleCoordinator := NewMockBroker(t, 2)
+ freshCoordinator := NewMockBroker(t, 3)
+
+ replicas := []int32{staleCoordinator.BrokerID(), freshCoordinator.BrokerID()}
+ metadataResponse1 := new(MetadataResponse)
+ metadataResponse1.AddBroker(staleCoordinator.Addr(), staleCoordinator.BrokerID())
+ metadataResponse1.AddBroker(freshCoordinator.Addr(), freshCoordinator.BrokerID())
+ metadataResponse1.AddTopicPartition("__consumer_offsets", 0, replicas[0], replicas, replicas, ErrNoError)
+ seedBroker.Returns(metadataResponse1)
+
+ client, err := NewClient([]string{seedBroker.Addr()}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ coordinatorResponse1 := new(ConsumerMetadataResponse)
+ coordinatorResponse1.Err = ErrConsumerCoordinatorNotAvailable
+ seedBroker.Returns(coordinatorResponse1)
+
+ coordinatorResponse2 := new(ConsumerMetadataResponse)
+ coordinatorResponse2.CoordinatorID = staleCoordinator.BrokerID()
+ coordinatorResponse2.CoordinatorHost = "127.0.0.1"
+ coordinatorResponse2.CoordinatorPort = staleCoordinator.Port()
+
+ seedBroker.Returns(coordinatorResponse2)
+
+ broker, err := client.Coordinator("my_group")
+ if err != nil {
+ t.Error(err)
+ }
+
+ if staleCoordinator.Addr() != broker.Addr() {
+ t.Errorf("Expected coordinator to have address %s, found %s", staleCoordinator.Addr(), broker.Addr())
+ }
+
+ if staleCoordinator.BrokerID() != broker.ID() {
+ t.Errorf("Expected coordinator to have ID %d, found %d", staleCoordinator.BrokerID(), broker.ID())
+ }
+
+ // Grab the cached value
+ broker2, err := client.Coordinator("my_group")
+ if err != nil {
+ t.Error(err)
+ }
+
+ if broker2.Addr() != broker.Addr() {
+ t.Errorf("Expected the coordinator to be the same, but found %s vs. %s", broker2.Addr(), broker.Addr())
+ }
+
+ coordinatorResponse3 := new(ConsumerMetadataResponse)
+ coordinatorResponse3.CoordinatorID = freshCoordinator.BrokerID()
+ coordinatorResponse3.CoordinatorHost = "127.0.0.1"
+ coordinatorResponse3.CoordinatorPort = freshCoordinator.Port()
+
+ seedBroker.Returns(coordinatorResponse3)
+
+ // Refresh the locally cached value because it's stale
+ if err := client.RefreshCoordinator("my_group"); err != nil {
+ t.Error(err)
+ }
+
+ // Grab the fresh value
+ broker3, err := client.Coordinator("my_group")
+ if err != nil {
+ t.Error(err)
+ }
+
+ if broker3.Addr() != freshCoordinator.Addr() {
+ t.Errorf("Expected the freshCoordinator to be returned, but found %s.", broker3.Addr())
+ }
+
+ freshCoordinator.Close()
+ staleCoordinator.Close()
+ seedBroker.Close()
+ safeClose(t, client)
+}
+
+func TestClientCoordinatorWithoutConsumerOffsetsTopic(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+ coordinator := NewMockBroker(t, 2)
+
+ metadataResponse1 := new(MetadataResponse)
+ seedBroker.Returns(metadataResponse1)
+
+ config := NewConfig()
+ config.Metadata.Retry.Max = 1
+ config.Metadata.Retry.Backoff = 0
+ client, err := NewClient([]string{seedBroker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ coordinatorResponse1 := new(ConsumerMetadataResponse)
+ coordinatorResponse1.Err = ErrConsumerCoordinatorNotAvailable
+ seedBroker.Returns(coordinatorResponse1)
+
+ metadataResponse2 := new(MetadataResponse)
+ metadataResponse2.AddTopic("__consumer_offsets", ErrUnknownTopicOrPartition)
+ seedBroker.Returns(metadataResponse2)
+
+ replicas := []int32{coordinator.BrokerID()}
+ metadataResponse3 := new(MetadataResponse)
+ metadataResponse3.AddTopicPartition("__consumer_offsets", 0, replicas[0], replicas, replicas, ErrNoError)
+ seedBroker.Returns(metadataResponse3)
+
+ coordinatorResponse2 := new(ConsumerMetadataResponse)
+ coordinatorResponse2.CoordinatorID = coordinator.BrokerID()
+ coordinatorResponse2.CoordinatorHost = "127.0.0.1"
+ coordinatorResponse2.CoordinatorPort = coordinator.Port()
+
+ seedBroker.Returns(coordinatorResponse2)
+
+ broker, err := client.Coordinator("my_group")
+ if err != nil {
+ t.Error(err)
+ }
+
+ if coordinator.Addr() != broker.Addr() {
+ t.Errorf("Expected coordinator to have address %s, found %s", coordinator.Addr(), broker.Addr())
+ }
+
+ if coordinator.BrokerID() != broker.ID() {
+ t.Errorf("Expected coordinator to have ID %d, found %d", coordinator.BrokerID(), broker.ID())
+ }
+
+ coordinator.Close()
+ seedBroker.Close()
+ safeClose(t, client)
+}
+
+func TestClientAutorefreshShutdownRace(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+
+ metadataResponse := new(MetadataResponse)
+ seedBroker.Returns(metadataResponse)
+
+ conf := NewConfig()
+ conf.Metadata.RefreshFrequency = 100 * time.Millisecond
+ client, err := NewClient([]string{seedBroker.Addr()}, conf)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Wait for the background refresh to kick in
+ time.Sleep(110 * time.Millisecond)
+
+ done := make(chan none)
+ go func() {
+ // Close the client
+ if err := client.Close(); err != nil {
+ t.Fatal(err)
+ }
+ close(done)
+ }()
+
+ // Wait for the Close to kick in
+ time.Sleep(10 * time.Millisecond)
+
+ // Then return some metadata to the still-running background thread
+ leader := NewMockBroker(t, 2)
+ metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
+ metadataResponse.AddTopicPartition("foo", 0, leader.BrokerID(), []int32{2}, []int32{2}, ErrNoError)
+ seedBroker.Returns(metadataResponse)
+
+ <-done
+
+ seedBroker.Close()
+
+ // give the update time to happen so we get a panic if it's still running (which it shouldn't)
+ time.Sleep(10 * time.Millisecond)
+}
diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go
new file mode 100644
index 000000000..606a4fabe
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/config.go
@@ -0,0 +1,423 @@
+package sarama
+
+import (
+ "crypto/tls"
+ "regexp"
+ "time"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+const defaultClientID = "sarama"
+
+var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
+
+// Config is used to pass multiple configuration options to Sarama's constructors.
+type Config struct {
+ // Net is the namespace for network-level properties used by the Broker, and
+ // shared by the Client/Producer/Consumer.
+ Net struct {
+ // How many outstanding requests a connection is allowed to have before
+ // sending on it blocks (default 5).
+ MaxOpenRequests int
+
+ // All three of the below configurations are similar to the
+ // `socket.timeout.ms` setting in JVM kafka. All of them default
+ // to 30 seconds.
+ DialTimeout time.Duration // How long to wait for the initial connection.
+ ReadTimeout time.Duration // How long to wait for a response.
+ WriteTimeout time.Duration // How long to wait for a transmit.
+
+ TLS struct {
+ // Whether or not to use TLS when connecting to the broker
+ // (defaults to false).
+ Enable bool
+ // The TLS configuration to use for secure connections if
+ // enabled (defaults to nil).
+ Config *tls.Config
+ }
+
+ // SASL based authentication with broker. While there are multiple SASL authentication methods
+ // the current implementation is limited to plaintext (SASL/PLAIN) authentication
+ SASL struct {
+ // Whether or not to use SASL authentication when connecting to the broker
+ // (defaults to false).
+ Enable bool
+ // Whether or not to send the Kafka SASL handshake first if enabled
+ // (defaults to true). You should only set this to false if you're using
+ // a non-Kafka SASL proxy.
+ Handshake bool
+ // Username and password for SASL/PLAIN authentication.
+ User string
+ Password string
+ }
+
+ // KeepAlive specifies the keep-alive period for an active network connection.
+ // If zero, keep-alives are disabled. (default is 0: disabled).
+ KeepAlive time.Duration
+ }
+
+ // Metadata is the namespace for metadata management properties used by the
+ // Client, and shared by the Producer/Consumer.
+ Metadata struct {
+ Retry struct {
+ // The total number of times to retry a metadata request when the
+ // cluster is in the middle of a leader election (default 3).
+ Max int
+ // How long to wait for leader election to occur before retrying
+ // (default 250ms). Similar to the JVM's `retry.backoff.ms`.
+ Backoff time.Duration
+ }
+ // How frequently to refresh the cluster metadata in the background.
+ // Defaults to 10 minutes. Set to 0 to disable. Similar to
+ // `topic.metadata.refresh.interval.ms` in the JVM version.
+ RefreshFrequency time.Duration
+ }
+
+ // Producer is the namespace for configuration related to producing messages,
+ // used by the Producer.
+ Producer struct {
+ // The maximum permitted size of a message (defaults to 1000000). Should be
+ // set equal to or smaller than the broker's `message.max.bytes`.
+ MaxMessageBytes int
+ // The level of acknowledgement reliability needed from the broker (defaults
+ // to WaitForLocal). Equivalent to the `request.required.acks` setting of the
+ // JVM producer.
+ RequiredAcks RequiredAcks
+ // The maximum duration the broker will wait for the receipt of the number of
+ // RequiredAcks (defaults to 10 seconds). This is only relevant when
+ // RequiredAcks is set to WaitForAll or a number > 1. Only supports
+ // millisecond resolution, nanoseconds will be truncated. Equivalent to
+ // the JVM producer's `request.timeout.ms` setting.
+ Timeout time.Duration
+ // The type of compression to use on messages (defaults to no compression).
+ // Similar to `compression.codec` setting of the JVM producer.
+ Compression CompressionCodec
+ // Generates partitioners for choosing the partition to send messages to
+ // (defaults to hashing the message key). Similar to the `partitioner.class`
+ // setting for the JVM producer.
+ Partitioner PartitionerConstructor
+
+ // Return specifies what channels will be populated. If they are set to true,
+ // you must read from the respective channels to prevent deadlock. If,
+ // however, this config is used to create a `SyncProducer`, both must be set
+ // to true and you shall not read from the channels since the producer does
+ // this internally.
+ Return struct {
+ // If enabled, successfully delivered messages will be returned on the
+ // Successes channel (default disabled).
+ Successes bool
+
+ // If enabled, messages that failed to deliver will be returned on the
+ // Errors channel, including error (default enabled).
+ Errors bool
+ }
+
+ // The following config options control how often messages are batched up and
+ // sent to the broker. By default, messages are sent as fast as possible, and
+ // all messages received while the current batch is in-flight are placed
+ // into the subsequent batch.
+ Flush struct {
+ // The best-effort number of bytes needed to trigger a flush. Use the
+ // global sarama.MaxRequestSize to set a hard upper limit.
+ Bytes int
+ // The best-effort number of messages needed to trigger a flush. Use
+ // `MaxMessages` to set a hard upper limit.
+ Messages int
+ // The best-effort frequency of flushes. Equivalent to
+ // `queue.buffering.max.ms` setting of JVM producer.
+ Frequency time.Duration
+ // The maximum number of messages the producer will send in a single
+ // broker request. Defaults to 0 for unlimited. Similar to
+ // `queue.buffering.max.messages` in the JVM producer.
+ MaxMessages int
+ }
+
+ Retry struct {
+ // The total number of times to retry sending a message (default 3).
+ // Similar to the `message.send.max.retries` setting of the JVM producer.
+ Max int
+ // How long to wait for the cluster to settle between retries
+ // (default 100ms). Similar to the `retry.backoff.ms` setting of the
+ // JVM producer.
+ Backoff time.Duration
+ }
+ }
+
+ // Consumer is the namespace for configuration related to consuming messages,
+ // used by the Consumer.
+ //
+ // Note that Sarama's Consumer type does not currently support automatic
+ // consumer-group rebalancing and offset tracking. For Zookeeper-based
+ // tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka
+ // library builds on Sarama to add this support. For Kafka-based tracking
+ // (Kafka 0.9 and later), the https://github.com/bsm/sarama-cluster library
+ // builds on Sarama to add this support.
+ Consumer struct {
+ Retry struct {
+ // How long to wait after a failing to read from a partition before
+ // trying again (default 2s).
+ Backoff time.Duration
+ }
+
+ // Fetch is the namespace for controlling how many bytes are retrieved by any
+ // given request.
+ Fetch struct {
+ // The minimum number of message bytes to fetch in a request - the broker
+ // will wait until at least this many are available. The default is 1,
+ // as 0 causes the consumer to spin when no messages are available.
+ // Equivalent to the JVM's `fetch.min.bytes`.
+ Min int32
+ // The default number of message bytes to fetch from the broker in each
+ // request (default 32768). This should be larger than the majority of
+ // your messages, or else the consumer will spend a lot of time
+ // negotiating sizes and not actually consuming. Similar to the JVM's
+ // `fetch.message.max.bytes`.
+ Default int32
+ // The maximum number of message bytes to fetch from the broker in a
+ // single request. Messages larger than this will return
+ // ErrMessageTooLarge and will not be consumable, so you must be sure
+ // this is at least as large as your largest message. Defaults to 0
+ // (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
+ // global `sarama.MaxResponseSize` still applies.
+ Max int32
+ }
+ // The maximum amount of time the broker will wait for Consumer.Fetch.Min
+ // bytes to become available before it returns fewer than that anyways. The
+ // default is 250ms, since 0 causes the consumer to spin when no events are
+ // available. 100-500ms is a reasonable range for most cases. Kafka only
+ // supports precision up to milliseconds; nanoseconds will be truncated.
+ // Equivalent to the JVM's `fetch.wait.max.ms`.
+ MaxWaitTime time.Duration
+
+ // The maximum amount of time the consumer expects a message takes to process
+ // for the user. If writing to the Messages channel takes longer than this,
+ // that partition will stop fetching more messages until it can proceed again.
+ // Note that, since the Messages channel is buffered, the actual grace time is
+ // (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
+ MaxProcessingTime time.Duration
+
+ // Return specifies what channels will be populated. If they are set to true,
+ // you must read from them to prevent deadlock.
+ Return struct {
+ // If enabled, any errors that occurred while consuming are returned on
+ // the Errors channel (default disabled).
+ Errors bool
+ }
+
+ // Offsets specifies configuration for how and when to commit consumed
+ // offsets. This currently requires the manual use of an OffsetManager
+ // but will eventually be automated.
+ Offsets struct {
+ // How frequently to commit updated offsets. Defaults to 1s.
+ CommitInterval time.Duration
+
+ // The initial offset to use if no offset was previously committed.
+ // Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
+ Initial int64
+
+ // The retention duration for committed offsets. If zero, disabled
+ // (in which case the `offsets.retention.minutes` option on the
+ // broker will be used). Kafka only supports precision up to
+ // milliseconds; nanoseconds will be truncated. Requires Kafka
+ // broker version 0.9.0 or later.
+ // (default is 0: disabled).
+ Retention time.Duration
+ }
+ }
+
+ // A user-provided string sent with every request to the brokers for logging,
+ // debugging, and auditing purposes. Defaults to "sarama", but you should
+ // probably set it to something specific to your application.
+ ClientID string
+ // The number of events to buffer in internal and external channels. This
+ // permits the producer and consumer to continue processing some messages
+ // in the background while user code is working, greatly improving throughput.
+ // Defaults to 256.
+ ChannelBufferSize int
+ // The version of Kafka that Sarama will assume it is running against.
+ // Defaults to the oldest supported stable version. Since Kafka provides
+ // backwards-compatibility, setting it to a version older than you have
+ // will not break anything, although it may prevent you from using the
+ // latest features. Setting it to a version greater than you are actually
+ // running may lead to random breakage.
+ Version KafkaVersion
+ // The registry to define metrics into.
+ // Defaults to a local registry.
+ // If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
+ // prior to starting Sarama.
+ // See Examples on how to use the metrics registry
+ MetricRegistry metrics.Registry
+}
+
+// NewConfig returns a new configuration instance with sane defaults.
+func NewConfig() *Config {
+ c := &Config{}
+
+ c.Net.MaxOpenRequests = 5
+ c.Net.DialTimeout = 30 * time.Second
+ c.Net.ReadTimeout = 30 * time.Second
+ c.Net.WriteTimeout = 30 * time.Second
+ c.Net.SASL.Handshake = true
+
+ c.Metadata.Retry.Max = 3
+ c.Metadata.Retry.Backoff = 250 * time.Millisecond
+ c.Metadata.RefreshFrequency = 10 * time.Minute
+
+ c.Producer.MaxMessageBytes = 1000000
+ c.Producer.RequiredAcks = WaitForLocal
+ c.Producer.Timeout = 10 * time.Second
+ c.Producer.Partitioner = NewHashPartitioner
+ c.Producer.Retry.Max = 3
+ c.Producer.Retry.Backoff = 100 * time.Millisecond
+ c.Producer.Return.Errors = true
+
+ c.Consumer.Fetch.Min = 1
+ c.Consumer.Fetch.Default = 32768
+ c.Consumer.Retry.Backoff = 2 * time.Second
+ c.Consumer.MaxWaitTime = 250 * time.Millisecond
+ c.Consumer.MaxProcessingTime = 100 * time.Millisecond
+ c.Consumer.Return.Errors = false
+ c.Consumer.Offsets.CommitInterval = 1 * time.Second
+ c.Consumer.Offsets.Initial = OffsetNewest
+
+ c.ClientID = defaultClientID
+ c.ChannelBufferSize = 256
+ c.Version = minVersion
+ c.MetricRegistry = metrics.NewRegistry()
+
+ return c
+}
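+
+// exampleCustomConfig is an illustrative sketch (not part of the upstream sarama
+// source) showing how the defaults set in NewConfig are commonly overridden and
+// then checked with Validate. The specific values are assumptions for
+// illustration only.
+func exampleCustomConfig() (*Config, error) {
+	conf := NewConfig()
+	conf.ClientID = "my-application"             // avoid the default "sarama" client ID
+	conf.Producer.Return.Successes = true        // required if the config is used for a SyncProducer
+	conf.Consumer.Offsets.Initial = OffsetOldest // start from the oldest offset when none is committed
+	if err := conf.Validate(); err != nil {
+		return nil, err
+	}
+	return conf, nil
+}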
+
+// Validate checks a Config instance. It will return a
+// ConfigurationError if the specified values don't make sense.
+func (c *Config) Validate() error {
+ // some configuration values should be warned on but not fail completely, do those first
+ if c.Net.TLS.Enable == false && c.Net.TLS.Config != nil {
+ Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
+ }
+ if c.Net.SASL.Enable == false {
+ if c.Net.SASL.User != "" {
+ Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
+ }
+ if c.Net.SASL.Password != "" {
+ Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
+ }
+ }
+ if c.Producer.RequiredAcks > 1 {
+ Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
+ }
+ if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
+ Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
+ }
+ if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
+ Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
+ }
+ if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
+ Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
+ }
+ if c.Producer.Timeout%time.Millisecond != 0 {
+ Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
+ }
+ if c.Consumer.MaxWaitTime < 100*time.Millisecond {
+ Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
+ }
+ if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
+ Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
+ }
+ if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
+ Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
+ }
+ if c.ClientID == defaultClientID {
+ Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
+ }
+
+ // validate Net values
+ switch {
+ case c.Net.MaxOpenRequests <= 0:
+ return ConfigurationError("Net.MaxOpenRequests must be > 0")
+ case c.Net.DialTimeout <= 0:
+ return ConfigurationError("Net.DialTimeout must be > 0")
+ case c.Net.ReadTimeout <= 0:
+ return ConfigurationError("Net.ReadTimeout must be > 0")
+ case c.Net.WriteTimeout <= 0:
+ return ConfigurationError("Net.WriteTimeout must be > 0")
+ case c.Net.KeepAlive < 0:
+ return ConfigurationError("Net.KeepAlive must be >= 0")
+ case c.Net.SASL.Enable == true && c.Net.SASL.User == "":
+ return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
+ case c.Net.SASL.Enable == true && c.Net.SASL.Password == "":
+ return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
+ }
+
+ // validate the Metadata values
+ switch {
+ case c.Metadata.Retry.Max < 0:
+ return ConfigurationError("Metadata.Retry.Max must be >= 0")
+ case c.Metadata.Retry.Backoff < 0:
+ return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
+ case c.Metadata.RefreshFrequency < 0:
+ return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
+ }
+
+ // validate the Producer values
+ switch {
+ case c.Producer.MaxMessageBytes <= 0:
+ return ConfigurationError("Producer.MaxMessageBytes must be > 0")
+ case c.Producer.RequiredAcks < -1:
+ return ConfigurationError("Producer.RequiredAcks must be >= -1")
+ case c.Producer.Timeout <= 0:
+ return ConfigurationError("Producer.Timeout must be > 0")
+ case c.Producer.Partitioner == nil:
+ return ConfigurationError("Producer.Partitioner must not be nil")
+ case c.Producer.Flush.Bytes < 0:
+ return ConfigurationError("Producer.Flush.Bytes must be >= 0")
+ case c.Producer.Flush.Messages < 0:
+ return ConfigurationError("Producer.Flush.Messages must be >= 0")
+ case c.Producer.Flush.Frequency < 0:
+ return ConfigurationError("Producer.Flush.Frequency must be >= 0")
+ case c.Producer.Flush.MaxMessages < 0:
+ return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
+ case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
+ return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
+ case c.Producer.Retry.Max < 0:
+ return ConfigurationError("Producer.Retry.Max must be >= 0")
+ case c.Producer.Retry.Backoff < 0:
+ return ConfigurationError("Producer.Retry.Backoff must be >= 0")
+ }
+
+ if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
+ return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
+ }
+
+ // validate the Consumer values
+ switch {
+ case c.Consumer.Fetch.Min <= 0:
+ return ConfigurationError("Consumer.Fetch.Min must be > 0")
+ case c.Consumer.Fetch.Default <= 0:
+ return ConfigurationError("Consumer.Fetch.Default must be > 0")
+ case c.Consumer.Fetch.Max < 0:
+ return ConfigurationError("Consumer.Fetch.Max must be >= 0")
+ case c.Consumer.MaxWaitTime < 1*time.Millisecond:
+ return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
+ case c.Consumer.MaxProcessingTime <= 0:
+ return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
+ case c.Consumer.Retry.Backoff < 0:
+ return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
+ case c.Consumer.Offsets.CommitInterval <= 0:
+ return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0")
+ case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
+ return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
+
+ }
+
+ // validate misc shared values
+ switch {
+ case c.ChannelBufferSize < 0:
+ return ConfigurationError("ChannelBufferSize must be >= 0")
+ case !validID.MatchString(c.ClientID):
+ return ConfigurationError("ClientID is invalid")
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/config_test.go b/vendor/github.com/Shopify/sarama/config_test.go
new file mode 100644
index 000000000..5fef6b361
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/config_test.go
@@ -0,0 +1,70 @@
+package sarama
+
+import (
+ "os"
+ "testing"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+func TestDefaultConfigValidates(t *testing.T) {
+ config := NewConfig()
+ if err := config.Validate(); err != nil {
+ t.Error(err)
+ }
+ if config.MetricRegistry == nil {
+ t.Error("Expected non nil metrics.MetricRegistry, got nil")
+ }
+}
+
+func TestInvalidClientIDConfigValidates(t *testing.T) {
+ config := NewConfig()
+ config.ClientID = "foo:bar"
+ if err := config.Validate(); string(err.(ConfigurationError)) != "ClientID is invalid" {
+ t.Error("Expected invalid ClientID, got ", err)
+ }
+}
+
+func TestEmptyClientIDConfigValidates(t *testing.T) {
+ config := NewConfig()
+ config.ClientID = ""
+ if err := config.Validate(); string(err.(ConfigurationError)) != "ClientID is invalid" {
+ t.Error("Expected invalid ClientID, got ", err)
+ }
+}
+
+func TestLZ4ConfigValidation(t *testing.T) {
+ config := NewConfig()
+ config.Producer.Compression = CompressionLZ4
+ if err := config.Validate(); string(err.(ConfigurationError)) != "lz4 compression requires Version >= V0_10_0_0" {
+ t.Error("Expected invalid lz4/kakfa version error, got ", err)
+ }
+ config.Version = V0_10_0_0
+ if err := config.Validate(); err != nil {
+ t.Error("Expected lz4 to work, got ", err)
+ }
+}
+
+// This example shows how to integrate with an existing registry as well as how to
+// publish metrics on the standard output.
+func ExampleConfig_metrics() {
+ // Our application registry
+ appMetricRegistry := metrics.NewRegistry()
+ appGauge := metrics.GetOrRegisterGauge("m1", appMetricRegistry)
+ appGauge.Update(1)
+
+ config := NewConfig()
+ // Use a prefix registry instead of the default local one
+ config.MetricRegistry = metrics.NewPrefixedChildRegistry(appMetricRegistry, "sarama.")
+
+ // Simulate a metric created by sarama without starting a broker
+ saramaGauge := metrics.GetOrRegisterGauge("m2", config.MetricRegistry)
+ saramaGauge.Update(2)
+
+ metrics.WriteOnce(appMetricRegistry, os.Stdout)
+ // Output:
+ // gauge m1
+ // value: 1
+ // gauge sarama.m2
+ // value: 2
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go
new file mode 100644
index 000000000..c82b994c4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer.go
@@ -0,0 +1,741 @@
+package sarama
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// ConsumerMessage encapsulates a Kafka message returned by the consumer.
+type ConsumerMessage struct {
+ Key, Value []byte
+ Topic string
+ Partition int32
+ Offset int64
+ Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp
+ BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp
+}
+
+// ConsumerError is what is provided to the user when an error occurs.
+// It wraps an error and includes the topic and partition.
+type ConsumerError struct {
+ Topic string
+ Partition int32
+ Err error
+}
+
+func (ce ConsumerError) Error() string {
+ return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
+}
+
+// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
+// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
+// when stopping.
+type ConsumerErrors []*ConsumerError
+
+func (ce ConsumerErrors) Error() string {
+ return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
+}
+
+// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
+// on a consumer to avoid leaks; it will not be garbage-collected automatically when it passes out of
+// scope.
+//
+// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking.
+// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library
+// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the
+// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
+type Consumer interface {
+
+ // Topics returns the set of available topics as retrieved from the cluster
+ // metadata. This method is the same as Client.Topics(), and is provided for
+ // convenience.
+ Topics() ([]string, error)
+
+ // Partitions returns the sorted list of all partition IDs for the given topic.
+ // This method is the same as Client.Partitions(), and is provided for convenience.
+ Partitions(topic string) ([]int32, error)
+
+ // ConsumePartition creates a PartitionConsumer on the given topic/partition with
+ // the given offset. It will return an error if this Consumer is already consuming
+ // on the given topic/partition. Offset can be a literal offset, or OffsetNewest
+ // or OffsetOldest
+ ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
+
+ // HighWaterMarks returns the current high water marks for each topic and partition.
+ // Consistency between partitions is not guaranteed since high water marks are updated separately.
+ HighWaterMarks() map[string]map[int32]int64
+
+ // Close shuts down the consumer. It must be called after all child
+ // PartitionConsumers have already been closed.
+ Close() error
+}
+
+type consumer struct {
+ client Client
+ conf *Config
+ ownClient bool
+
+ lock sync.Mutex
+ children map[string]map[int32]*partitionConsumer
+ brokerConsumers map[*Broker]*brokerConsumer
+}
+
+// NewConsumer creates a new consumer using the given broker addresses and configuration.
+func NewConsumer(addrs []string, config *Config) (Consumer, error) {
+ client, err := NewClient(addrs, config)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := NewConsumerFromClient(client)
+ if err != nil {
+ return nil, err
+ }
+ c.(*consumer).ownClient = true
+ return c, nil
+}
+
+// NewConsumerFromClient creates a new consumer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this consumer.
+func NewConsumerFromClient(client Client) (Consumer, error) {
+ // Check that we are not dealing with a closed Client before processing any other arguments
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ c := &consumer{
+ client: client,
+ conf: client.Config(),
+ children: make(map[string]map[int32]*partitionConsumer),
+ brokerConsumers: make(map[*Broker]*brokerConsumer),
+ }
+
+ return c, nil
+}
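+
+// Note on ownership: NewConsumer marks the client it creates as its own (ownClient),
+// so closing that consumer also closes the client, while a consumer built with
+// NewConsumerFromClient leaves closing the client to the caller.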
+
+func (c *consumer) Close() error {
+ if c.ownClient {
+ return c.client.Close()
+ }
+ return nil
+}
+
+func (c *consumer) Topics() ([]string, error) {
+ return c.client.Topics()
+}
+
+func (c *consumer) Partitions(topic string) ([]int32, error) {
+ return c.client.Partitions(topic)
+}
+
+func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
+ child := &partitionConsumer{
+ consumer: c,
+ conf: c.conf,
+ topic: topic,
+ partition: partition,
+ messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
+ errors: make(chan *ConsumerError, c.conf.ChannelBufferSize),
+ feeder: make(chan *FetchResponse, 1),
+ trigger: make(chan none, 1),
+ dying: make(chan none),
+ fetchSize: c.conf.Consumer.Fetch.Default,
+ }
+
+ if err := child.chooseStartingOffset(offset); err != nil {
+ return nil, err
+ }
+
+ var leader *Broker
+ var err error
+ if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
+ return nil, err
+ }
+
+ if err := c.addChild(child); err != nil {
+ return nil, err
+ }
+
+ go withRecover(child.dispatcher)
+ go withRecover(child.responseFeeder)
+
+ child.broker = c.refBrokerConsumer(leader)
+ child.broker.input <- child
+
+ return child, nil
+}
+
+func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ hwms := make(map[string]map[int32]int64)
+ for topic, p := range c.children {
+ hwm := make(map[int32]int64, len(p))
+ for partition, pc := range p {
+ hwm[partition] = pc.HighWaterMarkOffset()
+ }
+ hwms[topic] = hwm
+ }
+
+ return hwms
+}
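+
+// A rough way to estimate lag from the map above (c is a Consumer, lastConsumed is
+// the offset of the last message the application has processed):
+//
+//	hwm := c.HighWaterMarks()["my_topic"][0]
+//	lag := hwm - lastConsumed - 1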
+
+func (c *consumer) addChild(child *partitionConsumer) error {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ topicChildren := c.children[child.topic]
+ if topicChildren == nil {
+ topicChildren = make(map[int32]*partitionConsumer)
+ c.children[child.topic] = topicChildren
+ }
+
+ if topicChildren[child.partition] != nil {
+ return ConfigurationError("That topic/partition is already being consumed")
+ }
+
+ topicChildren[child.partition] = child
+ return nil
+}
+
+func (c *consumer) removeChild(child *partitionConsumer) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ delete(c.children[child.topic], child.partition)
+}
+
+func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ bc := c.brokerConsumers[broker]
+ if bc == nil {
+ bc = c.newBrokerConsumer(broker)
+ c.brokerConsumers[broker] = bc
+ }
+
+ bc.refs++
+
+ return bc
+}
+
+func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ brokerWorker.refs--
+
+ if brokerWorker.refs == 0 {
+ close(brokerWorker.input)
+ if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
+ delete(c.brokerConsumers, brokerWorker.broker)
+ }
+ }
+}
+
+func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ delete(c.brokerConsumers, brokerWorker.broker)
+}
+
+// PartitionConsumer
+
+// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call Close()
+// or AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically
+// when it passes out of scope.
+//
+// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
+// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
+// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
+// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
+// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
+// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
+// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
+type PartitionConsumer interface {
+
+ // AsyncClose initiates a shutdown of the PartitionConsumer. This method will
+ // return immediately, after which you should wait until the 'messages' and
+	// 'errors' channels are drained. It is required to call this function, or
+ // Close before a consumer object passes out of scope, as it will otherwise
+ // leak memory. You must call this before calling Close on the underlying client.
+ AsyncClose()
+
+ // Close stops the PartitionConsumer from fetching messages. It is required to
+ // call this function (or AsyncClose) before a consumer object passes out of
+ // scope, as it will otherwise leak memory. You must call this before calling
+ // Close on the underlying client.
+ Close() error
+
+ // Messages returns the read channel for the messages that are returned by
+ // the broker.
+ Messages() <-chan *ConsumerMessage
+
+ // Errors returns a read channel of errors that occurred during consuming, if
+ // enabled. By default, errors are logged and not returned over this channel.
+ // If you want to implement any custom error handling, set your config's
+ // Consumer.Return.Errors setting to true, and read from this channel.
+ Errors() <-chan *ConsumerError
+
+ // HighWaterMarkOffset returns the high water mark offset of the partition,
+ // i.e. the offset that will be used for the next message that will be produced.
+ // You can use this to determine how far behind the processing is.
+ HighWaterMarkOffset() int64
+}
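+
+// A minimal sketch of the for/range pattern described above (c is an existing
+// Consumer, handle is an application-supplied function):
+//
+//	pc, err := c.ConsumePartition("my_topic", 0, OffsetNewest)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer pc.Close()
+//	for msg := range pc.Messages() {
+//		handle(msg)
+//	}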
+
+type partitionConsumer struct {
+ highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ consumer *consumer
+ conf *Config
+ topic string
+ partition int32
+
+ broker *brokerConsumer
+ messages chan *ConsumerMessage
+ errors chan *ConsumerError
+ feeder chan *FetchResponse
+
+ trigger, dying chan none
+ responseResult error
+
+ fetchSize int32
+ offset int64
+}
+
+var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
+
+func (child *partitionConsumer) sendError(err error) {
+ cErr := &ConsumerError{
+ Topic: child.topic,
+ Partition: child.partition,
+ Err: err,
+ }
+
+ if child.conf.Consumer.Return.Errors {
+ child.errors <- cErr
+ } else {
+ Logger.Println(cErr)
+ }
+}
+
+func (child *partitionConsumer) dispatcher() {
+ for range child.trigger {
+ select {
+ case <-child.dying:
+ close(child.trigger)
+ case <-time.After(child.conf.Consumer.Retry.Backoff):
+ if child.broker != nil {
+ child.consumer.unrefBrokerConsumer(child.broker)
+ child.broker = nil
+ }
+
+ Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition)
+ if err := child.dispatch(); err != nil {
+ child.sendError(err)
+ child.trigger <- none{}
+ }
+ }
+ }
+
+ if child.broker != nil {
+ child.consumer.unrefBrokerConsumer(child.broker)
+ }
+ child.consumer.removeChild(child)
+ close(child.feeder)
+}
+
+func (child *partitionConsumer) dispatch() error {
+ if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
+ return err
+ }
+
+ var leader *Broker
+ var err error
+ if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil {
+ return err
+ }
+
+ child.broker = child.consumer.refBrokerConsumer(leader)
+
+ child.broker.input <- child
+
+ return nil
+}
+
+func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
+ newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
+ if err != nil {
+ return err
+ }
+ oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
+ if err != nil {
+ return err
+ }
+
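+	// map the OffsetNewest/OffsetOldest sentinels to the concrete offsets the brokers
+	// just reported, and reject anything outside the [oldest, newest] range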
+ switch {
+ case offset == OffsetNewest:
+ child.offset = newestOffset
+ case offset == OffsetOldest:
+ child.offset = oldestOffset
+ case offset >= oldestOffset && offset <= newestOffset:
+ child.offset = offset
+ default:
+ return ErrOffsetOutOfRange
+ }
+
+ return nil
+}
+
+func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
+ return child.messages
+}
+
+func (child *partitionConsumer) Errors() <-chan *ConsumerError {
+ return child.errors
+}
+
+func (child *partitionConsumer) AsyncClose() {
+ // this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
+ // the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and
+	// 'errors' channels (alternatively, if the child is already at the dispatcher for some reason, that will
+ // also just close itself)
+ close(child.dying)
+}
+
+func (child *partitionConsumer) Close() error {
+ child.AsyncClose()
+
+ go withRecover(func() {
+ for range child.messages {
+ // drain
+ }
+ })
+
+ var errors ConsumerErrors
+ for err := range child.errors {
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return errors
+ }
+ return nil
+}
+
+func (child *partitionConsumer) HighWaterMarkOffset() int64 {
+ return atomic.LoadInt64(&child.highWaterMarkOffset)
+}
+
+func (child *partitionConsumer) responseFeeder() {
+ var msgs []*ConsumerMessage
+ expiryTimer := time.NewTimer(child.conf.Consumer.MaxProcessingTime)
+ expireTimedOut := false
+
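+	// Each message must be accepted on child.messages within Consumer.MaxProcessingTime;
+	// if the user is slower than that, the broker worker is released (acks.Done) so other
+	// partitions on the same broker are not starved, the remaining messages of this batch
+	// are still delivered from here, and the child re-registers with the broker afterwards.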
+feederLoop:
+ for response := range child.feeder {
+ msgs, child.responseResult = child.parseResponse(response)
+
+ for i, msg := range msgs {
+ if !expiryTimer.Stop() && !expireTimedOut {
+ // expiryTimer was expired; clear out the waiting msg
+ <-expiryTimer.C
+ }
+ expiryTimer.Reset(child.conf.Consumer.MaxProcessingTime)
+ expireTimedOut = false
+
+ select {
+ case child.messages <- msg:
+ case <-expiryTimer.C:
+ expireTimedOut = true
+ child.responseResult = errTimedOut
+ child.broker.acks.Done()
+ for _, msg = range msgs[i:] {
+ child.messages <- msg
+ }
+ child.broker.input <- child
+ continue feederLoop
+ }
+ }
+
+ child.broker.acks.Done()
+ }
+
+ close(child.messages)
+ close(child.errors)
+}
+
+func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
+ block := response.GetBlock(child.topic, child.partition)
+ if block == nil {
+ return nil, ErrIncompleteResponse
+ }
+
+ if block.Err != ErrNoError {
+ return nil, block.Err
+ }
+
+ if len(block.MsgSet.Messages) == 0 {
+		// We got no messages. If we got a partial trailing one then we need to ask for more data.
+ // Otherwise we just poll again and wait for one to be produced...
+ if block.MsgSet.PartialTrailingMessage {
+ if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
+ // we can't ask for more data, we've hit the configured limit
+ child.sendError(ErrMessageTooLarge)
+ child.offset++ // skip this one so we can keep processing future messages
+ } else {
+ child.fetchSize *= 2
+ if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
+ child.fetchSize = child.conf.Consumer.Fetch.Max
+ }
+ }
+ }
+
+ return nil, nil
+ }
+
+ // we got messages, reset our fetch size in case it was increased for a previous request
+ child.fetchSize = child.conf.Consumer.Fetch.Default
+ atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
+
+ incomplete := false
+ prelude := true
+ var messages []*ConsumerMessage
+ for _, msgBlock := range block.MsgSet.Messages {
+
+ for _, msg := range msgBlock.Messages() {
+ offset := msg.Offset
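+			// in v1 (Kafka 0.10+) compressed message sets the inner messages carry offsets
+			// relative to the wrapper block, so rebase them onto the block's absolute offset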
+ if msg.Msg.Version >= 1 {
+ baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
+ offset += baseOffset
+ }
+ if prelude && offset < child.offset {
+ continue
+ }
+ prelude = false
+
+ if offset >= child.offset {
+ messages = append(messages, &ConsumerMessage{
+ Topic: child.topic,
+ Partition: child.partition,
+ Key: msg.Msg.Key,
+ Value: msg.Msg.Value,
+ Offset: offset,
+ Timestamp: msg.Msg.Timestamp,
+ BlockTimestamp: msgBlock.Msg.Timestamp,
+ })
+ child.offset = offset + 1
+ } else {
+ incomplete = true
+ }
+ }
+
+ }
+
+ if incomplete || len(messages) == 0 {
+ return nil, ErrIncompleteResponse
+ }
+ return messages, nil
+}
+
+// brokerConsumer
+
+type brokerConsumer struct {
+ consumer *consumer
+ broker *Broker
+ input chan *partitionConsumer
+ newSubscriptions chan []*partitionConsumer
+ wait chan none
+ subscriptions map[*partitionConsumer]none
+ acks sync.WaitGroup
+ refs int
+}
+
+func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
+ bc := &brokerConsumer{
+ consumer: c,
+ broker: broker,
+ input: make(chan *partitionConsumer),
+ newSubscriptions: make(chan []*partitionConsumer),
+ wait: make(chan none),
+ subscriptions: make(map[*partitionConsumer]none),
+ refs: 0,
+ }
+
+ go withRecover(bc.subscriptionManager)
+ go withRecover(bc.subscriptionConsumer)
+
+ return bc
+}
+
+func (bc *brokerConsumer) subscriptionManager() {
+ var buffer []*partitionConsumer
+
+ // The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
+	// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
+	// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
+	// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
+ // so the main goroutine can block waiting for work if it has none.
+ for {
+ if len(buffer) > 0 {
+ select {
+ case event, ok := <-bc.input:
+ if !ok {
+ goto done
+ }
+ buffer = append(buffer, event)
+ case bc.newSubscriptions <- buffer:
+ buffer = nil
+ case bc.wait <- none{}:
+ }
+ } else {
+ select {
+ case event, ok := <-bc.input:
+ if !ok {
+ goto done
+ }
+ buffer = append(buffer, event)
+ case bc.newSubscriptions <- nil:
+ }
+ }
+ }
+
+done:
+ close(bc.wait)
+ if len(buffer) > 0 {
+ bc.newSubscriptions <- buffer
+ }
+ close(bc.newSubscriptions)
+}
+
+func (bc *brokerConsumer) subscriptionConsumer() {
+ <-bc.wait // wait for our first piece of work
+
+	// the subscriptionManager ensures we will get nil right away if no new subscriptions are available
+ for newSubscriptions := range bc.newSubscriptions {
+ bc.updateSubscriptions(newSubscriptions)
+
+ if len(bc.subscriptions) == 0 {
+ // We're about to be shut down or we're about to receive more subscriptions.
+ // Either way, the signal just hasn't propagated to our goroutine yet.
+ <-bc.wait
+ continue
+ }
+
+ response, err := bc.fetchNewMessages()
+
+ if err != nil {
+ Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
+ bc.abort(err)
+ return
+ }
+
+ bc.acks.Add(len(bc.subscriptions))
+ for child := range bc.subscriptions {
+ child.feeder <- response
+ }
+ bc.acks.Wait()
+ bc.handleResponses()
+ }
+}
+
+func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
+ for _, child := range newSubscriptions {
+ bc.subscriptions[child] = none{}
+ Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+ }
+
+ for child := range bc.subscriptions {
+ select {
+ case <-child.dying:
+ Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+ close(child.trigger)
+ delete(bc.subscriptions, child)
+ default:
+ break
+ }
+ }
+}
+
+func (bc *brokerConsumer) handleResponses() {
+ // handles the response codes left for us by our subscriptions, and abandons ones that have been closed
+ for child := range bc.subscriptions {
+ result := child.responseResult
+ child.responseResult = nil
+
+ switch result {
+ case nil:
+ break
+ case errTimedOut:
+ Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
+ bc.broker.ID(), child.topic, child.partition)
+ delete(bc.subscriptions, child)
+ case ErrOffsetOutOfRange:
+			// there's no point in retrying this; it will just fail the same way again
+ // shut it down and force the user to choose what to do
+ child.sendError(result)
+ Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
+ close(child.trigger)
+ delete(bc.subscriptions, child)
+ case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable:
+ // not an error, but does need redispatching
+ Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+ bc.broker.ID(), child.topic, child.partition, result)
+ child.trigger <- none{}
+ delete(bc.subscriptions, child)
+ default:
+ // dunno, tell the user and try redispatching
+ child.sendError(result)
+ Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+ bc.broker.ID(), child.topic, child.partition, result)
+ child.trigger <- none{}
+ delete(bc.subscriptions, child)
+ }
+ }
+}
+
+func (bc *brokerConsumer) abort(err error) {
+ bc.consumer.abandonBrokerConsumer(bc)
+ _ = bc.broker.Close() // we don't care about the error this might return, we already have one
+
+ for child := range bc.subscriptions {
+ child.sendError(err)
+ child.trigger <- none{}
+ }
+
+ for newSubscriptions := range bc.newSubscriptions {
+ if len(newSubscriptions) == 0 {
+ <-bc.wait
+ continue
+ }
+ for _, child := range newSubscriptions {
+ child.sendError(err)
+ child.trigger <- none{}
+ }
+ }
+}
+
+func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
+ request := &FetchRequest{
+ MinBytes: bc.consumer.conf.Consumer.Fetch.Min,
+ MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
+ }
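+	// the request version is chosen from the configured Kafka version: v2 (0.10.0+)
+	// is needed for the v1 message format with timestamps, and v3 (0.10.1+) additionally
+	// caps the total response size via MaxBytes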
+ if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
+ request.Version = 2
+ }
+ if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
+ request.Version = 3
+ request.MaxBytes = MaxResponseSize
+ }
+
+ for child := range bc.subscriptions {
+ request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
+ }
+
+ return bc.broker.Fetch(request)
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go
new file mode 100644
index 000000000..9d92d350a
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_group_members.go
@@ -0,0 +1,94 @@
+package sarama
+
+type ConsumerGroupMemberMetadata struct {
+ Version int16
+ Topics []string
+ UserData []byte
+}
+
+func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
+ pe.putInt16(m.Version)
+
+ if err := pe.putStringArray(m.Topics); err != nil {
+ return err
+ }
+
+ if err := pe.putBytes(m.UserData); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
+ if m.Version, err = pd.getInt16(); err != nil {
+ return
+ }
+
+ if m.Topics, err = pd.getStringArray(); err != nil {
+ return
+ }
+
+ if m.UserData, err = pd.getBytes(); err != nil {
+ return
+ }
+
+ return nil
+}
+
+type ConsumerGroupMemberAssignment struct {
+ Version int16
+ Topics map[string][]int32
+ UserData []byte
+}
+
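+// The wire layout produced below is: int16 Version, then an array of
+// (string topic, []int32 partitions) pairs, then UserData as length-prefixed bytes;
+// the fixtures in consumer_group_members_test.go exercise this layout byte for byte.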
+func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
+ pe.putInt16(m.Version)
+
+ if err := pe.putArrayLength(len(m.Topics)); err != nil {
+ return err
+ }
+
+ for topic, partitions := range m.Topics {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putInt32Array(partitions); err != nil {
+ return err
+ }
+ }
+
+ if err := pe.putBytes(m.UserData); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
+ if m.Version, err = pd.getInt16(); err != nil {
+ return
+ }
+
+ var topicLen int
+ if topicLen, err = pd.getArrayLength(); err != nil {
+ return
+ }
+
+ m.Topics = make(map[string][]int32, topicLen)
+ for i := 0; i < topicLen; i++ {
+ var topic string
+ if topic, err = pd.getString(); err != nil {
+ return
+ }
+ if m.Topics[topic], err = pd.getInt32Array(); err != nil {
+ return
+ }
+ }
+
+ if m.UserData, err = pd.getBytes(); err != nil {
+ return
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members_test.go b/vendor/github.com/Shopify/sarama/consumer_group_members_test.go
new file mode 100644
index 000000000..d65e8adc4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_group_members_test.go
@@ -0,0 +1,73 @@
+package sarama
+
+import (
+ "bytes"
+ "reflect"
+ "testing"
+)
+
+var (
+ groupMemberMetadata = []byte{
+ 0, 1, // Version
+ 0, 0, 0, 2, // Topic array length
+ 0, 3, 'o', 'n', 'e', // Topic one
+ 0, 3, 't', 'w', 'o', // Topic two
+ 0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata
+ }
+ groupMemberAssignment = []byte{
+ 0, 1, // Version
+ 0, 0, 0, 1, // Topic array length
+ 0, 3, 'o', 'n', 'e', // Topic one
+ 0, 0, 0, 3, // Topic one, partition array length
+ 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 4, // 0, 2, 4
+ 0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata
+ }
+)
+
+func TestConsumerGroupMemberMetadata(t *testing.T) {
+ meta := &ConsumerGroupMemberMetadata{
+ Version: 1,
+ Topics: []string{"one", "two"},
+ UserData: []byte{0x01, 0x02, 0x03},
+ }
+
+ buf, err := encode(meta, nil)
+ if err != nil {
+ t.Error("Failed to encode data", err)
+ } else if !bytes.Equal(groupMemberMetadata, buf) {
+ t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", groupMemberMetadata, buf)
+ }
+
+ meta2 := new(ConsumerGroupMemberMetadata)
+ err = decode(buf, meta2)
+ if err != nil {
+ t.Error("Failed to decode data", err)
+ } else if !reflect.DeepEqual(meta, meta2) {
+ t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", meta, meta2)
+ }
+}
+
+func TestConsumerGroupMemberAssignment(t *testing.T) {
+ amt := &ConsumerGroupMemberAssignment{
+ Version: 1,
+ Topics: map[string][]int32{
+ "one": {0, 2, 4},
+ },
+ UserData: []byte{0x01, 0x02, 0x03},
+ }
+
+ buf, err := encode(amt, nil)
+ if err != nil {
+ t.Error("Failed to encode data", err)
+ } else if !bytes.Equal(groupMemberAssignment, buf) {
+ t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", groupMemberAssignment, buf)
+ }
+
+ amt2 := new(ConsumerGroupMemberAssignment)
+ err = decode(buf, amt2)
+ if err != nil {
+ t.Error("Failed to decode data", err)
+ } else if !reflect.DeepEqual(amt, amt2) {
+ t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", amt, amt2)
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
new file mode 100644
index 000000000..483be3354
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
@@ -0,0 +1,26 @@
+package sarama
+
+type ConsumerMetadataRequest struct {
+ ConsumerGroup string
+}
+
+func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
+ return pe.putString(r.ConsumerGroup)
+}
+
+func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.ConsumerGroup, err = pd.getString()
+ return err
+}
+
+func (r *ConsumerMetadataRequest) key() int16 {
+ return 10
+}
+
+func (r *ConsumerMetadataRequest) version() int16 {
+ return 0
+}
+
+func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
+ return V0_8_2_0
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request_test.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request_test.go
new file mode 100644
index 000000000..4509631a0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_metadata_request_test.go
@@ -0,0 +1,19 @@
+package sarama
+
+import "testing"
+
+var (
+ consumerMetadataRequestEmpty = []byte{
+ 0x00, 0x00}
+
+ consumerMetadataRequestString = []byte{
+ 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r'}
+)
+
+func TestConsumerMetadataRequest(t *testing.T) {
+ request := new(ConsumerMetadataRequest)
+ testRequest(t, "empty string", request, consumerMetadataRequestEmpty)
+
+ request.ConsumerGroup = "foobar"
+ testRequest(t, "with string", request, consumerMetadataRequestString)
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
new file mode 100644
index 000000000..6b9632bba
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
@@ -0,0 +1,85 @@
+package sarama
+
+import (
+ "net"
+ "strconv"
+)
+
+type ConsumerMetadataResponse struct {
+ Err KError
+ Coordinator *Broker
+ CoordinatorID int32 // deprecated: use Coordinator.ID()
+ CoordinatorHost string // deprecated: use Coordinator.Addr()
+ CoordinatorPort int32 // deprecated: use Coordinator.Addr()
+}
+
+func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.Err = KError(tmp)
+
+ coordinator := new(Broker)
+ if err := coordinator.decode(pd); err != nil {
+ return err
+ }
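+	// error responses carry a zeroed-out broker, which decodes to the address ":0";
+	// in that case leave Coordinator nil and let callers act on Err instead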
+ if coordinator.addr == ":0" {
+ return nil
+ }
+ r.Coordinator = coordinator
+
+ // this can all go away in 2.0, but we have to fill in deprecated fields to maintain
+ // backwards compatibility
+ host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
+ if err != nil {
+ return err
+ }
+ port, err := strconv.ParseInt(portstr, 10, 32)
+ if err != nil {
+ return err
+ }
+ r.CoordinatorID = r.Coordinator.ID()
+ r.CoordinatorHost = host
+ r.CoordinatorPort = int32(port)
+
+ return nil
+}
+
+func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ if r.Coordinator != nil {
+ host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
+ if err != nil {
+ return err
+ }
+ port, err := strconv.ParseInt(portstr, 10, 32)
+ if err != nil {
+ return err
+ }
+ pe.putInt32(r.Coordinator.ID())
+ if err := pe.putString(host); err != nil {
+ return err
+ }
+ pe.putInt32(int32(port))
+ return nil
+ }
+ pe.putInt32(r.CoordinatorID)
+ if err := pe.putString(r.CoordinatorHost); err != nil {
+ return err
+ }
+ pe.putInt32(r.CoordinatorPort)
+ return nil
+}
+
+func (r *ConsumerMetadataResponse) key() int16 {
+ return 10
+}
+
+func (r *ConsumerMetadataResponse) version() int16 {
+ return 0
+}
+
+func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
+ return V0_8_2_0
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response_test.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response_test.go
new file mode 100644
index 000000000..b748784d7
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_metadata_response_test.go
@@ -0,0 +1,35 @@
+package sarama
+
+import "testing"
+
+var (
+ consumerMetadataResponseError = []byte{
+ 0x00, 0x0E,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+
+ consumerMetadataResponseSuccess = []byte{
+ 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xAB,
+ 0x00, 0x03, 'f', 'o', 'o',
+ 0x00, 0x00, 0xCC, 0xDD}
+)
+
+func TestConsumerMetadataResponseError(t *testing.T) {
+ response := ConsumerMetadataResponse{Err: ErrOffsetsLoadInProgress}
+ testResponse(t, "error", &response, consumerMetadataResponseError)
+}
+
+func TestConsumerMetadataResponseSuccess(t *testing.T) {
+ broker := NewBroker("foo:52445")
+ broker.id = 0xAB
+ response := ConsumerMetadataResponse{
+ Coordinator: broker,
+ CoordinatorID: 0xAB,
+ CoordinatorHost: "foo",
+ CoordinatorPort: 0xCCDD,
+ Err: ErrNoError,
+ }
+ testResponse(t, "success", &response, consumerMetadataResponseSuccess)
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_test.go b/vendor/github.com/Shopify/sarama/consumer_test.go
new file mode 100644
index 000000000..387ede314
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_test.go
@@ -0,0 +1,854 @@
+package sarama
+
+import (
+ "log"
+ "os"
+ "os/signal"
+ "sync"
+ "testing"
+ "time"
+)
+
+var testMsg = StringEncoder("Foo")
+
+// If a particular offset is provided then messages are consumed starting from
+// that offset.
+func TestConsumerOffsetManual(t *testing.T) {
+ // Given
+ broker0 := NewMockBroker(t, 0)
+
+ mockFetchResponse := NewMockFetchResponse(t, 1)
+ for i := 0; i < 10; i++ {
+ mockFetchResponse.SetMessage("my_topic", 0, int64(i+1234), testMsg)
+ }
+
+ broker0.SetHandlerByMap(map[string]MockResponse{
+ "MetadataRequest": NewMockMetadataResponse(t).
+ SetBroker(broker0.Addr(), broker0.BrokerID()).
+ SetLeader("my_topic", 0, broker0.BrokerID()),
+ "OffsetRequest": NewMockOffsetResponse(t).
+ SetOffset("my_topic", 0, OffsetOldest, 0).
+ SetOffset("my_topic", 0, OffsetNewest, 2345),
+ "FetchRequest": mockFetchResponse,
+ })
+
+ // When
+ master, err := NewConsumer([]string{broker0.Addr()}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ consumer, err := master.ConsumePartition("my_topic", 0, 1234)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Then: messages starting from offset 1234 are consumed.
+ for i := 0; i < 10; i++ {
+ select {
+ case message := <-consumer.Messages():
+ assertMessageOffset(t, message, int64(i+1234))
+ case err := <-consumer.Errors():
+ t.Error(err)
+ }
+ }
+
+ safeClose(t, consumer)
+ safeClose(t, master)
+ broker0.Close()
+}
+
+// If `OffsetNewest` is passed as the initial offset then the first consumed
+// message indeed corresponds to the offset that the broker claims to be the
+// newest in its offset response.
+func TestConsumerOffsetNewest(t *testing.T) {
+ // Given
+ broker0 := NewMockBroker(t, 0)
+ broker0.SetHandlerByMap(map[string]MockResponse{
+ "MetadataRequest": NewMockMetadataResponse(t).
+ SetBroker(broker0.Addr(), broker0.BrokerID()).
+ SetLeader("my_topic", 0, broker0.BrokerID()),
+ "OffsetRequest": NewMockOffsetResponse(t).
+ SetOffset("my_topic", 0, OffsetNewest, 10).
+ SetOffset("my_topic", 0, OffsetOldest, 7),
+ "FetchRequest": NewMockFetchResponse(t, 1).
+ SetMessage("my_topic", 0, 9, testMsg).
+ SetMessage("my_topic", 0, 10, testMsg).
+ SetMessage("my_topic", 0, 11, testMsg).
+ SetHighWaterMark("my_topic", 0, 14),
+ })
+
+ master, err := NewConsumer([]string{broker0.Addr()}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // When
+ consumer, err := master.ConsumePartition("my_topic", 0, OffsetNewest)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Then
+ assertMessageOffset(t, <-consumer.Messages(), 10)
+ if hwmo := consumer.HighWaterMarkOffset(); hwmo != 14 {
+ t.Errorf("Expected high water mark offset 14, found %d", hwmo)
+ }
+
+ safeClose(t, consumer)
+ safeClose(t, master)
+ broker0.Close()
+}
+
+// It is possible to close a partition consumer and then recreate it.
+func TestConsumerRecreate(t *testing.T) {
+ // Given
+ broker0 := NewMockBroker(t, 0)
+ broker0.SetHandlerByMap(map[string]MockResponse{
+ "MetadataRequest": NewMockMetadataResponse(t).
+ SetBroker(broker0.Addr(), broker0.BrokerID()).
+ SetLeader("my_topic", 0, broker0.BrokerID()),
+ "OffsetRequest": NewMockOffsetResponse(t).
+ SetOffset("my_topic", 0, OffsetOldest, 0).
+ SetOffset("my_topic", 0, OffsetNewest, 1000),
+ "FetchRequest": NewMockFetchResponse(t, 1).
+ SetMessage("my_topic", 0, 10, testMsg),
+ })
+
+ c, err := NewConsumer([]string{broker0.Addr()}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pc, err := c.ConsumePartition("my_topic", 0, 10)
+ if err != nil {
+ t.Fatal(err)
+ }
+ assertMessageOffset(t, <-pc.Messages(), 10)
+
+ // When
+ safeClose(t, pc)
+ pc, err = c.ConsumePartition("my_topic", 0, 10)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Then
+ assertMessageOffset(t, <-pc.Messages(), 10)
+
+ safeClose(t, pc)
+ safeClose(t, c)
+ broker0.Close()
+}
+
+// An attempt to consume the same partition twice should fail.
+func TestConsumerDuplicate(t *testing.T) {
+ // Given
+ broker0 := NewMockBroker(t, 0)
+ broker0.SetHandlerByMap(map[string]MockResponse{
+ "MetadataRequest": NewMockMetadataResponse(t).
+ SetBroker(broker0.Addr(), broker0.BrokerID()).
+ SetLeader("my_topic", 0, broker0.BrokerID()),
+ "OffsetRequest": NewMockOffsetResponse(t).
+ SetOffset("my_topic", 0, OffsetOldest, 0).
+ SetOffset("my_topic", 0, OffsetNewest, 1000),
+ "FetchRequest": NewMockFetchResponse(t, 1),
+ })
+
+ config := NewConfig()
+ config.ChannelBufferSize = 0
+ c, err := NewConsumer([]string{broker0.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pc1, err := c.ConsumePartition("my_topic", 0, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // When
+ pc2, err := c.ConsumePartition("my_topic", 0, 0)
+
+ // Then
+ if pc2 != nil || err != ConfigurationError("That topic/partition is already being consumed") {
+ t.Fatal("A partition cannot be consumed twice at the same time")
+ }
+
+ safeClose(t, pc1)
+ safeClose(t, c)
+ broker0.Close()
+}
+
+// If the consumer fails to refresh metadata, it keeps retrying with the
+// frequency specified by `Config.Consumer.Retry.Backoff`.
+func TestConsumerLeaderRefreshError(t *testing.T) {
+ // Given
+ broker0 := NewMockBroker(t, 100)
+
+ // Stage 1: my_topic/0 served by broker0
+ Logger.Printf(" STAGE 1")
+
+ broker0.SetHandlerByMap(map[string]MockResponse{
+ "MetadataRequest": NewMockMetadataResponse(t).
+ SetBroker(broker0.Addr(), broker0.BrokerID()).
+ SetLeader("my_topic", 0, broker0.BrokerID()),
+ "OffsetRequest": NewMockOffsetResponse(t).
+ SetOffset("my_topic", 0, OffsetOldest, 123).
+ SetOffset("my_topic", 0, OffsetNewest, 1000),
+ "FetchRequest": NewMockFetchResponse(t, 1).
+ SetMessage("my_topic", 0, 123, testMsg),
+ })
+
+ config := NewConfig()
+ config.Net.ReadTimeout = 100 * time.Millisecond
+ config.Consumer.Retry.Backoff = 200 * time.Millisecond
+ config.Consumer.Return.Errors = true
+ config.Metadata.Retry.Max = 0
+ c, err := NewConsumer([]string{broker0.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ assertMessageOffset(t, <-pc.Messages(), 123)
+
+ // Stage 2: broker0 says that it is no longer the leader for my_topic/0,
+	// but the requests to retrieve metadata fail with a network timeout.
+ Logger.Printf(" STAGE 2")
+
+ fetchResponse2 := &FetchResponse{}
+ fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition)
+
+ broker0.SetHandlerByMap(map[string]MockResponse{
+ "FetchRequest": NewMockWrapper(fetchResponse2),
+ })
+
+ if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers {
+ t.Errorf("Unexpected error: %v", consErr.Err)
+ }
+
+ // Stage 3: finally the metadata returned by broker0 tells that broker1 is
+ // a new leader for my_topic/0. Consumption resumes.
+
+ Logger.Printf(" STAGE 3")
+
+ broker1 := NewMockBroker(t, 101)
+
+ broker1.SetHandlerByMap(map[string]MockResponse{
+ "FetchRequest": NewMockFetchResponse(t, 1).
+ SetMessage("my_topic", 0, 124, testMsg),
+ })
+ broker0.SetHandlerByMap(map[string]MockResponse{
+ "MetadataRequest": NewMockMetadataResponse(t).
+ SetBroker(broker0.Addr(), broker0.BrokerID()).
+ SetBroker(broker1.Addr(), broker1.BrokerID()).
+ SetLeader("my_topic", 0, broker1.BrokerID()),
+ })
+
+ assertMessageOffset(t, <-pc.Messages(), 124)
+
+ safeClose(t, pc)
+ safeClose(t, c)
+ broker1.Close()
+ broker0.Close()
+}
+
+func TestConsumerInvalidTopic(t *testing.T) {
+ // Given
+ broker0 := NewMockBroker(t, 100)
+ broker0.SetHandlerByMap(map[string]MockResponse{
+ "MetadataRequest": NewMockMetadataResponse(t).
+ SetBroker(broker0.Addr(), broker0.BrokerID()),
+ })
+
+ c, err := NewConsumer([]string{broker0.Addr()}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // When
+ pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
+
+ // Then
+ if pc != nil || err != ErrUnknownTopicOrPartition {
+		t.Errorf("Should fail with ErrUnknownTopicOrPartition, got err=%v", err)
+ }
+
+ safeClose(t, c)
+ broker0.Close()
+}
+
+// Nothing bad happens if a partition consumer that has no leader assigned at
+// the moment is closed.
+func TestConsumerClosePartitionWithoutLeader(t *testing.T) {
+ // Given
+ broker0 := NewMockBroker(t, 100)
+ broker0.SetHandlerByMap(map[string]MockResponse{
+ "MetadataRequest": NewMockMetadataResponse(t).
+ SetBroker(broker0.Addr(), broker0.BrokerID()).
+ SetLeader("my_topic", 0, broker0.BrokerID()),
+ "OffsetRequest": NewMockOffsetResponse(t).
+ SetOffset("my_topic", 0, OffsetOldest, 123).
+ SetOffset("my_topic", 0, OffsetNewest, 1000),
+ "FetchRequest": NewMockFetchResponse(t, 1).
+ SetMessage("my_topic", 0, 123, testMsg),
+ })
+
+ config := NewConfig()
+ config.Net.ReadTimeout = 100 * time.Millisecond
+ config.Consumer.Retry.Backoff = 100 * time.Millisecond
+ config.Consumer.Return.Errors = true
+ config.Metadata.Retry.Max = 0
+ c, err := NewConsumer([]string{broker0.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ assertMessageOffset(t, <-pc.Messages(), 123)
+
+ // broker0 says that it is no longer the leader for my_topic/0, but the
+	// requests to retrieve metadata fail with a network timeout.
+ fetchResponse2 := &FetchResponse{}
+ fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition)
+
+ broker0.SetHandlerByMap(map[string]MockResponse{
+ "FetchRequest": NewMockWrapper(fetchResponse2),
+ })
+
+ // When
+ if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers {
+ t.Errorf("Unexpected error: %v", consErr.Err)
+ }
+
+ // Then: the partition consumer can be closed without any problem.
+ safeClose(t, pc)
+ safeClose(t, c)
+ broker0.Close()
+}
+
+// If the initial offset passed on partition consumer creation is out of the
+// actual offset range for the partition, then the partition consumer stops
+// immediately, closing its output channels.
+func TestConsumerShutsDownOutOfRange(t *testing.T) {
+ // Given
+ broker0 := NewMockBroker(t, 0)
+ fetchResponse := new(FetchResponse)
+ fetchResponse.AddError("my_topic", 0, ErrOffsetOutOfRange)
+ broker0.SetHandlerByMap(map[string]MockResponse{
+ "MetadataRequest": NewMockMetadataResponse(t).
+ SetBroker(broker0.Addr(), broker0.BrokerID()).
+ SetLeader("my_topic", 0, broker0.BrokerID()),
+ "OffsetRequest": NewMockOffsetResponse(t).
+ SetOffset("my_topic", 0, OffsetNewest, 1234).
+ SetOffset("my_topic", 0, OffsetOldest, 7),
+ "FetchRequest": NewMockWrapper(fetchResponse),
+ })
+
+ master, err := NewConsumer([]string{broker0.Addr()}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // When
+ consumer, err := master.ConsumePartition("my_topic", 0, 101)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Then: consumer should shut down closing its messages and errors channels.
+ if _, ok := <-consumer.Messages(); ok {
+ t.Error("Expected the consumer to shut down")
+ }
+ safeClose(t, consumer)
+
+ safeClose(t, master)
+ broker0.Close()
+}
+
+// If a fetch response contains messages with offsets that are smaller than
+// requested, then such messages are ignored.
+func TestConsumerExtraOffsets(t *testing.T) {
+ // Given
+ broker0 := NewMockBroker(t, 0)
+ fetchResponse1 := &FetchResponse{}
+ fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 1)
+ fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 2)
+ fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 3)
+ fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 4)
+ fetchResponse2 := &FetchResponse{}
+ fetchResponse2.AddError("my_topic", 0, ErrNoError)
+ broker0.SetHandlerByMap(map[string]MockResponse{
+ "MetadataRequest": NewMockMetadataResponse(t).
+ SetBroker(broker0.Addr(), broker0.BrokerID()).
+ SetLeader("my_topic", 0, broker0.BrokerID()),
+ "OffsetRequest": NewMockOffsetResponse(t).
+ SetOffset("my_topic", 0, OffsetNewest, 1234).
+ SetOffset("my_topic", 0, OffsetOldest, 0),
+ "FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
+ })
+
+ master, err := NewConsumer([]string{broker0.Addr()}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // When
+ consumer, err := master.ConsumePartition("my_topic", 0, 3)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Then: messages with offsets 1 and 2 are not returned even though they
+ // are present in the response.
+ assertMessageOffset(t, <-consumer.Messages(), 3)
+ assertMessageOffset(t, <-consumer.Messages(), 4)
+
+ safeClose(t, consumer)
+ safeClose(t, master)
+ broker0.Close()
+}
+
+// It is fine if the offsets of fetched messages are not sequential (as long as
+// they are strictly increasing).
+func TestConsumerNonSequentialOffsets(t *testing.T) {
+ // Given
+ broker0 := NewMockBroker(t, 0)
+ fetchResponse1 := &FetchResponse{}
+ fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 5)
+ fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 7)
+ fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 11)
+ fetchResponse2 := &FetchResponse{}
+ fetchResponse2.AddError("my_topic", 0, ErrNoError)
+ broker0.SetHandlerByMap(map[string]MockResponse{
+ "MetadataRequest": NewMockMetadataResponse(t).
+ SetBroker(broker0.Addr(), broker0.BrokerID()).
+ SetLeader("my_topic", 0, broker0.BrokerID()),
+ "OffsetRequest": NewMockOffsetResponse(t).
+ SetOffset("my_topic", 0, OffsetNewest, 1234).
+ SetOffset("my_topic", 0, OffsetOldest, 0),
+ "FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
+ })
+
+ master, err := NewConsumer([]string{broker0.Addr()}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // When
+ consumer, err := master.ConsumePartition("my_topic", 0, 3)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+	// Then: the messages with the non-sequential offsets 5, 7 and 11 are all
+	// returned, despite the gaps between them.
+ assertMessageOffset(t, <-consumer.Messages(), 5)
+ assertMessageOffset(t, <-consumer.Messages(), 7)
+ assertMessageOffset(t, <-consumer.Messages(), 11)
+
+ safeClose(t, consumer)
+ safeClose(t, master)
+ broker0.Close()
+}
+
+// If leadership for a partition changes then the consumer resolves the new
+// leader and switches to it.
+func TestConsumerRebalancingMultiplePartitions(t *testing.T) {
+ // initial setup
+ seedBroker := NewMockBroker(t, 10)
+ leader0 := NewMockBroker(t, 0)
+ leader1 := NewMockBroker(t, 1)
+
+ seedBroker.SetHandlerByMap(map[string]MockResponse{
+ "MetadataRequest": NewMockMetadataResponse(t).
+ SetBroker(leader0.Addr(), leader0.BrokerID()).
+ SetBroker(leader1.Addr(), leader1.BrokerID()).
+ SetLeader("my_topic", 0, leader0.BrokerID()).
+ SetLeader("my_topic", 1, leader1.BrokerID()),
+ })
+
+ mockOffsetResponse1 := NewMockOffsetResponse(t).
+ SetOffset("my_topic", 0, OffsetOldest, 0).
+ SetOffset("my_topic", 0, OffsetNewest, 1000).
+ SetOffset("my_topic", 1, OffsetOldest, 0).
+ SetOffset("my_topic", 1, OffsetNewest, 1000)
+ leader0.SetHandlerByMap(map[string]MockResponse{
+ "OffsetRequest": mockOffsetResponse1,
+ "FetchRequest": NewMockFetchResponse(t, 1),
+ })
+ leader1.SetHandlerByMap(map[string]MockResponse{
+ "OffsetRequest": mockOffsetResponse1,
+ "FetchRequest": NewMockFetchResponse(t, 1),
+ })
+
+ // launch test goroutines
+ config := NewConfig()
+	config.Consumer.Retry.Backoff = 50 * time.Millisecond
+ master, err := NewConsumer([]string{seedBroker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // we expect to end up (eventually) consuming exactly ten messages on each partition
+ var wg sync.WaitGroup
+ for i := int32(0); i < 2; i++ {
+ consumer, err := master.ConsumePartition("my_topic", i, 0)
+ if err != nil {
+ t.Error(err)
+ }
+
+ go func(c PartitionConsumer) {
+ for err := range c.Errors() {
+ t.Error(err)
+ }
+ }(consumer)
+
+ wg.Add(1)
+ go func(partition int32, c PartitionConsumer) {
+ for i := 0; i < 10; i++ {
+ message := <-consumer.Messages()
+ if message.Offset != int64(i) {
+ t.Error("Incorrect message offset!", i, partition, message.Offset)
+ }
+ if message.Partition != partition {
+ t.Error("Incorrect message partition!")
+ }
+ }
+ safeClose(t, consumer)
+ wg.Done()
+ }(i, consumer)
+ }
+
+ time.Sleep(50 * time.Millisecond)
+ Logger.Printf(" STAGE 1")
+ // Stage 1:
+ // * my_topic/0 -> leader0 serves 4 messages
+ // * my_topic/1 -> leader1 serves 0 messages
+
+ mockFetchResponse := NewMockFetchResponse(t, 1)
+ for i := 0; i < 4; i++ {
+ mockFetchResponse.SetMessage("my_topic", 0, int64(i), testMsg)
+ }
+ leader0.SetHandlerByMap(map[string]MockResponse{
+ "FetchRequest": mockFetchResponse,
+ })
+
+ time.Sleep(50 * time.Millisecond)
+ Logger.Printf(" STAGE 2")
+ // Stage 2:
+ // * leader0 says that it is no longer serving my_topic/0
+ // * seedBroker tells that leader1 is serving my_topic/0 now
+
+ // seed broker tells that the new partition 0 leader is leader1
+ seedBroker.SetHandlerByMap(map[string]MockResponse{
+ "MetadataRequest": NewMockMetadataResponse(t).
+ SetLeader("my_topic", 0, leader1.BrokerID()).
+ SetLeader("my_topic", 1, leader1.BrokerID()),
+ })
+
+ // leader0 says no longer leader of partition 0
+ fetchResponse := new(FetchResponse)
+ fetchResponse.AddError("my_topic", 0, ErrNotLeaderForPartition)
+ leader0.SetHandlerByMap(map[string]MockResponse{
+ "FetchRequest": NewMockWrapper(fetchResponse),
+ })
+
+ time.Sleep(50 * time.Millisecond)
+ Logger.Printf(" STAGE 3")
+ // Stage 3:
+ // * my_topic/0 -> leader1 serves 3 messages
+	// * my_topic/1 -> leader1 serves 8 messages
+
+	// leader1 provides 3 messages on partition 0, and 8 messages on partition 1
+ mockFetchResponse2 := NewMockFetchResponse(t, 2)
+ for i := 4; i < 7; i++ {
+ mockFetchResponse2.SetMessage("my_topic", 0, int64(i), testMsg)
+ }
+ for i := 0; i < 8; i++ {
+ mockFetchResponse2.SetMessage("my_topic", 1, int64(i), testMsg)
+ }
+ leader1.SetHandlerByMap(map[string]MockResponse{
+ "FetchRequest": mockFetchResponse2,
+ })
+
+ time.Sleep(50 * time.Millisecond)
+ Logger.Printf(" STAGE 4")
+ // Stage 4:
+ // * my_topic/0 -> leader1 serves 3 messages
+ // * my_topic/1 -> leader1 tells that it is no longer the leader
+ // * seedBroker tells that leader0 is a new leader for my_topic/1
+
+ // metadata assigns 0 to leader1 and 1 to leader0
+ seedBroker.SetHandlerByMap(map[string]MockResponse{
+ "MetadataRequest": NewMockMetadataResponse(t).
+ SetLeader("my_topic", 0, leader1.BrokerID()).
+ SetLeader("my_topic", 1, leader0.BrokerID()),
+ })
+
+ // leader1 provides three more messages on partition0, says no longer leader of partition1
+ mockFetchResponse3 := NewMockFetchResponse(t, 3).
+ SetMessage("my_topic", 0, int64(7), testMsg).
+ SetMessage("my_topic", 0, int64(8), testMsg).
+ SetMessage("my_topic", 0, int64(9), testMsg)
+ fetchResponse4 := new(FetchResponse)
+ fetchResponse4.AddError("my_topic", 1, ErrNotLeaderForPartition)
+ leader1.SetHandlerByMap(map[string]MockResponse{
+ "FetchRequest": NewMockSequence(mockFetchResponse3, fetchResponse4),
+ })
+
+ // leader0 provides two messages on partition 1
+ mockFetchResponse4 := NewMockFetchResponse(t, 2)
+ for i := 8; i < 10; i++ {
+ mockFetchResponse4.SetMessage("my_topic", 1, int64(i), testMsg)
+ }
+ leader0.SetHandlerByMap(map[string]MockResponse{
+ "FetchRequest": mockFetchResponse4,
+ })
+
+ wg.Wait()
+ safeClose(t, master)
+ leader1.Close()
+ leader0.Close()
+ seedBroker.Close()
+}
+
+// When two partitions have the same broker as the leader, a full channel
+// buffer on one partition consumer does not affect the other consumer's
+// ability to read messages.
+func TestConsumerInterleavedClose(t *testing.T) {
+ // Given
+ broker0 := NewMockBroker(t, 0)
+ broker0.SetHandlerByMap(map[string]MockResponse{
+ "MetadataRequest": NewMockMetadataResponse(t).
+ SetBroker(broker0.Addr(), broker0.BrokerID()).
+ SetLeader("my_topic", 0, broker0.BrokerID()).
+ SetLeader("my_topic", 1, broker0.BrokerID()),
+ "OffsetRequest": NewMockOffsetResponse(t).
+ SetOffset("my_topic", 0, OffsetOldest, 1000).
+ SetOffset("my_topic", 0, OffsetNewest, 1100).
+ SetOffset("my_topic", 1, OffsetOldest, 2000).
+ SetOffset("my_topic", 1, OffsetNewest, 2100),
+ "FetchRequest": NewMockFetchResponse(t, 1).
+ SetMessage("my_topic", 0, 1000, testMsg).
+ SetMessage("my_topic", 0, 1001, testMsg).
+ SetMessage("my_topic", 0, 1002, testMsg).
+ SetMessage("my_topic", 1, 2000, testMsg),
+ })
+
+ config := NewConfig()
+ config.ChannelBufferSize = 0
+ master, err := NewConsumer([]string{broker0.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ c0, err := master.ConsumePartition("my_topic", 0, 1000)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ c1, err := master.ConsumePartition("my_topic", 1, 2000)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // When/Then: we can read from partition 0 even if nobody reads from partition 1
+ assertMessageOffset(t, <-c0.Messages(), 1000)
+ assertMessageOffset(t, <-c0.Messages(), 1001)
+ assertMessageOffset(t, <-c0.Messages(), 1002)
+
+ safeClose(t, c1)
+ safeClose(t, c0)
+ safeClose(t, master)
+ broker0.Close()
+}
+
+func TestConsumerBounceWithReferenceOpen(t *testing.T) {
+ broker0 := NewMockBroker(t, 0)
+ broker0Addr := broker0.Addr()
+ broker1 := NewMockBroker(t, 1)
+
+ mockMetadataResponse := NewMockMetadataResponse(t).
+ SetBroker(broker0.Addr(), broker0.BrokerID()).
+ SetBroker(broker1.Addr(), broker1.BrokerID()).
+ SetLeader("my_topic", 0, broker0.BrokerID()).
+ SetLeader("my_topic", 1, broker1.BrokerID())
+
+ mockOffsetResponse := NewMockOffsetResponse(t).
+ SetOffset("my_topic", 0, OffsetOldest, 1000).
+ SetOffset("my_topic", 0, OffsetNewest, 1100).
+ SetOffset("my_topic", 1, OffsetOldest, 2000).
+ SetOffset("my_topic", 1, OffsetNewest, 2100)
+
+ mockFetchResponse := NewMockFetchResponse(t, 1)
+ for i := 0; i < 10; i++ {
+ mockFetchResponse.SetMessage("my_topic", 0, int64(1000+i), testMsg)
+ mockFetchResponse.SetMessage("my_topic", 1, int64(2000+i), testMsg)
+ }
+
+ broker0.SetHandlerByMap(map[string]MockResponse{
+ "OffsetRequest": mockOffsetResponse,
+ "FetchRequest": mockFetchResponse,
+ })
+ broker1.SetHandlerByMap(map[string]MockResponse{
+ "MetadataRequest": mockMetadataResponse,
+ "OffsetRequest": mockOffsetResponse,
+ "FetchRequest": mockFetchResponse,
+ })
+
+ config := NewConfig()
+ config.Consumer.Return.Errors = true
+ config.Consumer.Retry.Backoff = 100 * time.Millisecond
+ config.ChannelBufferSize = 1
+ master, err := NewConsumer([]string{broker1.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ c0, err := master.ConsumePartition("my_topic", 0, 1000)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ c1, err := master.ConsumePartition("my_topic", 1, 2000)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+	// Read messages from both partitions to make sure that both brokers operate
+ // normally.
+ assertMessageOffset(t, <-c0.Messages(), 1000)
+ assertMessageOffset(t, <-c1.Messages(), 2000)
+
+	// Simulate broker shutdown. Note that the metadata response does not change,
+	// i.e. leadership does not move to another broker, so the partition
+	// consumer will keep retrying to restore the connection with the broker.
+ broker0.Close()
+
+ // Make sure that while the partition/0 leader is down, consumer/partition/1
+ // is capable of pulling messages from broker1.
+ for i := 1; i < 7; i++ {
+ offset := (<-c1.Messages()).Offset
+ if offset != int64(2000+i) {
+			t.Errorf("Expected offset %d from consumer/partition/1, got %d", int64(2000+i), offset)
+ }
+ }
+
+ // Bring broker0 back to service.
+ broker0 = NewMockBrokerAddr(t, 0, broker0Addr)
+ broker0.SetHandlerByMap(map[string]MockResponse{
+ "FetchRequest": mockFetchResponse,
+ })
+
+ // Read the rest of messages from both partitions.
+ for i := 7; i < 10; i++ {
+ assertMessageOffset(t, <-c1.Messages(), int64(2000+i))
+ }
+ for i := 1; i < 10; i++ {
+ assertMessageOffset(t, <-c0.Messages(), int64(1000+i))
+ }
+
+ select {
+ case <-c0.Errors():
+ default:
+ t.Errorf("Partition consumer should have detected broker restart")
+ }
+
+ safeClose(t, c1)
+ safeClose(t, c0)
+ safeClose(t, master)
+ broker0.Close()
+ broker1.Close()
+}
+
+func TestConsumerOffsetOutOfRange(t *testing.T) {
+ // Given
+ broker0 := NewMockBroker(t, 2)
+ broker0.SetHandlerByMap(map[string]MockResponse{
+ "MetadataRequest": NewMockMetadataResponse(t).
+ SetBroker(broker0.Addr(), broker0.BrokerID()).
+ SetLeader("my_topic", 0, broker0.BrokerID()),
+ "OffsetRequest": NewMockOffsetResponse(t).
+ SetOffset("my_topic", 0, OffsetNewest, 1234).
+ SetOffset("my_topic", 0, OffsetOldest, 2345),
+ })
+
+ master, err := NewConsumer([]string{broker0.Addr()}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // When/Then
+ if _, err := master.ConsumePartition("my_topic", 0, 0); err != ErrOffsetOutOfRange {
+ t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
+ }
+ if _, err := master.ConsumePartition("my_topic", 0, 3456); err != ErrOffsetOutOfRange {
+ t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
+ }
+ if _, err := master.ConsumePartition("my_topic", 0, -3); err != ErrOffsetOutOfRange {
+ t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
+ }
+
+ safeClose(t, master)
+ broker0.Close()
+}
+
+func assertMessageOffset(t *testing.T, msg *ConsumerMessage, expectedOffset int64) {
+ if msg.Offset != expectedOffset {
+ t.Errorf("Incorrect message offset: expected=%d, actual=%d", expectedOffset, msg.Offset)
+ }
+}
+
+// This example shows how to use the consumer to read messages
+// from a single partition.
+func ExampleConsumer() {
+ consumer, err := NewConsumer([]string{"localhost:9092"}, nil)
+ if err != nil {
+ panic(err)
+ }
+
+ defer func() {
+ if err := consumer.Close(); err != nil {
+ log.Fatalln(err)
+ }
+ }()
+
+ partitionConsumer, err := consumer.ConsumePartition("my_topic", 0, OffsetNewest)
+ if err != nil {
+ panic(err)
+ }
+
+ defer func() {
+ if err := partitionConsumer.Close(); err != nil {
+ log.Fatalln(err)
+ }
+ }()
+
+ // Trap SIGINT to trigger a shutdown.
+ signals := make(chan os.Signal, 1)
+ signal.Notify(signals, os.Interrupt)
+
+ consumed := 0
+ConsumerLoop:
+ for {
+ select {
+ case msg := <-partitionConsumer.Messages():
+ log.Printf("Consumed message offset %d\n", msg.Offset)
+ consumed++
+ case <-signals:
+ break ConsumerLoop
+ }
+ }
+
+ log.Printf("Consumed: %d\n", consumed)
+}
diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go
new file mode 100644
index 000000000..f4fde18ad
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/crc32_field.go
@@ -0,0 +1,35 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "hash/crc32"
+)
+
+// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
+type crc32Field struct {
+ startOffset int
+}
+
+func (c *crc32Field) saveOffset(in int) {
+ c.startOffset = in
+}
+
+func (c *crc32Field) reserveLength() int {
+ return 4
+}
+
+func (c *crc32Field) run(curOffset int, buf []byte) error {
+ crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
+ binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
+ return nil
+}
+
+func (c *crc32Field) check(curOffset int, buf []byte) error {
+ crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
+
+ if crc != binary.BigEndian.Uint32(buf[c.startOffset:]) {
+ return PacketDecodingError{"CRC didn't match"}
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go
new file mode 100644
index 000000000..1fb356777
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_groups_request.go
@@ -0,0 +1,30 @@
+package sarama
+
+type DescribeGroupsRequest struct {
+ Groups []string
+}
+
+func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
+ return pe.putStringArray(r.Groups)
+}
+
+func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Groups, err = pd.getStringArray()
+ return
+}
+
+func (r *DescribeGroupsRequest) key() int16 {
+ return 15
+}
+
+func (r *DescribeGroupsRequest) version() int16 {
+ return 0
+}
+
+func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
+
+func (r *DescribeGroupsRequest) AddGroup(group string) {
+ r.Groups = append(r.Groups, group)
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request_test.go b/vendor/github.com/Shopify/sarama/describe_groups_request_test.go
new file mode 100644
index 000000000..7d45f3fee
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_groups_request_test.go
@@ -0,0 +1,34 @@
+package sarama
+
+import "testing"
+
+var (
+ emptyDescribeGroupsRequest = []byte{0, 0, 0, 0}
+
+ singleDescribeGroupsRequest = []byte{
+ 0, 0, 0, 1, // 1 group
+ 0, 3, 'f', 'o', 'o', // group name: foo
+ }
+
+ doubleDescribeGroupsRequest = []byte{
+ 0, 0, 0, 2, // 2 groups
+ 0, 3, 'f', 'o', 'o', // group name: foo
+		0, 3, 'b', 'a', 'r', // group name: bar
+ }
+)
+
+func TestDescribeGroupsRequest(t *testing.T) {
+ var request *DescribeGroupsRequest
+
+ request = new(DescribeGroupsRequest)
+ testRequest(t, "no groups", request, emptyDescribeGroupsRequest)
+
+ request = new(DescribeGroupsRequest)
+ request.AddGroup("foo")
+ testRequest(t, "one group", request, singleDescribeGroupsRequest)
+
+ request = new(DescribeGroupsRequest)
+ request.AddGroup("foo")
+ request.AddGroup("bar")
+ testRequest(t, "two groups", request, doubleDescribeGroupsRequest)
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go
new file mode 100644
index 000000000..542b3a971
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_groups_response.go
@@ -0,0 +1,187 @@
+package sarama
+
+type DescribeGroupsResponse struct {
+ Groups []*GroupDescription
+}
+
+func (r *DescribeGroupsResponse) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(r.Groups)); err != nil {
+ return err
+ }
+
+ for _, groupDescription := range r.Groups {
+ if err := groupDescription.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Groups = make([]*GroupDescription, n)
+ for i := 0; i < n; i++ {
+ r.Groups[i] = new(GroupDescription)
+ if err := r.Groups[i].decode(pd); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *DescribeGroupsResponse) key() int16 {
+ return 15
+}
+
+func (r *DescribeGroupsResponse) version() int16 {
+ return 0
+}
+
+func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
+
+type GroupDescription struct {
+ Err KError
+ GroupId string
+ State string
+ ProtocolType string
+ Protocol string
+ Members map[string]*GroupMemberDescription
+}
+
+func (gd *GroupDescription) encode(pe packetEncoder) error {
+ pe.putInt16(int16(gd.Err))
+
+ if err := pe.putString(gd.GroupId); err != nil {
+ return err
+ }
+ if err := pe.putString(gd.State); err != nil {
+ return err
+ }
+ if err := pe.putString(gd.ProtocolType); err != nil {
+ return err
+ }
+ if err := pe.putString(gd.Protocol); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(gd.Members)); err != nil {
+ return err
+ }
+
+ for memberId, groupMemberDescription := range gd.Members {
+ if err := pe.putString(memberId); err != nil {
+ return err
+ }
+ if err := groupMemberDescription.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (gd *GroupDescription) decode(pd packetDecoder) (err error) {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ gd.Err = KError(kerr)
+
+ if gd.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+ if gd.State, err = pd.getString(); err != nil {
+ return
+ }
+ if gd.ProtocolType, err = pd.getString(); err != nil {
+ return
+ }
+ if gd.Protocol, err = pd.getString(); err != nil {
+ return
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ gd.Members = make(map[string]*GroupMemberDescription)
+ for i := 0; i < n; i++ {
+ memberId, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ gd.Members[memberId] = new(GroupMemberDescription)
+ if err := gd.Members[memberId].decode(pd); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type GroupMemberDescription struct {
+ ClientId string
+ ClientHost string
+ MemberMetadata []byte
+ MemberAssignment []byte
+}
+
+func (gmd *GroupMemberDescription) encode(pe packetEncoder) error {
+ if err := pe.putString(gmd.ClientId); err != nil {
+ return err
+ }
+ if err := pe.putString(gmd.ClientHost); err != nil {
+ return err
+ }
+ if err := pe.putBytes(gmd.MemberMetadata); err != nil {
+ return err
+ }
+ if err := pe.putBytes(gmd.MemberAssignment); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) {
+ if gmd.ClientId, err = pd.getString(); err != nil {
+ return
+ }
+ if gmd.ClientHost, err = pd.getString(); err != nil {
+ return
+ }
+ if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
+ return
+ }
+ if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
+ return
+ }
+
+ return nil
+}
+
+func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
+ assignment := new(ConsumerGroupMemberAssignment)
+ err := decode(gmd.MemberAssignment, assignment)
+ return assignment, err
+}
+
+func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) {
+ metadata := new(ConsumerGroupMemberMetadata)
+ err := decode(gmd.MemberMetadata, metadata)
+ return metadata, err
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response_test.go b/vendor/github.com/Shopify/sarama/describe_groups_response_test.go
new file mode 100644
index 000000000..dd3973191
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_groups_response_test.go
@@ -0,0 +1,91 @@
+package sarama
+
+import (
+ "reflect"
+ "testing"
+)
+
+var (
+ describeGroupsResponseEmpty = []byte{
+ 0, 0, 0, 0, // no groups
+ }
+
+ describeGroupsResponsePopulated = []byte{
+ 0, 0, 0, 2, // 2 groups
+
+ 0, 0, // no error
+ 0, 3, 'f', 'o', 'o', // Group ID
+ 0, 3, 'b', 'a', 'r', // State
+ 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // ConsumerProtocol type
+ 0, 3, 'b', 'a', 'z', // Protocol name
+ 0, 0, 0, 1, // 1 member
+ 0, 2, 'i', 'd', // Member ID
+ 0, 6, 's', 'a', 'r', 'a', 'm', 'a', // Client ID
+ 0, 9, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't', // Client Host
+ 0, 0, 0, 3, 0x01, 0x02, 0x03, // MemberMetadata
+ 0, 0, 0, 3, 0x04, 0x05, 0x06, // MemberAssignment
+
+ 0, 30, // ErrGroupAuthorizationFailed
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0, 0, 0,
+ }
+)
+
+func TestDescribeGroupsResponse(t *testing.T) {
+ var response *DescribeGroupsResponse
+
+ response = new(DescribeGroupsResponse)
+ testVersionDecodable(t, "empty", response, describeGroupsResponseEmpty, 0)
+ if len(response.Groups) != 0 {
+ t.Error("Expected no groups")
+ }
+
+ response = new(DescribeGroupsResponse)
+ testVersionDecodable(t, "populated", response, describeGroupsResponsePopulated, 0)
+ if len(response.Groups) != 2 {
+ t.Error("Expected two groups")
+ }
+
+ group0 := response.Groups[0]
+ if group0.Err != ErrNoError {
+		t.Error("Unexpected groups[0].Err, found", group0.Err)
+ }
+ if group0.GroupId != "foo" {
+		t.Error("Unexpected groups[0].GroupId, found", group0.GroupId)
+ }
+ if group0.State != "bar" {
+		t.Error("Unexpected groups[0].State, found", group0.State)
+ }
+ if group0.ProtocolType != "consumer" {
+		t.Error("Unexpected groups[0].ProtocolType, found", group0.ProtocolType)
+ }
+ if group0.Protocol != "baz" {
+		t.Error("Unexpected groups[0].Protocol, found", group0.Protocol)
+ }
+ if len(group0.Members) != 1 {
+		t.Error("Unexpected groups[0].Members, found", group0.Members)
+ }
+ if group0.Members["id"].ClientId != "sarama" {
+		t.Error("Unexpected groups[0].Members[id].ClientId, found", group0.Members["id"].ClientId)
+ }
+ if group0.Members["id"].ClientHost != "localhost" {
+		t.Error("Unexpected groups[0].Members[id].ClientHost, found", group0.Members["id"].ClientHost)
+ }
+ if !reflect.DeepEqual(group0.Members["id"].MemberMetadata, []byte{0x01, 0x02, 0x03}) {
+		t.Error("Unexpected groups[0].Members[id].MemberMetadata, found", group0.Members["id"].MemberMetadata)
+ }
+ if !reflect.DeepEqual(group0.Members["id"].MemberAssignment, []byte{0x04, 0x05, 0x06}) {
+		t.Error("Unexpected groups[0].Members[id].MemberAssignment, found", group0.Members["id"].MemberAssignment)
+ }
+
+ group1 := response.Groups[1]
+ if group1.Err != ErrGroupAuthorizationFailed {
+		t.Error("Unexpected groups[1].Err, found", group1.Err)
+ }
+ if len(group1.Members) != 0 {
+		t.Error("Unexpected groups[1].Members, found", group1.Members)
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml
new file mode 100644
index 000000000..adcf94213
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/dev.yml
@@ -0,0 +1,14 @@
+name: sarama
+
+up:
+ - go:
+ version: '1.8'
+
+commands:
+ test:
+ run: make test
+ desc: 'run unit tests'
+
+packages:
+ - git@github.com:Shopify/dev-shopify.git
+
diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go
new file mode 100644
index 000000000..7ce3bc0f6
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/encoder_decoder.go
@@ -0,0 +1,89 @@
+package sarama
+
+import (
+ "fmt"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+// Encoder is the interface that wraps the basic Encode method.
+// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules.
+type encoder interface {
+ encode(pe packetEncoder) error
+}
+
+// Encode takes an Encoder and turns it into bytes while potentially recording metrics.
+func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
+ if e == nil {
+ return nil, nil
+ }
+
+ var prepEnc prepEncoder
+ var realEnc realEncoder
+
+ err := e.encode(&prepEnc)
+ if err != nil {
+ return nil, err
+ }
+
+ if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
+ return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
+ }
+
+ realEnc.raw = make([]byte, prepEnc.length)
+ realEnc.registry = metricRegistry
+ err = e.encode(&realEnc)
+ if err != nil {
+ return nil, err
+ }
+
+ return realEnc.raw, nil
+}
+
+// Decoder is the interface that wraps the basic Decode method.
+// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules.
+type decoder interface {
+ decode(pd packetDecoder) error
+}
+
+type versionedDecoder interface {
+ decode(pd packetDecoder, version int16) error
+}
+
+// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes,
+// interpreted using Kafka's encoding rules.
+func decode(buf []byte, in decoder) error {
+ if buf == nil {
+ return nil
+ }
+
+ helper := realDecoder{raw: buf}
+ err := in.decode(&helper)
+ if err != nil {
+ return err
+ }
+
+ if helper.off != len(buf) {
+ return PacketDecodingError{"invalid length"}
+ }
+
+ return nil
+}
+
+func versionedDecode(buf []byte, in versionedDecoder, version int16) error {
+ if buf == nil {
+ return nil
+ }
+
+ helper := realDecoder{raw: buf}
+ err := in.decode(&helper, version)
+ if err != nil {
+ return err
+ }
+
+ if helper.off != len(buf) {
+ return PacketDecodingError{"invalid length"}
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go
new file mode 100644
index 000000000..e6800ed49
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/errors.go
@@ -0,0 +1,221 @@
+package sarama
+
+import (
+ "errors"
+ "fmt"
+)
+
+// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
+// or otherwise failed to respond.
+var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
+
+// ErrClosedClient is the error returned when a method is called on a client that has been closed.
+var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
+
+// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
+// not contain the expected information.
+var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
+
+// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
+// (meaning one outside of the range [0...numPartitions-1]).
+var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
+
+// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
+var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
+
+// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
+var ErrNotConnected = errors.New("kafka: broker not connected")
+
+// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
+// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
+// of the message set.
+var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
+
+// ErrShuttingDown is returned when a producer receives a message during shutdown.
+var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
+
+// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
+var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
+
+// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
+// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
+type PacketEncodingError struct {
+ Info string
+}
+
+func (err PacketEncodingError) Error() string {
+ return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
+}
+
+// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
+// This can be a bad CRC or length field, or any other invalid value.
+type PacketDecodingError struct {
+ Info string
+}
+
+func (err PacketDecodingError) Error() string {
+ return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
+}
+
+// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
+// when the specified configuration is invalid.
+type ConfigurationError string
+
+func (err ConfigurationError) Error() string {
+ return "kafka: invalid configuration (" + string(err) + ")"
+}
+
+// KError is the type of error that can be returned directly by the Kafka broker.
+// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
+type KError int16
+
+// Numeric error codes returned by the Kafka server.
+const (
+ ErrNoError KError = 0
+ ErrUnknown KError = -1
+ ErrOffsetOutOfRange KError = 1
+ ErrInvalidMessage KError = 2
+ ErrUnknownTopicOrPartition KError = 3
+ ErrInvalidMessageSize KError = 4
+ ErrLeaderNotAvailable KError = 5
+ ErrNotLeaderForPartition KError = 6
+ ErrRequestTimedOut KError = 7
+ ErrBrokerNotAvailable KError = 8
+ ErrReplicaNotAvailable KError = 9
+ ErrMessageSizeTooLarge KError = 10
+ ErrStaleControllerEpochCode KError = 11
+ ErrOffsetMetadataTooLarge KError = 12
+ ErrNetworkException KError = 13
+ ErrOffsetsLoadInProgress KError = 14
+ ErrConsumerCoordinatorNotAvailable KError = 15
+ ErrNotCoordinatorForConsumer KError = 16
+ ErrInvalidTopic KError = 17
+ ErrMessageSetSizeTooLarge KError = 18
+ ErrNotEnoughReplicas KError = 19
+ ErrNotEnoughReplicasAfterAppend KError = 20
+ ErrInvalidRequiredAcks KError = 21
+ ErrIllegalGeneration KError = 22
+ ErrInconsistentGroupProtocol KError = 23
+ ErrInvalidGroupId KError = 24
+ ErrUnknownMemberId KError = 25
+ ErrInvalidSessionTimeout KError = 26
+ ErrRebalanceInProgress KError = 27
+ ErrInvalidCommitOffsetSize KError = 28
+ ErrTopicAuthorizationFailed KError = 29
+ ErrGroupAuthorizationFailed KError = 30
+ ErrClusterAuthorizationFailed KError = 31
+ ErrInvalidTimestamp KError = 32
+ ErrUnsupportedSASLMechanism KError = 33
+ ErrIllegalSASLState KError = 34
+ ErrUnsupportedVersion KError = 35
+ ErrTopicAlreadyExists KError = 36
+ ErrInvalidPartitions KError = 37
+ ErrInvalidReplicationFactor KError = 38
+ ErrInvalidReplicaAssignment KError = 39
+ ErrInvalidConfig KError = 40
+ ErrNotController KError = 41
+ ErrInvalidRequest KError = 42
+ ErrUnsupportedForMessageFormat KError = 43
+ ErrPolicyViolation KError = 44
+)
+
+func (err KError) Error() string {
+ // Error messages stolen/adapted from
+ // https://kafka.apache.org/protocol#protocol_error_codes
+ switch err {
+ case ErrNoError:
+ return "kafka server: Not an error, why are you printing me?"
+ case ErrUnknown:
+ return "kafka server: Unexpected (unknown?) server error."
+ case ErrOffsetOutOfRange:
+ return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
+ case ErrInvalidMessage:
+		return "kafka server: Message contents do not match its CRC."
+ case ErrUnknownTopicOrPartition:
+ return "kafka server: Request was for a topic or partition that does not exist on this broker."
+ case ErrInvalidMessageSize:
+ return "kafka server: The message has a negative size."
+ case ErrLeaderNotAvailable:
+ return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
+ case ErrNotLeaderForPartition:
+ return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
+ case ErrRequestTimedOut:
+ return "kafka server: Request exceeded the user-specified time limit in the request."
+ case ErrBrokerNotAvailable:
+ return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
+ case ErrReplicaNotAvailable:
+ return "kafka server: Replica information not available, one or more brokers are down."
+ case ErrMessageSizeTooLarge:
+ return "kafka server: Message was too large, server rejected it to avoid allocation error."
+ case ErrStaleControllerEpochCode:
+ return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
+ case ErrOffsetMetadataTooLarge:
+ return "kafka server: Specified a string larger than the configured maximum for offset metadata."
+ case ErrNetworkException:
+ return "kafka server: The server disconnected before a response was received."
+ case ErrOffsetsLoadInProgress:
+ return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
+ case ErrConsumerCoordinatorNotAvailable:
+ return "kafka server: Offset's topic has not yet been created."
+ case ErrNotCoordinatorForConsumer:
+ return "kafka server: Request was for a consumer group that is not coordinated by this broker."
+ case ErrInvalidTopic:
+ return "kafka server: The request attempted to perform an operation on an invalid topic."
+ case ErrMessageSetSizeTooLarge:
+ return "kafka server: The request included message batch larger than the configured segment size on the server."
+ case ErrNotEnoughReplicas:
+ return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
+ case ErrNotEnoughReplicasAfterAppend:
+ return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
+ case ErrInvalidRequiredAcks:
+ return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
+ case ErrIllegalGeneration:
+ return "kafka server: The provided generation id is not the current generation."
+ case ErrInconsistentGroupProtocol:
+		return "kafka server: The provided group protocol type is incompatible with the other members."
+ case ErrInvalidGroupId:
+ return "kafka server: The provided group id was empty."
+ case ErrUnknownMemberId:
+ return "kafka server: The provided member is not known in the current generation."
+ case ErrInvalidSessionTimeout:
+ return "kafka server: The provided session timeout is outside the allowed range."
+ case ErrRebalanceInProgress:
+ return "kafka server: A rebalance for the group is in progress. Please re-join the group."
+ case ErrInvalidCommitOffsetSize:
+ return "kafka server: The provided commit metadata was too large."
+ case ErrTopicAuthorizationFailed:
+ return "kafka server: The client is not authorized to access this topic."
+ case ErrGroupAuthorizationFailed:
+ return "kafka server: The client is not authorized to access this group."
+ case ErrClusterAuthorizationFailed:
+ return "kafka server: The client is not authorized to send this request type."
+ case ErrInvalidTimestamp:
+ return "kafka server: The timestamp of the message is out of acceptable range."
+ case ErrUnsupportedSASLMechanism:
+ return "kafka server: The broker does not support the requested SASL mechanism."
+ case ErrIllegalSASLState:
+ return "kafka server: Request is not valid given the current SASL state."
+ case ErrUnsupportedVersion:
+ return "kafka server: The version of API is not supported."
+ case ErrTopicAlreadyExists:
+ return "kafka server: Topic with this name already exists."
+ case ErrInvalidPartitions:
+ return "kafka server: Number of partitions is invalid."
+ case ErrInvalidReplicationFactor:
+ return "kafka server: Replication-factor is invalid."
+ case ErrInvalidReplicaAssignment:
+ return "kafka server: Replica assignment is invalid."
+ case ErrInvalidConfig:
+ return "kafka server: Configuration is invalid."
+ case ErrNotController:
+ return "kafka server: This is not the correct controller for this cluster."
+ case ErrInvalidRequest:
+ return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details."
+ case ErrUnsupportedForMessageFormat:
+ return "kafka server: The requested operation is not supported by the message format version."
+ case ErrPolicyViolation:
+ return "kafka server: Request parameters do not satisfy the configured policy."
+ }
+
+ return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
+}
diff --git a/vendor/github.com/Shopify/sarama/examples/README.md b/vendor/github.com/Shopify/sarama/examples/README.md
new file mode 100644
index 000000000..85fecefd8
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/examples/README.md
@@ -0,0 +1,9 @@
+# Sarama examples
+
+This folder contains example applications to demonstrate the use of Sarama. For code snippet examples on how to use the different types in Sarama, see [Sarama's API documentation on godoc.org](https://godoc.org/github.com/Shopify/sarama)
+
+In these examples, we use `github.com/Shopify/sarama` as the import path. We do this to ensure all the examples are up to date with the latest changes in Sarama. For your own applications, you may want to use `gopkg.in/Shopify/sarama.v1` to lock into a stable API version.
+
+#### HTTP server
+
+[http_server](./http_server) is a simple HTTP server that uses the sync producer to produce data as part of the request handling cycle, and the async producer to maintain an access log. It also uses the [mocks subpackage](https://godoc.org/github.com/Shopify/sarama/mocks) to test both.
diff --git a/vendor/github.com/Shopify/sarama/examples/http_server/.gitignore b/vendor/github.com/Shopify/sarama/examples/http_server/.gitignore
new file mode 100644
index 000000000..9f6ed425f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/examples/http_server/.gitignore
@@ -0,0 +1,2 @@
+http_server
+http_server.test
diff --git a/vendor/github.com/Shopify/sarama/examples/http_server/README.md b/vendor/github.com/Shopify/sarama/examples/http_server/README.md
new file mode 100644
index 000000000..5ff2bc253
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/examples/http_server/README.md
@@ -0,0 +1,7 @@
+# HTTP server example
+
+This HTTP server example shows you how to use the AsyncProducer and SyncProducer, and how to test them using mocks. The server simply sends the data of the HTTP request's query string to Kafka, and sends a 200 result if that succeeds. For every request, it also sends an access log entry to Kafka in the background.
+
+If you need to know whether a message was successfully sent to the Kafka cluster before you can send your HTTP response, using the `SyncProducer` is probably the simplest way to achieve this. If you don't care (e.g. for the access log), using the `AsyncProducer` will let you fire and forget: you can send the HTTP response while the message is being produced in the background.
+
+One important thing to note is that both the `SyncProducer` and `AsyncProducer` are **thread-safe**. Go's `http.Server` handles requests concurrently in different goroutines, but you can use a single producer safely. This will actually achieve efficiency gains as the producer will be able to batch messages from concurrent requests together.
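+
+As a rough sketch of that pattern (this is not the example code itself; the broker address and topic names below are placeholders), a handler can block on the `SyncProducer` for the data it must acknowledge and hand the access-log entry to the `AsyncProducer` without waiting:
+
+```go
+package main
+
+import (
+	"log"
+	"net/http"
+
+	"github.com/Shopify/sarama"
+)
+
+func main() {
+	brokers := []string{"localhost:9092"} // placeholder broker list
+
+	config := sarama.NewConfig()
+	config.Producer.Return.Successes = true // the SyncProducer requires this
+
+	syncProducer, err := sarama.NewSyncProducer(brokers, config)
+	if err != nil {
+		log.Fatalln(err)
+	}
+	asyncProducer, err := sarama.NewAsyncProducer(brokers, sarama.NewConfig())
+	if err != nil {
+		log.Fatalln(err)
+	}
+	// Drain the async producer's error channel so failed sends are at least logged.
+	go func() {
+		for err := range asyncProducer.Errors() {
+			log.Println("failed to write access log entry:", err)
+		}
+	}()
+
+	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+		// Wait for Kafka to accept the data before answering the request.
+		if _, _, err := syncProducer.SendMessage(&sarama.ProducerMessage{
+			Topic: "important", // placeholder topic
+			Value: sarama.StringEncoder(r.URL.RawQuery),
+		}); err != nil {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+		w.WriteHeader(http.StatusOK)
+
+		// Fire and forget: the access log entry is produced in the background.
+		asyncProducer.Input() <- &sarama.ProducerMessage{
+			Topic: "access_log", // placeholder topic
+			Value: sarama.StringEncoder(r.RemoteAddr),
+		}
+	})
+
+	log.Fatal(http.ListenAndServe(":8080", nil))
+}
+```
+
+The full example in this directory does the same thing with JSON-encoded access-log entries, optional TLS, and mock-based tests.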
diff --git a/vendor/github.com/Shopify/sarama/examples/http_server/http_server.go b/vendor/github.com/Shopify/sarama/examples/http_server/http_server.go
new file mode 100644
index 000000000..b6d83c5dc
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/examples/http_server/http_server.go
@@ -0,0 +1,247 @@
+package main
+
+import (
+ "github.com/Shopify/sarama"
+
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+)
+
+var (
+ addr = flag.String("addr", ":8080", "The address to bind to")
+ brokers = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The Kafka brokers to connect to, as a comma separated list")
+ verbose = flag.Bool("verbose", false, "Turn on Sarama logging")
+ certFile = flag.String("certificate", "", "The optional certificate file for client authentication")
+ keyFile = flag.String("key", "", "The optional key file for client authentication")
+ caFile = flag.String("ca", "", "The optional certificate authority file for TLS client authentication")
+ verifySsl = flag.Bool("verify", false, "Optional verify ssl certificates chain")
+)
+
+func main() {
+ flag.Parse()
+
+ if *verbose {
+ sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
+ }
+
+ if *brokers == "" {
+ flag.PrintDefaults()
+ os.Exit(1)
+ }
+
+ brokerList := strings.Split(*brokers, ",")
+ log.Printf("Kafka brokers: %s", strings.Join(brokerList, ", "))
+
+ server := &Server{
+ DataCollector: newDataCollector(brokerList),
+ AccessLogProducer: newAccessLogProducer(brokerList),
+ }
+ defer func() {
+ if err := server.Close(); err != nil {
+ log.Println("Failed to close server", err)
+ }
+ }()
+
+ log.Fatal(server.Run(*addr))
+}
+
+func createTlsConfiguration() (t *tls.Config) {
+ if *certFile != "" && *keyFile != "" && *caFile != "" {
+ cert, err := tls.LoadX509KeyPair(*certFile, *keyFile)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ caCert, err := ioutil.ReadFile(*caFile)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ caCertPool := x509.NewCertPool()
+ caCertPool.AppendCertsFromPEM(caCert)
+
+ t = &tls.Config{
+ Certificates: []tls.Certificate{cert},
+ RootCAs: caCertPool,
+ InsecureSkipVerify: *verifySsl,
+ }
+ }
+ // will be nil by default if nothing is provided
+ return t
+}
+
+type Server struct {
+ DataCollector sarama.SyncProducer
+ AccessLogProducer sarama.AsyncProducer
+}
+
+func (s *Server) Close() error {
+ if err := s.DataCollector.Close(); err != nil {
+ log.Println("Failed to shut down data collector cleanly", err)
+ }
+
+ if err := s.AccessLogProducer.Close(); err != nil {
+ log.Println("Failed to shut down access log producer cleanly", err)
+ }
+
+ return nil
+}
+
+func (s *Server) Handler() http.Handler {
+ return s.withAccessLog(s.collectQueryStringData())
+}
+
+func (s *Server) Run(addr string) error {
+ httpServer := &http.Server{
+ Addr: addr,
+ Handler: s.Handler(),
+ }
+
+ log.Printf("Listening for requests on %s...\n", addr)
+ return httpServer.ListenAndServe()
+}
+
+func (s *Server) collectQueryStringData() http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path != "/" {
+ http.NotFound(w, r)
+ return
+ }
+
+ // We are not setting a message key, which means that all messages will
+ // be distributed randomly over the different partitions.
+ partition, offset, err := s.DataCollector.SendMessage(&sarama.ProducerMessage{
+ Topic: "important",
+ Value: sarama.StringEncoder(r.URL.RawQuery),
+ })
+
+ if err != nil {
+ w.WriteHeader(http.StatusInternalServerError)
+			fmt.Fprintf(w, "Failed to store your data: %s", err)
+ } else {
+ // The tuple (topic, partition, offset) can be used as a unique identifier
+ // for a message in a Kafka cluster.
+ fmt.Fprintf(w, "Your data is stored with unique identifier important/%d/%d", partition, offset)
+ }
+ })
+}
+
+type accessLogEntry struct {
+ Method string `json:"method"`
+ Host string `json:"host"`
+ Path string `json:"path"`
+ IP string `json:"ip"`
+ ResponseTime float64 `json:"response_time"`
+
+ encoded []byte
+ err error
+}
+
+func (ale *accessLogEntry) ensureEncoded() {
+ if ale.encoded == nil && ale.err == nil {
+ ale.encoded, ale.err = json.Marshal(ale)
+ }
+}
+
+func (ale *accessLogEntry) Length() int {
+ ale.ensureEncoded()
+ return len(ale.encoded)
+}
+
+func (ale *accessLogEntry) Encode() ([]byte, error) {
+ ale.ensureEncoded()
+ return ale.encoded, ale.err
+}
+
+func (s *Server) withAccessLog(next http.Handler) http.Handler {
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ started := time.Now()
+
+ next.ServeHTTP(w, r)
+
+ entry := &accessLogEntry{
+ Method: r.Method,
+ Host: r.Host,
+ Path: r.RequestURI,
+ IP: r.RemoteAddr,
+ ResponseTime: float64(time.Since(started)) / float64(time.Second),
+ }
+
+ // We will use the client's IP address as key. This will cause
+ // all the access log entries of the same IP address to end up
+ // on the same partition.
+ s.AccessLogProducer.Input() <- &sarama.ProducerMessage{
+ Topic: "access_log",
+ Key: sarama.StringEncoder(r.RemoteAddr),
+ Value: entry,
+ }
+ })
+}
+
+func newDataCollector(brokerList []string) sarama.SyncProducer {
+
+ // For the data collector, we are looking for strong consistency semantics.
+ // Because we don't change the flush settings, sarama will try to produce messages
+ // as fast as possible to keep latency low.
+ config := sarama.NewConfig()
+ config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message
+ config.Producer.Retry.Max = 10 // Retry up to 10 times to produce the message
+ config.Producer.Return.Successes = true
+ tlsConfig := createTlsConfiguration()
+ if tlsConfig != nil {
+ config.Net.TLS.Config = tlsConfig
+ config.Net.TLS.Enable = true
+ }
+
+ // On the broker side, you may want to change the following settings to get
+ // stronger consistency guarantees:
+ // - For your broker, set `unclean.leader.election.enable` to false
+ // - For the topic, you could increase `min.insync.replicas`.
+
+ producer, err := sarama.NewSyncProducer(brokerList, config)
+ if err != nil {
+ log.Fatalln("Failed to start Sarama producer:", err)
+ }
+
+ return producer
+}
+
+func newAccessLogProducer(brokerList []string) sarama.AsyncProducer {
+
+ // For the access log, we are looking for AP semantics, with high throughput.
+ // By creating batches of compressed messages, we reduce network I/O at a cost of more latency.
+ config := sarama.NewConfig()
+ tlsConfig := createTlsConfiguration()
+ if tlsConfig != nil {
+ config.Net.TLS.Enable = true
+ config.Net.TLS.Config = tlsConfig
+ }
+ config.Producer.RequiredAcks = sarama.WaitForLocal // Only wait for the leader to ack
+ config.Producer.Compression = sarama.CompressionSnappy // Compress messages
+ config.Producer.Flush.Frequency = 500 * time.Millisecond // Flush batches every 500ms
+
+ producer, err := sarama.NewAsyncProducer(brokerList, config)
+ if err != nil {
+ log.Fatalln("Failed to start Sarama producer:", err)
+ }
+
+ // We will just log to STDOUT if we're not able to produce messages.
+ // Note: messages will only be returned here after all retry attempts are exhausted.
+ go func() {
+ for err := range producer.Errors() {
+ log.Println("Failed to write access log entry:", err)
+ }
+ }()
+
+ return producer
+}
diff --git a/vendor/github.com/Shopify/sarama/examples/http_server/http_server_test.go b/vendor/github.com/Shopify/sarama/examples/http_server/http_server_test.go
new file mode 100644
index 000000000..7b2451e28
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/examples/http_server/http_server_test.go
@@ -0,0 +1,109 @@
+package main
+
+import (
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/Shopify/sarama"
+ "github.com/Shopify/sarama/mocks"
+)
+
+// In normal operation, we expect one access log entry,
+// and one data collector entry. Let's assume both will succeed.
+// We should return an HTTP 200 status.
+func TestCollectSuccessfully(t *testing.T) {
+ dataCollectorMock := mocks.NewSyncProducer(t, nil)
+ dataCollectorMock.ExpectSendMessageAndSucceed()
+
+ accessLogProducerMock := mocks.NewAsyncProducer(t, nil)
+ accessLogProducerMock.ExpectInputAndSucceed()
+
+ // Now, use dependency injection to use the mocks.
+ s := &Server{
+ DataCollector: dataCollectorMock,
+ AccessLogProducer: accessLogProducerMock,
+ }
+
+ // The Server's Close call is important; it will call Close on
+ // the two mock producers, which will then validate whether all
+ // expectations are resolved.
+ defer safeClose(t, s)
+
+ req, err := http.NewRequest("GET", "http://example.com/?data", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res := httptest.NewRecorder()
+ s.Handler().ServeHTTP(res, req)
+
+ if res.Code != 200 {
+ t.Errorf("Expected HTTP status 200, found %d", res.Code)
+ }
+
+ if string(res.Body.Bytes()) != "Your data is stored with unique identifier important/0/1" {
+ t.Error("Unexpected response body", res.Body)
+ }
+}
+
+// Now, let's see if we handle the case of not being able to produce
+// to the data collector properly. In this case we should return a 500 status.
+func TestCollectionFailure(t *testing.T) {
+ dataCollectorMock := mocks.NewSyncProducer(t, nil)
+ dataCollectorMock.ExpectSendMessageAndFail(sarama.ErrRequestTimedOut)
+
+ accessLogProducerMock := mocks.NewAsyncProducer(t, nil)
+ accessLogProducerMock.ExpectInputAndSucceed()
+
+ s := &Server{
+ DataCollector: dataCollectorMock,
+ AccessLogProducer: accessLogProducerMock,
+ }
+ defer safeClose(t, s)
+
+ req, err := http.NewRequest("GET", "http://example.com/?data", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res := httptest.NewRecorder()
+ s.Handler().ServeHTTP(res, req)
+
+ if res.Code != 500 {
+ t.Errorf("Expected HTTP status 500, found %d", res.Code)
+ }
+}
+
+// We don't expect any data collector calls because the path is wrong,
+// so we are not setting any expectations on the dataCollectorMock. It
+// will still generate an access log entry though.
+func TestWrongPath(t *testing.T) {
+ dataCollectorMock := mocks.NewSyncProducer(t, nil)
+
+ accessLogProducerMock := mocks.NewAsyncProducer(t, nil)
+ accessLogProducerMock.ExpectInputAndSucceed()
+
+ s := &Server{
+ DataCollector: dataCollectorMock,
+ AccessLogProducer: accessLogProducerMock,
+ }
+ defer safeClose(t, s)
+
+ req, err := http.NewRequest("GET", "http://example.com/wrong?data", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res := httptest.NewRecorder()
+
+ s.Handler().ServeHTTP(res, req)
+
+ if res.Code != 404 {
+ t.Errorf("Expected HTTP status 404, found %d", res.Code)
+ }
+}
+
+func safeClose(t *testing.T, o io.Closer) {
+ if err := o.Close(); err != nil {
+ t.Error(err)
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go
new file mode 100644
index 000000000..65600e86e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/fetch_request.go
@@ -0,0 +1,150 @@
+package sarama
+
+type fetchRequestBlock struct {
+ fetchOffset int64
+ maxBytes int32
+}
+
+func (b *fetchRequestBlock) encode(pe packetEncoder) error {
+ pe.putInt64(b.fetchOffset)
+ pe.putInt32(b.maxBytes)
+ return nil
+}
+
+func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
+ if b.fetchOffset, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if b.maxBytes, err = pd.getInt32(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
+// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
+type FetchRequest struct {
+ MaxWaitTime int32
+ MinBytes int32
+ MaxBytes int32
+ Version int16
+ blocks map[string]map[int32]*fetchRequestBlock
+}
+
+func (r *FetchRequest) encode(pe packetEncoder) (err error) {
+ pe.putInt32(-1) // replica ID is always -1 for clients
+ pe.putInt32(r.MaxWaitTime)
+ pe.putInt32(r.MinBytes)
+ if r.Version == 3 {
+ pe.putInt32(r.MaxBytes)
+ }
+ err = pe.putArrayLength(len(r.blocks))
+ if err != nil {
+ return err
+ }
+ for topic, blocks := range r.blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(blocks))
+ if err != nil {
+ return err
+ }
+ for partition, block := range blocks {
+ pe.putInt32(partition)
+ err = block.encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ if _, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if r.MaxWaitTime, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if r.MinBytes, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if r.Version == 3 {
+ if r.MaxBytes, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount == 0 {
+ return nil
+ }
+ r.blocks = make(map[string]map[int32]*fetchRequestBlock)
+ for i := 0; i < topicCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.blocks[topic] = make(map[int32]*fetchRequestBlock)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ fetchBlock := &fetchRequestBlock{}
+ if err = fetchBlock.decode(pd); err != nil {
+ return err
+ }
+ r.blocks[topic][partition] = fetchBlock
+ }
+ }
+ return nil
+}
+
+func (r *FetchRequest) key() int16 {
+ return 1
+}
+
+func (r *FetchRequest) version() int16 {
+ return r.Version
+}
+
+func (r *FetchRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_9_0_0
+ case 2:
+ return V0_10_0_0
+ case 3:
+ return V0_10_1_0
+ default:
+ return minVersion
+ }
+}
+
+func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
+ if r.blocks == nil {
+ r.blocks = make(map[string]map[int32]*fetchRequestBlock)
+ }
+
+ if r.blocks[topic] == nil {
+ r.blocks[topic] = make(map[int32]*fetchRequestBlock)
+ }
+
+ tmp := new(fetchRequestBlock)
+ tmp.maxBytes = maxBytes
+ tmp.fetchOffset = fetchOffset
+
+ r.blocks[topic][partitionID] = tmp
+}
diff --git a/vendor/github.com/Shopify/sarama/fetch_request_test.go b/vendor/github.com/Shopify/sarama/fetch_request_test.go
new file mode 100644
index 000000000..32c083c7d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/fetch_request_test.go
@@ -0,0 +1,34 @@
+package sarama
+
+import "testing"
+
+var (
+ fetchRequestNoBlocks = []byte{
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+
+ fetchRequestWithProperties = []byte{
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0xEF,
+ 0x00, 0x00, 0x00, 0x00}
+
+ fetchRequestOneBlock = []byte{
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x05, 't', 'o', 'p', 'i', 'c',
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x56}
+)
+
+func TestFetchRequest(t *testing.T) {
+ request := new(FetchRequest)
+ testRequest(t, "no blocks", request, fetchRequestNoBlocks)
+
+ request.MaxWaitTime = 0x20
+ request.MinBytes = 0xEF
+ testRequest(t, "with properties", request, fetchRequestWithProperties)
+
+ request.MaxWaitTime = 0
+ request.MinBytes = 0
+ request.AddBlock("topic", 0x12, 0x34, 0x56)
+ testRequest(t, "one block", request, fetchRequestOneBlock)
+}
diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go
new file mode 100644
index 000000000..b56b166c2
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/fetch_response.go
@@ -0,0 +1,210 @@
+package sarama
+
+import "time"
+
+type FetchResponseBlock struct {
+ Err KError
+ HighWaterMarkOffset int64
+ MsgSet MessageSet
+}
+
+func (b *FetchResponseBlock) decode(pd packetDecoder) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ b.Err = KError(tmp)
+
+ b.HighWaterMarkOffset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ msgSetSize, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ msgSetDecoder, err := pd.getSubset(int(msgSetSize))
+ if err != nil {
+ return err
+ }
+ err = (&b.MsgSet).decode(msgSetDecoder)
+
+ return err
+}
+
+func (b *FetchResponseBlock) encode(pe packetEncoder) (err error) {
+ pe.putInt16(int16(b.Err))
+
+ pe.putInt64(b.HighWaterMarkOffset)
+
+ pe.push(&lengthField{})
+ err = b.MsgSet.encode(pe)
+ if err != nil {
+ return err
+ }
+ return pe.pop()
+}
+
+type FetchResponse struct {
+ Blocks map[string]map[int32]*FetchResponseBlock
+ ThrottleTime time.Duration
+ Version int16 // v1 requires 0.9+, v2 requires 0.10+
+}
+
+func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if r.Version >= 1 {
+ throttle, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ r.ThrottleTime = time.Duration(throttle) * time.Millisecond
+ }
+
+ numTopics, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
+
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(FetchResponseBlock)
+ err = block.decode(pd)
+ if err != nil {
+ return err
+ }
+ r.Blocks[name][id] = block
+ }
+ }
+
+ return nil
+}
+
+func (r *FetchResponse) encode(pe packetEncoder) (err error) {
+ if r.Version >= 1 {
+ pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+ }
+
+ err = pe.putArrayLength(len(r.Blocks))
+ if err != nil {
+ return err
+ }
+
+ for topic, partitions := range r.Blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+
+ for id, block := range partitions {
+ pe.putInt32(id)
+ err = block.encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+
+ }
+ return nil
+}
+
+func (r *FetchResponse) key() int16 {
+ return 1
+}
+
+func (r *FetchResponse) version() int16 {
+ return r.Version
+}
+
+func (r *FetchResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_9_0_0
+ case 2:
+ return V0_10_0_0
+ default:
+ return minVersion
+ }
+}
+
+func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
+ if r.Blocks == nil {
+ return nil
+ }
+
+ if r.Blocks[topic] == nil {
+ return nil
+ }
+
+ return r.Blocks[topic][partition]
+}
+
+func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
+ }
+ partitions, ok := r.Blocks[topic]
+ if !ok {
+ partitions = make(map[int32]*FetchResponseBlock)
+ r.Blocks[topic] = partitions
+ }
+ frb, ok := partitions[partition]
+ if !ok {
+ frb = new(FetchResponseBlock)
+ partitions[partition] = frb
+ }
+ frb.Err = err
+}
+
+func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
+ }
+ partitions, ok := r.Blocks[topic]
+ if !ok {
+ partitions = make(map[int32]*FetchResponseBlock)
+ r.Blocks[topic] = partitions
+ }
+ frb, ok := partitions[partition]
+ if !ok {
+ frb = new(FetchResponseBlock)
+ partitions[partition] = frb
+ }
+ var kb []byte
+ var vb []byte
+ if key != nil {
+ kb, _ = key.Encode()
+ }
+ if value != nil {
+ vb, _ = value.Encode()
+ }
+ msg := &Message{Key: kb, Value: vb}
+ msgBlock := &MessageBlock{Msg: msg, Offset: offset}
+ frb.MsgSet.Messages = append(frb.MsgSet.Messages, msgBlock)
+}
diff --git a/vendor/github.com/Shopify/sarama/fetch_response_test.go b/vendor/github.com/Shopify/sarama/fetch_response_test.go
new file mode 100644
index 000000000..52fb5a74c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/fetch_response_test.go
@@ -0,0 +1,84 @@
+package sarama
+
+import (
+ "bytes"
+ "testing"
+)
+
+var (
+ emptyFetchResponse = []byte{
+ 0x00, 0x00, 0x00, 0x00}
+
+ oneMessageFetchResponse = []byte{
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x05, 't', 'o', 'p', 'i', 'c',
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x05,
+ 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10,
+ 0x00, 0x00, 0x00, 0x1C,
+ // messageSet
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x10,
+ // message
+ 0x23, 0x96, 0x4a, 0xf7, // CRC
+ 0x00,
+ 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x00, 0x00, 0x00, 0x02, 0x00, 0xEE}
+)
+
+func TestEmptyFetchResponse(t *testing.T) {
+ response := FetchResponse{}
+ testVersionDecodable(t, "empty", &response, emptyFetchResponse, 0)
+
+ if len(response.Blocks) != 0 {
+ t.Error("Decoding produced topic blocks where there were none.")
+ }
+
+}
+
+func TestOneMessageFetchResponse(t *testing.T) {
+ response := FetchResponse{}
+ testVersionDecodable(t, "one message", &response, oneMessageFetchResponse, 0)
+
+ if len(response.Blocks) != 1 {
+ t.Fatal("Decoding produced incorrect number of topic blocks.")
+ }
+
+ if len(response.Blocks["topic"]) != 1 {
+ t.Fatal("Decoding produced incorrect number of partition blocks for topic.")
+ }
+
+ block := response.GetBlock("topic", 5)
+ if block == nil {
+ t.Fatal("GetBlock didn't return block.")
+ }
+ if block.Err != ErrOffsetOutOfRange {
+ t.Error("Decoding didn't produce correct error code.")
+ }
+ if block.HighWaterMarkOffset != 0x10101010 {
+ t.Error("Decoding didn't produce correct high water mark offset.")
+ }
+ if block.MsgSet.PartialTrailingMessage {
+ t.Error("Decoding detected a partial trailing message where there wasn't one.")
+ }
+
+ if len(block.MsgSet.Messages) != 1 {
+ t.Fatal("Decoding produced incorrect number of messages.")
+ }
+ msgBlock := block.MsgSet.Messages[0]
+ if msgBlock.Offset != 0x550000 {
+ t.Error("Decoding produced incorrect message offset.")
+ }
+ msg := msgBlock.Msg
+ if msg.Codec != CompressionNone {
+ t.Error("Decoding produced incorrect message compression.")
+ }
+ if msg.Key != nil {
+ t.Error("Decoding produced message key where there was none.")
+ }
+ if !bytes.Equal(msg.Value, []byte{0x00, 0xEE}) {
+ t.Error("Decoding produced incorrect message value.")
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/functional_client_test.go b/vendor/github.com/Shopify/sarama/functional_client_test.go
new file mode 100644
index 000000000..2bf99d252
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/functional_client_test.go
@@ -0,0 +1,90 @@
+package sarama
+
+import (
+ "fmt"
+ "testing"
+ "time"
+)
+
+func TestFuncConnectionFailure(t *testing.T) {
+ setupFunctionalTest(t)
+ defer teardownFunctionalTest(t)
+
+ Proxies["kafka1"].Enabled = false
+ SaveProxy(t, "kafka1")
+
+ config := NewConfig()
+ config.Metadata.Retry.Max = 1
+
+ _, err := NewClient([]string{kafkaBrokers[0]}, config)
+ if err != ErrOutOfBrokers {
+ t.Fatal("Expected returned error to be ErrOutOfBrokers, but was: ", err)
+ }
+}
+
+func TestFuncClientMetadata(t *testing.T) {
+ setupFunctionalTest(t)
+ defer teardownFunctionalTest(t)
+
+ config := NewConfig()
+ config.Metadata.Retry.Max = 1
+ config.Metadata.Retry.Backoff = 10 * time.Millisecond
+ client, err := NewClient(kafkaBrokers, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := client.RefreshMetadata("unknown_topic"); err != ErrUnknownTopicOrPartition {
+ t.Error("Expected ErrUnknownTopicOrPartition, got", err)
+ }
+
+ if _, err := client.Leader("unknown_topic", 0); err != ErrUnknownTopicOrPartition {
+ t.Error("Expected ErrUnknownTopicOrPartition, got", err)
+ }
+
+ if _, err := client.Replicas("invalid/topic", 0); err != ErrUnknownTopicOrPartition {
+ t.Error("Expected ErrUnknownTopicOrPartition, got", err)
+ }
+
+ partitions, err := client.Partitions("test.4")
+ if err != nil {
+ t.Error(err)
+ }
+ if len(partitions) != 4 {
+ t.Errorf("Expected test.4 topic to have 4 partitions, found %v", partitions)
+ }
+
+ partitions, err = client.Partitions("test.1")
+ if err != nil {
+ t.Error(err)
+ }
+ if len(partitions) != 1 {
+		t.Errorf("Expected test.1 topic to have 1 partition, found %v", partitions)
+ }
+
+ safeClose(t, client)
+}
+
+func TestFuncClientCoordinator(t *testing.T) {
+ checkKafkaVersion(t, "0.8.2")
+ setupFunctionalTest(t)
+ defer teardownFunctionalTest(t)
+
+ client, err := NewClient(kafkaBrokers, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for i := 0; i < 10; i++ {
+ broker, err := client.Coordinator(fmt.Sprintf("another_new_consumer_group_%d", i))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if connected, err := broker.Connected(); !connected || err != nil {
+			t.Errorf("Expected coordinator broker %s to be properly connected.", broker.Addr())
+ }
+ }
+
+ safeClose(t, client)
+}
diff --git a/vendor/github.com/Shopify/sarama/functional_consumer_test.go b/vendor/github.com/Shopify/sarama/functional_consumer_test.go
new file mode 100644
index 000000000..ab8433109
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/functional_consumer_test.go
@@ -0,0 +1,61 @@
+package sarama
+
+import (
+ "math"
+ "testing"
+)
+
+func TestFuncConsumerOffsetOutOfRange(t *testing.T) {
+ setupFunctionalTest(t)
+ defer teardownFunctionalTest(t)
+
+ consumer, err := NewConsumer(kafkaBrokers, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := consumer.ConsumePartition("test.1", 0, -10); err != ErrOffsetOutOfRange {
+ t.Error("Expected ErrOffsetOutOfRange, got:", err)
+ }
+
+ if _, err := consumer.ConsumePartition("test.1", 0, math.MaxInt64); err != ErrOffsetOutOfRange {
+ t.Error("Expected ErrOffsetOutOfRange, got:", err)
+ }
+
+ safeClose(t, consumer)
+}
+
+func TestConsumerHighWaterMarkOffset(t *testing.T) {
+ setupFunctionalTest(t)
+ defer teardownFunctionalTest(t)
+
+ p, err := NewSyncProducer(kafkaBrokers, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer safeClose(t, p)
+
+ _, offset, err := p.SendMessage(&ProducerMessage{Topic: "test.1", Value: StringEncoder("Test")})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ c, err := NewConsumer(kafkaBrokers, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer safeClose(t, c)
+
+ pc, err := c.ConsumePartition("test.1", 0, OffsetOldest)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ <-pc.Messages()
+
+ if hwmo := pc.HighWaterMarkOffset(); hwmo != offset+1 {
+ t.Logf("Last produced offset %d; high water mark should be one higher but found %d.", offset, hwmo)
+ }
+
+ safeClose(t, pc)
+}
diff --git a/vendor/github.com/Shopify/sarama/functional_offset_manager_test.go b/vendor/github.com/Shopify/sarama/functional_offset_manager_test.go
new file mode 100644
index 000000000..436f35ef4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/functional_offset_manager_test.go
@@ -0,0 +1,47 @@
+package sarama
+
+import (
+ "testing"
+)
+
+func TestFuncOffsetManager(t *testing.T) {
+ checkKafkaVersion(t, "0.8.2")
+ setupFunctionalTest(t)
+ defer teardownFunctionalTest(t)
+
+ client, err := NewClient(kafkaBrokers, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ offsetManager, err := NewOffsetManagerFromClient("sarama.TestFuncOffsetManager", client)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pom1, err := offsetManager.ManagePartition("test.1", 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pom1.MarkOffset(10, "test metadata")
+ safeClose(t, pom1)
+
+ pom2, err := offsetManager.ManagePartition("test.1", 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ offset, metadata := pom2.NextOffset()
+
+ if offset != 10 {
+ t.Errorf("Expected the next offset to be 10, found %d.", offset)
+ }
+ if metadata != "test metadata" {
+ t.Errorf("Expected metadata to be 'test metadata', found %s.", metadata)
+ }
+
+ safeClose(t, pom2)
+ safeClose(t, offsetManager)
+ safeClose(t, client)
+}
diff --git a/vendor/github.com/Shopify/sarama/functional_producer_test.go b/vendor/github.com/Shopify/sarama/functional_producer_test.go
new file mode 100644
index 000000000..91bf3b5ee
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/functional_producer_test.go
@@ -0,0 +1,323 @@
+package sarama
+
+import (
+ "fmt"
+ "os"
+ "sync"
+ "testing"
+ "time"
+
+ toxiproxy "github.com/Shopify/toxiproxy/client"
+ "github.com/rcrowley/go-metrics"
+)
+
+const TestBatchSize = 1000
+
+func TestFuncProducing(t *testing.T) {
+ config := NewConfig()
+ testProducingMessages(t, config)
+}
+
+func TestFuncProducingGzip(t *testing.T) {
+ config := NewConfig()
+ config.Producer.Compression = CompressionGZIP
+ testProducingMessages(t, config)
+}
+
+func TestFuncProducingSnappy(t *testing.T) {
+ config := NewConfig()
+ config.Producer.Compression = CompressionSnappy
+ testProducingMessages(t, config)
+}
+
+func TestFuncProducingNoResponse(t *testing.T) {
+ config := NewConfig()
+ config.Producer.RequiredAcks = NoResponse
+ testProducingMessages(t, config)
+}
+
+func TestFuncProducingFlushing(t *testing.T) {
+ config := NewConfig()
+ config.Producer.Flush.Messages = TestBatchSize / 8
+ config.Producer.Flush.Frequency = 250 * time.Millisecond
+ testProducingMessages(t, config)
+}
+
+func TestFuncMultiPartitionProduce(t *testing.T) {
+ setupFunctionalTest(t)
+ defer teardownFunctionalTest(t)
+
+ config := NewConfig()
+ config.ChannelBufferSize = 20
+ config.Producer.Flush.Frequency = 50 * time.Millisecond
+ config.Producer.Flush.Messages = 200
+ config.Producer.Return.Successes = true
+ producer, err := NewSyncProducer(kafkaBrokers, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var wg sync.WaitGroup
+ wg.Add(TestBatchSize)
+
+ for i := 1; i <= TestBatchSize; i++ {
+ go func(i int) {
+ defer wg.Done()
+ msg := &ProducerMessage{Topic: "test.64", Key: nil, Value: StringEncoder(fmt.Sprintf("hur %d", i))}
+ if _, _, err := producer.SendMessage(msg); err != nil {
+ t.Error(i, err)
+ }
+ }(i)
+ }
+
+ wg.Wait()
+ if err := producer.Close(); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestFuncProducingToInvalidTopic(t *testing.T) {
+ setupFunctionalTest(t)
+ defer teardownFunctionalTest(t)
+
+ producer, err := NewSyncProducer(kafkaBrokers, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, _, err := producer.SendMessage(&ProducerMessage{Topic: "in/valid"}); err != ErrUnknownTopicOrPartition {
+ t.Error("Expected ErrUnknownTopicOrPartition, found", err)
+ }
+
+ if _, _, err := producer.SendMessage(&ProducerMessage{Topic: "in/valid"}); err != ErrUnknownTopicOrPartition {
+ t.Error("Expected ErrUnknownTopicOrPartition, found", err)
+ }
+
+ safeClose(t, producer)
+}
+
+func testProducingMessages(t *testing.T, config *Config) {
+ setupFunctionalTest(t)
+ defer teardownFunctionalTest(t)
+
+ // Configure some latency in order to properly validate the request latency metric
+ for _, proxy := range Proxies {
+ if _, err := proxy.AddToxic("", "latency", "", 1, toxiproxy.Attributes{"latency": 10}); err != nil {
+ t.Fatal("Unable to configure latency toxicity", err)
+ }
+ }
+
+ config.Producer.Return.Successes = true
+ config.Consumer.Return.Errors = true
+
+ client, err := NewClient(kafkaBrokers, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Remember the current offset so we can start consuming from it later
+ initialOffset, err := client.GetOffset("test.1", 0, OffsetNewest)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ producer, err := NewAsyncProducerFromClient(client)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedResponses := TestBatchSize
+ for i := 1; i <= TestBatchSize; {
+ msg := &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder(fmt.Sprintf("testing %d", i))}
+ select {
+ case producer.Input() <- msg:
+ i++
+ case ret := <-producer.Errors():
+ t.Fatal(ret.Err)
+ case <-producer.Successes():
+ expectedResponses--
+ }
+ }
+ for expectedResponses > 0 {
+ select {
+ case ret := <-producer.Errors():
+ t.Fatal(ret.Err)
+ case <-producer.Successes():
+ expectedResponses--
+ }
+ }
+ safeClose(t, producer)
+
+ // Validate the producer metrics before using the consumer (the initial offset request aside)
+ validateMetrics(t, client)
+
+ master, err := NewConsumerFromClient(client)
+ if err != nil {
+ t.Fatal(err)
+ }
+ consumer, err := master.ConsumePartition("test.1", 0, initialOffset)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for i := 1; i <= TestBatchSize; i++ {
+ select {
+ case <-time.After(10 * time.Second):
+ t.Fatal("Not received any more events in the last 10 seconds.")
+
+ case err := <-consumer.Errors():
+ t.Error(err)
+
+ case message := <-consumer.Messages():
+ if string(message.Value) != fmt.Sprintf("testing %d", i) {
+ t.Fatalf("Unexpected message with index %d: %s", i, message.Value)
+ }
+ }
+
+ }
+ safeClose(t, consumer)
+ safeClose(t, client)
+}
+
+func validateMetrics(t *testing.T, client Client) {
+ // Get the broker used by test1 topic
+ var broker *Broker
+ if partitions, err := client.Partitions("test.1"); err != nil {
+ t.Error(err)
+ } else {
+ for _, partition := range partitions {
+ if b, err := client.Leader("test.1", partition); err != nil {
+ t.Error(err)
+ } else {
+ if broker != nil && b != broker {
+ t.Fatal("Expected only one broker, got at least 2")
+ }
+ broker = b
+ }
+ }
+ }
+
+ metricValidators := newMetricValidators()
+ noResponse := client.Config().Producer.RequiredAcks == NoResponse
+ compressionEnabled := client.Config().Producer.Compression != CompressionNone
+
+ // We are adding 10ms of latency to all requests with toxiproxy
+ minRequestLatencyInMs := 10
+ if noResponse {
+ // but when we do not wait for a response it can be less than 1ms
+ minRequestLatencyInMs = 0
+ }
+
+ // We read at least 1 byte from the broker
+ metricValidators.registerForAllBrokers(broker, minCountMeterValidator("incoming-byte-rate", 1))
+ // in at least 3 global requests (1 for metadata request, 1 for offset request and N for produce request)
+ metricValidators.register(minCountMeterValidator("request-rate", 3))
+ metricValidators.register(minCountHistogramValidator("request-size", 3))
+ metricValidators.register(minValHistogramValidator("request-size", 1))
+ metricValidators.register(minValHistogramValidator("request-latency-in-ms", minRequestLatencyInMs))
+ // and at least 2 requests to the registered broker (offset + produces)
+ metricValidators.registerForBroker(broker, minCountMeterValidator("request-rate", 2))
+ metricValidators.registerForBroker(broker, minCountHistogramValidator("request-size", 2))
+ metricValidators.registerForBroker(broker, minValHistogramValidator("request-size", 1))
+ metricValidators.registerForBroker(broker, minValHistogramValidator("request-latency-in-ms", minRequestLatencyInMs))
+
+ // We send at least 1 batch
+ metricValidators.registerForGlobalAndTopic("test_1", minCountHistogramValidator("batch-size", 1))
+ metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("batch-size", 1))
+ if compressionEnabled {
+ // We record compression ratios between [0.50, 10.00] (50-1000 with a histogram) for at least one "fake" record
+ metricValidators.registerForGlobalAndTopic("test_1", minCountHistogramValidator("compression-ratio", 1))
+ metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("compression-ratio", 50))
+ metricValidators.registerForGlobalAndTopic("test_1", maxValHistogramValidator("compression-ratio", 1000))
+ } else {
+ // We record compression ratios of 1.00 (100 with a histogram) for every TestBatchSize record
+ metricValidators.registerForGlobalAndTopic("test_1", countHistogramValidator("compression-ratio", TestBatchSize))
+ metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("compression-ratio", 100))
+ metricValidators.registerForGlobalAndTopic("test_1", maxValHistogramValidator("compression-ratio", 100))
+ }
+
+ // We send exactly TestBatchSize messages
+ metricValidators.registerForGlobalAndTopic("test_1", countMeterValidator("record-send-rate", TestBatchSize))
+ // We send at least one record per request
+ metricValidators.registerForGlobalAndTopic("test_1", minCountHistogramValidator("records-per-request", 1))
+ metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("records-per-request", 1))
+
+ // We receive at least 1 byte from the broker
+ metricValidators.registerForAllBrokers(broker, minCountMeterValidator("outgoing-byte-rate", 1))
+ if noResponse {
+ // in exactly 2 global responses (metadata + offset)
+ metricValidators.register(countMeterValidator("response-rate", 2))
+ metricValidators.register(minCountHistogramValidator("response-size", 2))
+ metricValidators.register(minValHistogramValidator("response-size", 1))
+ // and exactly 1 offset response for the registered broker
+ metricValidators.registerForBroker(broker, countMeterValidator("response-rate", 1))
+ metricValidators.registerForBroker(broker, minCountHistogramValidator("response-size", 1))
+ metricValidators.registerForBroker(broker, minValHistogramValidator("response-size", 1))
+ } else {
+ // in at least 3 global responses (metadata + offset + produces)
+ metricValidators.register(minCountMeterValidator("response-rate", 3))
+ metricValidators.register(minCountHistogramValidator("response-size", 3))
+ metricValidators.register(minValHistogramValidator("response-size", 1))
+ // and at least 2 for the registered broker
+ metricValidators.registerForBroker(broker, minCountMeterValidator("response-rate", 2))
+ metricValidators.registerForBroker(broker, minCountHistogramValidator("response-size", 2))
+ metricValidators.registerForBroker(broker, minValHistogramValidator("response-size", 1))
+ }
+
+ // Run the validators
+ metricValidators.run(t, client.Config().MetricRegistry)
+}
+
+// Benchmarks
+
+func BenchmarkProducerSmall(b *testing.B) {
+ benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 128)))
+}
+func BenchmarkProducerMedium(b *testing.B) {
+ benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 1024)))
+}
+func BenchmarkProducerLarge(b *testing.B) {
+ benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 8192)))
+}
+func BenchmarkProducerSmallSinglePartition(b *testing.B) {
+ benchmarkProducer(b, nil, "test.1", ByteEncoder(make([]byte, 128)))
+}
+func BenchmarkProducerMediumSnappy(b *testing.B) {
+ conf := NewConfig()
+ conf.Producer.Compression = CompressionSnappy
+ benchmarkProducer(b, conf, "test.1", ByteEncoder(make([]byte, 1024)))
+}
+
+func benchmarkProducer(b *testing.B, conf *Config, topic string, value Encoder) {
+ setupFunctionalTest(b)
+ defer teardownFunctionalTest(b)
+
+ metricsDisable := os.Getenv("METRICS_DISABLE")
+ if metricsDisable != "" {
+ previousUseNilMetrics := metrics.UseNilMetrics
+ Logger.Println("Disabling metrics using no-op implementation")
+ metrics.UseNilMetrics = true
+ // Restore previous setting
+ defer func() {
+ metrics.UseNilMetrics = previousUseNilMetrics
+ }()
+ }
+
+ producer, err := NewAsyncProducer(kafkaBrokers, conf)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ b.ResetTimer()
+
+ for i := 1; i <= b.N; {
+ msg := &ProducerMessage{Topic: topic, Key: StringEncoder(fmt.Sprintf("%d", i)), Value: value}
+ select {
+ case producer.Input() <- msg:
+ i++
+ case ret := <-producer.Errors():
+ b.Fatal(ret.Err)
+ }
+ }
+ safeClose(b, producer)
+}
diff --git a/vendor/github.com/Shopify/sarama/functional_test.go b/vendor/github.com/Shopify/sarama/functional_test.go
new file mode 100644
index 000000000..846eb29f9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/functional_test.go
@@ -0,0 +1,148 @@
+package sarama
+
+import (
+ "log"
+ "math/rand"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ toxiproxy "github.com/Shopify/toxiproxy/client"
+)
+
+const (
+ VagrantToxiproxy = "http://192.168.100.67:8474"
+ VagrantKafkaPeers = "192.168.100.67:9091,192.168.100.67:9092,192.168.100.67:9093,192.168.100.67:9094,192.168.100.67:9095"
+ VagrantZookeeperPeers = "192.168.100.67:2181,192.168.100.67:2182,192.168.100.67:2183,192.168.100.67:2184,192.168.100.67:2185"
+)
+
+var (
+ kafkaAvailable, kafkaRequired bool
+ kafkaBrokers []string
+
+ proxyClient *toxiproxy.Client
+ Proxies map[string]*toxiproxy.Proxy
+ ZKProxies = []string{"zk1", "zk2", "zk3", "zk4", "zk5"}
+ KafkaProxies = []string{"kafka1", "kafka2", "kafka3", "kafka4", "kafka5"}
+)
+
+func init() {
+ if os.Getenv("DEBUG") == "true" {
+ Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
+ }
+
+ seed := time.Now().UTC().UnixNano()
+ if tmp := os.Getenv("TEST_SEED"); tmp != "" {
+ seed, _ = strconv.ParseInt(tmp, 0, 64)
+ }
+ Logger.Println("Using random seed:", seed)
+ rand.Seed(seed)
+
+ proxyAddr := os.Getenv("TOXIPROXY_ADDR")
+ if proxyAddr == "" {
+ proxyAddr = VagrantToxiproxy
+ }
+ proxyClient = toxiproxy.NewClient(proxyAddr)
+
+ kafkaPeers := os.Getenv("KAFKA_PEERS")
+ if kafkaPeers == "" {
+ kafkaPeers = VagrantKafkaPeers
+ }
+ kafkaBrokers = strings.Split(kafkaPeers, ",")
+
+ if c, err := net.DialTimeout("tcp", kafkaBrokers[0], 5*time.Second); err == nil {
+ if err = c.Close(); err == nil {
+ kafkaAvailable = true
+ }
+ }
+
+ kafkaRequired = os.Getenv("CI") != ""
+}
+
+func checkKafkaAvailability(t testing.TB) {
+ if !kafkaAvailable {
+ if kafkaRequired {
+ t.Fatalf("Kafka broker is not available on %s. Set KAFKA_PEERS to connect to Kafka on a different location.", kafkaBrokers[0])
+ } else {
+ t.Skipf("Kafka broker is not available on %s. Set KAFKA_PEERS to connect to Kafka on a different location.", kafkaBrokers[0])
+ }
+ }
+}
+
+func checkKafkaVersion(t testing.TB, requiredVersion string) {
+ kafkaVersion := os.Getenv("KAFKA_VERSION")
+ if kafkaVersion == "" {
+ t.Logf("No KAFKA_VERSION set. This test requires Kafka version %s or higher. Continuing...", requiredVersion)
+ } else {
+ available := parseKafkaVersion(kafkaVersion)
+ required := parseKafkaVersion(requiredVersion)
+ if !available.satisfies(required) {
+ t.Skipf("Kafka version %s is required for this test; you have %s. Skipping...", requiredVersion, kafkaVersion)
+ }
+ }
+}
+
+func resetProxies(t testing.TB) {
+ if err := proxyClient.ResetState(); err != nil {
+ t.Error(err)
+ }
+ Proxies = nil
+}
+
+func fetchProxies(t testing.TB) {
+ var err error
+ Proxies, err = proxyClient.Proxies()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func SaveProxy(t *testing.T, px string) {
+ if err := Proxies[px].Save(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func setupFunctionalTest(t testing.TB) {
+ checkKafkaAvailability(t)
+ resetProxies(t)
+ fetchProxies(t)
+}
+
+func teardownFunctionalTest(t testing.TB) {
+ resetProxies(t)
+}
+
+type kafkaVersion []int
+
+func (kv kafkaVersion) satisfies(other kafkaVersion) bool {
+ var ov int
+ for index, v := range kv {
+ if len(other) <= index {
+ ov = 0
+ } else {
+ ov = other[index]
+ }
+
+ if v < ov {
+ return false
+ } else if v > ov {
+ return true
+ }
+ }
+ return true
+}
+
+func parseKafkaVersion(version string) kafkaVersion {
+ numbers := strings.Split(version, ".")
+ result := make(kafkaVersion, 0, len(numbers))
+ for _, number := range numbers {
+ nr, _ := strconv.Atoi(number)
+ result = append(result, nr)
+ }
+
+ return result
+}
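
Note: the version gate used by checkKafkaVersion above compares dotted Kafka versions component by component, treating components missing from the required version as zero. The following standalone Go sketch is not part of the vendored diff; it simply mirrors the two helpers above to show the resulting semantics:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

type kafkaVersion []int

// parseKafkaVersion splits a dotted version string into its numeric components.
func parseKafkaVersion(version string) kafkaVersion {
	numbers := strings.Split(version, ".")
	result := make(kafkaVersion, 0, len(numbers))
	for _, number := range numbers {
		nr, _ := strconv.Atoi(number)
		result = append(result, nr)
	}
	return result
}

// satisfies reports whether kv is at least as new as other, padding missing components with zero.
func (kv kafkaVersion) satisfies(other kafkaVersion) bool {
	var ov int
	for index, v := range kv {
		if len(other) <= index {
			ov = 0
		} else {
			ov = other[index]
		}
		if v < ov {
			return false
		} else if v > ov {
			return true
		}
	}
	return true
}

func main() {
	// 0.10.1 satisfies 0.8.2: the second component 10 already exceeds 8.
	fmt.Println(parseKafkaVersion("0.10.1").satisfies(parseKafkaVersion("0.8.2"))) // true
	// 0.8.2 does not satisfy 0.9: 8 is less than 9 at the second component.
	fmt.Println(parseKafkaVersion("0.8.2").satisfies(parseKafkaVersion("0.9"))) // false
	// Missing trailing components count as zero, so 0.9 satisfies 0.9.0.0.
	fmt.Println(parseKafkaVersion("0.9").satisfies(parseKafkaVersion("0.9.0.0"))) // true
}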
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go
new file mode 100644
index 000000000..ce49c4739
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/heartbeat_request.go
@@ -0,0 +1,47 @@
+package sarama
+
+type HeartbeatRequest struct {
+ GroupId string
+ GenerationId int32
+ MemberId string
+}
+
+func (r *HeartbeatRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.GroupId); err != nil {
+ return err
+ }
+
+ pe.putInt32(r.GenerationId)
+
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) {
+ if r.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+ if r.GenerationId, err = pd.getInt32(); err != nil {
+ return
+ }
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ return nil
+}
+
+func (r *HeartbeatRequest) key() int16 {
+ return 12
+}
+
+func (r *HeartbeatRequest) version() int16 {
+ return 0
+}
+
+func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request_test.go b/vendor/github.com/Shopify/sarama/heartbeat_request_test.go
new file mode 100644
index 000000000..da6cd18f5
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/heartbeat_request_test.go
@@ -0,0 +1,21 @@
+package sarama
+
+import "testing"
+
+var (
+ basicHeartbeatRequest = []byte{
+ 0, 3, 'f', 'o', 'o', // Group ID
+ 0x00, 0x01, 0x02, 0x03, // Generation ID
+ 0, 3, 'b', 'a', 'z', // Member ID
+ }
+)
+
+func TestHeartbeatRequest(t *testing.T) {
+ var request *HeartbeatRequest
+
+ request = new(HeartbeatRequest)
+ request.GroupId = "foo"
+ request.GenerationId = 66051
+ request.MemberId = "baz"
+ testRequest(t, "basic", request, basicHeartbeatRequest)
+}
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go
new file mode 100644
index 000000000..766f5fdec
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/heartbeat_response.go
@@ -0,0 +1,32 @@
+package sarama
+
+type HeartbeatResponse struct {
+ Err KError
+}
+
+func (r *HeartbeatResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ return nil
+}
+
+func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.Err = KError(kerr)
+
+ return nil
+}
+
+func (r *HeartbeatResponse) key() int16 {
+ return 12
+}
+
+func (r *HeartbeatResponse) version() int16 {
+ return 0
+}
+
+func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response_test.go b/vendor/github.com/Shopify/sarama/heartbeat_response_test.go
new file mode 100644
index 000000000..5bcbec985
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/heartbeat_response_test.go
@@ -0,0 +1,18 @@
+package sarama
+
+import "testing"
+
+var (
+ heartbeatResponseNoError = []byte{
+ 0x00, 0x00}
+)
+
+func TestHeartbeatResponse(t *testing.T) {
+ var response *HeartbeatResponse
+
+ response = new(HeartbeatResponse)
+ testVersionDecodable(t, "no error", response, heartbeatResponseNoError, 0)
+ if response.Err != ErrNoError {
+ t.Error("Decoding error failed: no error expected but found", response.Err)
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go
new file mode 100644
index 000000000..3a7ba1712
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/join_group_request.go
@@ -0,0 +1,143 @@
+package sarama
+
+type GroupProtocol struct {
+ Name string
+ Metadata []byte
+}
+
+func (p *GroupProtocol) decode(pd packetDecoder) (err error) {
+ p.Name, err = pd.getString()
+ if err != nil {
+ return err
+ }
+ p.Metadata, err = pd.getBytes()
+ return err
+}
+
+func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
+ if err := pe.putString(p.Name); err != nil {
+ return err
+ }
+ if err := pe.putBytes(p.Metadata); err != nil {
+ return err
+ }
+ return nil
+}
+
+type JoinGroupRequest struct {
+ GroupId string
+ SessionTimeout int32
+ MemberId string
+ ProtocolType string
+ GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols
+ OrderedGroupProtocols []*GroupProtocol
+}
+
+func (r *JoinGroupRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.GroupId); err != nil {
+ return err
+ }
+ pe.putInt32(r.SessionTimeout)
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+ if err := pe.putString(r.ProtocolType); err != nil {
+ return err
+ }
+
+ if len(r.GroupProtocols) > 0 {
+ if len(r.OrderedGroupProtocols) > 0 {
+ return PacketEncodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"}
+ }
+
+ if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
+ return err
+ }
+ for name, metadata := range r.GroupProtocols {
+ if err := pe.putString(name); err != nil {
+ return err
+ }
+ if err := pe.putBytes(metadata); err != nil {
+ return err
+ }
+ }
+ } else {
+ if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil {
+ return err
+ }
+ for _, protocol := range r.OrderedGroupProtocols {
+ if err := protocol.encode(pe); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+ if r.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+
+ if r.SessionTimeout, err = pd.getInt32(); err != nil {
+ return
+ }
+
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ if r.ProtocolType, err = pd.getString(); err != nil {
+ return
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ r.GroupProtocols = make(map[string][]byte)
+ for i := 0; i < n; i++ {
+ protocol := &GroupProtocol{}
+ if err := protocol.decode(pd); err != nil {
+ return err
+ }
+ r.GroupProtocols[protocol.Name] = protocol.Metadata
+ r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol)
+ }
+
+ return nil
+}
+
+func (r *JoinGroupRequest) key() int16 {
+ return 11
+}
+
+func (r *JoinGroupRequest) version() int16 {
+ return 0
+}
+
+func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
+
+func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
+ r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{
+ Name: name,
+ Metadata: metadata,
+ })
+}
+
+func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
+ bin, err := encode(metadata, nil)
+ if err != nil {
+ return err
+ }
+
+ r.AddGroupProtocol(name, bin)
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/join_group_request_test.go b/vendor/github.com/Shopify/sarama/join_group_request_test.go
new file mode 100644
index 000000000..1ba3308bb
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/join_group_request_test.go
@@ -0,0 +1,57 @@
+package sarama
+
+import "testing"
+
+var (
+ joinGroupRequestNoProtocols = []byte{
+ 0, 9, 'T', 'e', 's', 't', 'G', 'r', 'o', 'u', 'p', // Group ID
+ 0, 0, 0, 100, // Session timeout
+ 0, 0, // Member ID
+ 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol Type
+ 0, 0, 0, 0, // 0 group protocols
+ }
+
+ joinGroupRequestOneProtocol = []byte{
+ 0, 9, 'T', 'e', 's', 't', 'G', 'r', 'o', 'u', 'p', // Group ID
+ 0, 0, 0, 100, // Session timeout
+ 0, 11, 'O', 'n', 'e', 'P', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Member ID
+ 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol Type
+ 0, 0, 0, 1, // 1 group protocol
+ 0, 3, 'o', 'n', 'e', // Protocol name
+ 0, 0, 0, 3, 0x01, 0x02, 0x03, // protocol metadata
+ }
+)
+
+func TestJoinGroupRequest(t *testing.T) {
+ request := new(JoinGroupRequest)
+ request.GroupId = "TestGroup"
+ request.SessionTimeout = 100
+ request.ProtocolType = "consumer"
+ testRequest(t, "no protocols", request, joinGroupRequestNoProtocols)
+}
+
+func TestJoinGroupRequestOneProtocol(t *testing.T) {
+ request := new(JoinGroupRequest)
+ request.GroupId = "TestGroup"
+ request.SessionTimeout = 100
+ request.MemberId = "OneProtocol"
+ request.ProtocolType = "consumer"
+ request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03})
+ packet := testRequestEncode(t, "one protocol", request, joinGroupRequestOneProtocol)
+ request.GroupProtocols = make(map[string][]byte)
+ request.GroupProtocols["one"] = []byte{0x01, 0x02, 0x03}
+ testRequestDecode(t, "one protocol", request, packet)
+}
+
+func TestJoinGroupRequestDeprecatedEncode(t *testing.T) {
+ request := new(JoinGroupRequest)
+ request.GroupId = "TestGroup"
+ request.SessionTimeout = 100
+ request.MemberId = "OneProtocol"
+ request.ProtocolType = "consumer"
+ request.GroupProtocols = make(map[string][]byte)
+ request.GroupProtocols["one"] = []byte{0x01, 0x02, 0x03}
+ packet := testRequestEncode(t, "one protocol", request, joinGroupRequestOneProtocol)
+ request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03})
+ testRequestDecode(t, "one protocol", request, packet)
+}
diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go
new file mode 100644
index 000000000..6d35fe364
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/join_group_response.go
@@ -0,0 +1,115 @@
+package sarama
+
+type JoinGroupResponse struct {
+ Err KError
+ GenerationId int32
+ GroupProtocol string
+ LeaderId string
+ MemberId string
+ Members map[string][]byte
+}
+
+func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) {
+ members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members))
+ for id, bin := range r.Members {
+ meta := new(ConsumerGroupMemberMetadata)
+ if err := decode(bin, meta); err != nil {
+ return nil, err
+ }
+ members[id] = *meta
+ }
+ return members, nil
+}
+
+func (r *JoinGroupResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ pe.putInt32(r.GenerationId)
+
+ if err := pe.putString(r.GroupProtocol); err != nil {
+ return err
+ }
+ if err := pe.putString(r.LeaderId); err != nil {
+ return err
+ }
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(r.Members)); err != nil {
+ return err
+ }
+
+ for memberId, memberMetadata := range r.Members {
+ if err := pe.putString(memberId); err != nil {
+ return err
+ }
+
+ if err := pe.putBytes(memberMetadata); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.Err = KError(kerr)
+
+ if r.GenerationId, err = pd.getInt32(); err != nil {
+ return
+ }
+
+ if r.GroupProtocol, err = pd.getString(); err != nil {
+ return
+ }
+
+ if r.LeaderId, err = pd.getString(); err != nil {
+ return
+ }
+
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ r.Members = make(map[string][]byte)
+ for i := 0; i < n; i++ {
+ memberId, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ memberMetadata, err := pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ r.Members[memberId] = memberMetadata
+ }
+
+ return nil
+}
+
+func (r *JoinGroupResponse) key() int16 {
+ return 11
+}
+
+func (r *JoinGroupResponse) version() int16 {
+ return 0
+}
+
+func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/join_group_response_test.go b/vendor/github.com/Shopify/sarama/join_group_response_test.go
new file mode 100644
index 000000000..ba7f71f20
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/join_group_response_test.go
@@ -0,0 +1,98 @@
+package sarama
+
+import (
+ "reflect"
+ "testing"
+)
+
+var (
+ joinGroupResponseNoError = []byte{
+ 0x00, 0x00, // No error
+ 0x00, 0x01, 0x02, 0x03, // Generation ID
+ 0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen
+ 0, 3, 'f', 'o', 'o', // Leader ID
+ 0, 3, 'b', 'a', 'r', // Member ID
+ 0, 0, 0, 0, // No member info
+ }
+
+ joinGroupResponseWithError = []byte{
+ 0, 23, // Error: inconsistent group protocol
+ 0x00, 0x00, 0x00, 0x00, // Generation ID
+ 0, 0, // Protocol name chosen
+ 0, 0, // Leader ID
+ 0, 0, // Member ID
+ 0, 0, 0, 0, // No member info
+ }
+
+ joinGroupResponseLeader = []byte{
+ 0x00, 0x00, // No error
+ 0x00, 0x01, 0x02, 0x03, // Generation ID
+ 0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen
+ 0, 3, 'f', 'o', 'o', // Leader ID
+ 0, 3, 'f', 'o', 'o', // Member ID == Leader ID
+ 0, 0, 0, 1, // 1 member
+ 0, 3, 'f', 'o', 'o', // Member ID
+ 0, 0, 0, 3, 0x01, 0x02, 0x03, // Member metadata
+ }
+)
+
+func TestJoinGroupResponse(t *testing.T) {
+ var response *JoinGroupResponse
+
+ response = new(JoinGroupResponse)
+ testVersionDecodable(t, "no error", response, joinGroupResponseNoError, 0)
+ if response.Err != ErrNoError {
+ t.Error("Decoding Err failed: no error expected but found", response.Err)
+ }
+ if response.GenerationId != 66051 {
+ t.Error("Decoding GenerationId failed, found:", response.GenerationId)
+ }
+ if response.LeaderId != "foo" {
+ t.Error("Decoding LeaderId failed, found:", response.LeaderId)
+ }
+ if response.MemberId != "bar" {
+ t.Error("Decoding MemberId failed, found:", response.MemberId)
+ }
+ if len(response.Members) != 0 {
+ t.Error("Decoding Members failed, found:", response.Members)
+ }
+
+ response = new(JoinGroupResponse)
+ testVersionDecodable(t, "with error", response, joinGroupResponseWithError, 0)
+ if response.Err != ErrInconsistentGroupProtocol {
+ t.Error("Decoding Err failed: ErrInconsistentGroupProtocol expected but found", response.Err)
+ }
+ if response.GenerationId != 0 {
+ t.Error("Decoding GenerationId failed, found:", response.GenerationId)
+ }
+ if response.LeaderId != "" {
+ t.Error("Decoding LeaderId failed, found:", response.LeaderId)
+ }
+ if response.MemberId != "" {
+ t.Error("Decoding MemberId failed, found:", response.MemberId)
+ }
+ if len(response.Members) != 0 {
+ t.Error("Decoding Members failed, found:", response.Members)
+ }
+
+ response = new(JoinGroupResponse)
+ testVersionDecodable(t, "with error", response, joinGroupResponseLeader, 0)
+ if response.Err != ErrNoError {
+ t.Error("Decoding Err failed: ErrNoError expected but found", response.Err)
+ }
+ if response.GenerationId != 66051 {
+ t.Error("Decoding GenerationId failed, found:", response.GenerationId)
+ }
+ if response.LeaderId != "foo" {
+ t.Error("Decoding LeaderId failed, found:", response.LeaderId)
+ }
+ if response.MemberId != "foo" {
+ t.Error("Decoding MemberId failed, found:", response.MemberId)
+ }
+ if len(response.Members) != 1 {
+ t.Error("Decoding Members failed, found:", response.Members)
+ }
+ if !reflect.DeepEqual(response.Members["foo"], []byte{0x01, 0x02, 0x03}) {
+ t.Error("Decoding foo member failed, found:", response.Members["foo"])
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go
new file mode 100644
index 000000000..e17742748
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/leave_group_request.go
@@ -0,0 +1,40 @@
+package sarama
+
+type LeaveGroupRequest struct {
+ GroupId string
+ MemberId string
+}
+
+func (r *LeaveGroupRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.GroupId); err != nil {
+ return err
+ }
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+ if r.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ return nil
+}
+
+func (r *LeaveGroupRequest) key() int16 {
+ return 13
+}
+
+func (r *LeaveGroupRequest) version() int16 {
+ return 0
+}
+
+func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/leave_group_request_test.go b/vendor/github.com/Shopify/sarama/leave_group_request_test.go
new file mode 100644
index 000000000..c1fed6d25
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/leave_group_request_test.go
@@ -0,0 +1,19 @@
+package sarama
+
+import "testing"
+
+var (
+ basicLeaveGroupRequest = []byte{
+ 0, 3, 'f', 'o', 'o',
+ 0, 3, 'b', 'a', 'r',
+ }
+)
+
+func TestLeaveGroupRequest(t *testing.T) {
+ var request *LeaveGroupRequest
+
+ request = new(LeaveGroupRequest)
+ request.GroupId = "foo"
+ request.MemberId = "bar"
+ testRequest(t, "basic", request, basicLeaveGroupRequest)
+}
diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go
new file mode 100644
index 000000000..d60c626da
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/leave_group_response.go
@@ -0,0 +1,32 @@
+package sarama
+
+type LeaveGroupResponse struct {
+ Err KError
+}
+
+func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ return nil
+}
+
+func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.Err = KError(kerr)
+
+ return nil
+}
+
+func (r *LeaveGroupResponse) key() int16 {
+ return 13
+}
+
+func (r *LeaveGroupResponse) version() int16 {
+ return 0
+}
+
+func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/leave_group_response_test.go b/vendor/github.com/Shopify/sarama/leave_group_response_test.go
new file mode 100644
index 000000000..9207c6668
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/leave_group_response_test.go
@@ -0,0 +1,24 @@
+package sarama
+
+import "testing"
+
+var (
+ leaveGroupResponseNoError = []byte{0x00, 0x00}
+ leaveGroupResponseWithError = []byte{0, 25}
+)
+
+func TestLeaveGroupResponse(t *testing.T) {
+ var response *LeaveGroupResponse
+
+ response = new(LeaveGroupResponse)
+ testVersionDecodable(t, "no error", response, leaveGroupResponseNoError, 0)
+ if response.Err != ErrNoError {
+ t.Error("Decoding error failed: no error expected but found", response.Err)
+ }
+
+ response = new(LeaveGroupResponse)
+ testVersionDecodable(t, "with error", response, leaveGroupResponseWithError, 0)
+ if response.Err != ErrUnknownMemberId {
+ t.Error("Decoding error failed: ErrUnknownMemberId expected but found", response.Err)
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/Shopify/sarama/length_field.go
new file mode 100644
index 000000000..70078be5d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/length_field.go
@@ -0,0 +1,29 @@
+package sarama
+
+import "encoding/binary"
+
+ // lengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths.
+type lengthField struct {
+ startOffset int
+}
+
+func (l *lengthField) saveOffset(in int) {
+ l.startOffset = in
+}
+
+func (l *lengthField) reserveLength() int {
+ return 4
+}
+
+func (l *lengthField) run(curOffset int, buf []byte) error {
+ binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
+ return nil
+}
+
+func (l *lengthField) check(curOffset int, buf []byte) error {
+ if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) {
+ return PacketDecodingError{"length field invalid"}
+ }
+
+ return nil
+}
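
Note: the length field above is used through the encoder's push/pop mechanism: push reserves 4 bytes, the payload is written, and pop calls run to backfill the big-endian length of everything written after the reserved slot. A minimal standalone sketch of that backfill arithmetic (illustration only, without sarama's packetEncoder plumbing):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	payload := []byte("hello kafka")

	// Reserve 4 bytes at the start of the buffer for the length field.
	startOffset := 0
	buf := make([]byte, 4+len(payload))
	copy(buf[4:], payload)
	curOffset := len(buf)

	// Equivalent of lengthField.run: the length covers the bytes written after the 4-byte slot.
	binary.BigEndian.PutUint32(buf[startOffset:], uint32(curOffset-startOffset-4))

	fmt.Println(binary.BigEndian.Uint32(buf[:4])) // 11, the payload length
}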
diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go
new file mode 100644
index 000000000..3b16abf7f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/list_groups_request.go
@@ -0,0 +1,24 @@
+package sarama
+
+type ListGroupsRequest struct {
+}
+
+func (r *ListGroupsRequest) encode(pe packetEncoder) error {
+ return nil
+}
+
+func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+ return nil
+}
+
+func (r *ListGroupsRequest) key() int16 {
+ return 16
+}
+
+func (r *ListGroupsRequest) version() int16 {
+ return 0
+}
+
+func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/list_groups_request_test.go b/vendor/github.com/Shopify/sarama/list_groups_request_test.go
new file mode 100644
index 000000000..2e977d9a5
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/list_groups_request_test.go
@@ -0,0 +1,7 @@
+package sarama
+
+import "testing"
+
+func TestListGroupsRequest(t *testing.T) {
+ testRequest(t, "ListGroupsRequest", &ListGroupsRequest{}, []byte{})
+}
diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go
new file mode 100644
index 000000000..56115d4c7
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/list_groups_response.go
@@ -0,0 +1,69 @@
+package sarama
+
+type ListGroupsResponse struct {
+ Err KError
+ Groups map[string]string
+}
+
+func (r *ListGroupsResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+
+ if err := pe.putArrayLength(len(r.Groups)); err != nil {
+ return err
+ }
+ for groupId, protocolType := range r.Groups {
+ if err := pe.putString(groupId); err != nil {
+ return err
+ }
+ if err := pe.putString(protocolType); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.Err = KError(kerr)
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ r.Groups = make(map[string]string)
+ for i := 0; i < n; i++ {
+ groupId, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ protocolType, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ r.Groups[groupId] = protocolType
+ }
+
+ return nil
+}
+
+func (r *ListGroupsResponse) key() int16 {
+ return 16
+}
+
+func (r *ListGroupsResponse) version() int16 {
+ return 0
+}
+
+func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/list_groups_response_test.go b/vendor/github.com/Shopify/sarama/list_groups_response_test.go
new file mode 100644
index 000000000..41ab822f9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/list_groups_response_test.go
@@ -0,0 +1,58 @@
+package sarama
+
+import (
+ "testing"
+)
+
+var (
+ listGroupsResponseEmpty = []byte{
+ 0, 0, // no error
+ 0, 0, 0, 0, // no groups
+ }
+
+ listGroupsResponseError = []byte{
+ 0, 31, // ErrClusterAuthorizationFailed
+ 0, 0, 0, 0, // no groups
+ }
+
+ listGroupsResponseWithConsumer = []byte{
+ 0, 0, // no error
+ 0, 0, 0, 1, // 1 group
+ 0, 3, 'f', 'o', 'o', // group name
+ 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // protocol type
+ }
+)
+
+func TestListGroupsResponse(t *testing.T) {
+ var response *ListGroupsResponse
+
+ response = new(ListGroupsResponse)
+ testVersionDecodable(t, "no error", response, listGroupsResponseEmpty, 0)
+ if response.Err != ErrNoError {
+ t.Error("Expected no gerror, found:", response.Err)
+ }
+ if len(response.Groups) != 0 {
+ t.Error("Expected no groups")
+ }
+
+ response = new(ListGroupsResponse)
+ testVersionDecodable(t, "no error", response, listGroupsResponseError, 0)
+ if response.Err != ErrClusterAuthorizationFailed {
+ t.Error("Expected no gerror, found:", response.Err)
+ }
+ if len(response.Groups) != 0 {
+ t.Error("Expected no groups")
+ }
+
+ response = new(ListGroupsResponse)
+ testVersionDecodable(t, "no error", response, listGroupsResponseWithConsumer, 0)
+ if response.Err != ErrNoError {
+ t.Error("Expected no gerror, found:", response.Err)
+ }
+ if len(response.Groups) != 1 {
+ t.Error("Expected one group")
+ }
+ if response.Groups["foo"] != "consumer" {
+ t.Error("Expected foo group to use consumer protocol")
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go
new file mode 100644
index 000000000..86b4ac32d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/message.go
@@ -0,0 +1,212 @@
+package sarama
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io/ioutil"
+ "time"
+
+ "github.com/eapache/go-xerial-snappy"
+ "github.com/pierrec/lz4"
+)
+
+// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
+type CompressionCodec int8
+
+// only the last two bits are really used
+const compressionCodecMask int8 = 0x03
+
+const (
+ CompressionNone CompressionCodec = 0
+ CompressionGZIP CompressionCodec = 1
+ CompressionSnappy CompressionCodec = 2
+ CompressionLZ4 CompressionCodec = 3
+)
+
+type Message struct {
+ Codec CompressionCodec // codec used to compress the message contents
+ Key []byte // the message key, may be nil
+ Value []byte // the message contents
+ Set *MessageSet // the message set a message might wrap
+ Version int8 // v1 requires Kafka 0.10
+ Timestamp time.Time // the timestamp of the message (version 1+ only)
+
+ compressedCache []byte
+ compressedSize int // used for computing the compression ratio metrics
+}
+
+func (m *Message) encode(pe packetEncoder) error {
+ pe.push(&crc32Field{})
+
+ pe.putInt8(m.Version)
+
+ attributes := int8(m.Codec) & compressionCodecMask
+ pe.putInt8(attributes)
+
+ if m.Version >= 1 {
+ timestamp := int64(-1)
+
+ if !m.Timestamp.Before(time.Unix(0, 0)) {
+ timestamp = m.Timestamp.UnixNano() / int64(time.Millisecond)
+ } else if !m.Timestamp.IsZero() {
+ return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", m.Timestamp)}
+ }
+
+ pe.putInt64(timestamp)
+ }
+
+ err := pe.putBytes(m.Key)
+ if err != nil {
+ return err
+ }
+
+ var payload []byte
+
+ if m.compressedCache != nil {
+ payload = m.compressedCache
+ m.compressedCache = nil
+ } else if m.Value != nil {
+ switch m.Codec {
+ case CompressionNone:
+ payload = m.Value
+ case CompressionGZIP:
+ var buf bytes.Buffer
+ writer := gzip.NewWriter(&buf)
+ if _, err = writer.Write(m.Value); err != nil {
+ return err
+ }
+ if err = writer.Close(); err != nil {
+ return err
+ }
+ m.compressedCache = buf.Bytes()
+ payload = m.compressedCache
+ case CompressionSnappy:
+ tmp := snappy.Encode(m.Value)
+ m.compressedCache = tmp
+ payload = m.compressedCache
+ case CompressionLZ4:
+ var buf bytes.Buffer
+ writer := lz4.NewWriter(&buf)
+ if _, err = writer.Write(m.Value); err != nil {
+ return err
+ }
+ if err = writer.Close(); err != nil {
+ return err
+ }
+ m.compressedCache = buf.Bytes()
+ payload = m.compressedCache
+
+ default:
+ return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)}
+ }
+ // Record the compressed payload size for metric gathering
+ m.compressedSize = len(payload)
+ }
+
+ if err = pe.putBytes(payload); err != nil {
+ return err
+ }
+
+ return pe.pop()
+}
+
+func (m *Message) decode(pd packetDecoder) (err error) {
+ err = pd.push(&crc32Field{})
+ if err != nil {
+ return err
+ }
+
+ m.Version, err = pd.getInt8()
+ if err != nil {
+ return err
+ }
+
+ attribute, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ m.Codec = CompressionCodec(attribute & compressionCodecMask)
+
+ if m.Version >= 1 {
+ millis, err := pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ // Negative timestamps are invalid; in these cases we return
+ // a zero time.
+ timestamp := time.Time{}
+ if millis >= 0 {
+ timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
+ }
+
+ m.Timestamp = timestamp
+ }
+
+ m.Key, err = pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ m.Value, err = pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ // Required for deep equal assertion during tests but might be useful
+ // for future metrics about the compression ratio in fetch requests
+ m.compressedSize = len(m.Value)
+
+ switch m.Codec {
+ case CompressionNone:
+ // nothing to do
+ case CompressionGZIP:
+ if m.Value == nil {
+ break
+ }
+ reader, err := gzip.NewReader(bytes.NewReader(m.Value))
+ if err != nil {
+ return err
+ }
+ if m.Value, err = ioutil.ReadAll(reader); err != nil {
+ return err
+ }
+ if err := m.decodeSet(); err != nil {
+ return err
+ }
+ case CompressionSnappy:
+ if m.Value == nil {
+ break
+ }
+ if m.Value, err = snappy.Decode(m.Value); err != nil {
+ return err
+ }
+ if err := m.decodeSet(); err != nil {
+ return err
+ }
+ case CompressionLZ4:
+ if m.Value == nil {
+ break
+ }
+ reader := lz4.NewReader(bytes.NewReader(m.Value))
+ if m.Value, err = ioutil.ReadAll(reader); err != nil {
+ return err
+ }
+ if err := m.decodeSet(); err != nil {
+ return err
+ }
+
+ default:
+ return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)}
+ }
+
+ return pd.pop()
+}
+
+ // decodeSet decodes a message set from a previously encoded bulk message
+func (m *Message) decodeSet() (err error) {
+ pd := realDecoder{raw: m.Value}
+ m.Set = &MessageSet{}
+ return m.Set.decode(&pd)
+}
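
Note: the attributes byte written by Message.encode packs the compression codec into its two low-order bits (compressionCodecMask = 0x03), which is why decode masks the byte before converting it back to a CompressionCodec. A small standalone sketch of that masking, reusing the constants from the file above (illustration only):

package main

import "fmt"

type CompressionCodec int8

const (
	CompressionNone   CompressionCodec = 0
	CompressionGZIP   CompressionCodec = 1
	CompressionSnappy CompressionCodec = 2
	CompressionLZ4    CompressionCodec = 3
)

// only the last two bits of the attributes byte carry the codec
const compressionCodecMask int8 = 0x03

func main() {
	// Encode: the codec occupies the two low-order bits of the attributes byte.
	attributes := int8(CompressionSnappy) & compressionCodecMask

	// Decode: masking recovers the codec even if higher attribute bits are set by future versions.
	decoded := CompressionCodec(attributes & compressionCodecMask)
	fmt.Println(decoded == CompressionSnappy) // true
}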
diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go
new file mode 100644
index 000000000..f028784e5
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/message_set.go
@@ -0,0 +1,89 @@
+package sarama
+
+type MessageBlock struct {
+ Offset int64
+ Msg *Message
+}
+
+ // Messages is a convenience helper that returns the messages wrapped in this
+ // block's nested set, or the block itself when there is no nested set.
+func (msb *MessageBlock) Messages() []*MessageBlock {
+ if msb.Msg.Set != nil {
+ return msb.Msg.Set.Messages
+ }
+ return []*MessageBlock{msb}
+}
+
+func (msb *MessageBlock) encode(pe packetEncoder) error {
+ pe.putInt64(msb.Offset)
+ pe.push(&lengthField{})
+ err := msb.Msg.encode(pe)
+ if err != nil {
+ return err
+ }
+ return pe.pop()
+}
+
+func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
+ if msb.Offset, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ if err = pd.push(&lengthField{}); err != nil {
+ return err
+ }
+
+ msb.Msg = new(Message)
+ if err = msb.Msg.decode(pd); err != nil {
+ return err
+ }
+
+ if err = pd.pop(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type MessageSet struct {
+ PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
+ Messages []*MessageBlock
+}
+
+func (ms *MessageSet) encode(pe packetEncoder) error {
+ for i := range ms.Messages {
+ err := ms.Messages[i].encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (ms *MessageSet) decode(pd packetDecoder) (err error) {
+ ms.Messages = nil
+
+ for pd.remaining() > 0 {
+ msb := new(MessageBlock)
+ err = msb.decode(pd)
+ switch err {
+ case nil:
+ ms.Messages = append(ms.Messages, msb)
+ case ErrInsufficientData:
+ // As an optimization the server is allowed to return a partial message at the
+ // end of the message set. Clients should handle this case, so we just mark the set as partial and stop decoding.
+ ms.PartialTrailingMessage = true
+ return nil
+ default:
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (ms *MessageSet) addMessage(msg *Message) {
+ block := new(MessageBlock)
+ block.Msg = msg
+ ms.Messages = append(ms.Messages, block)
+}
diff --git a/vendor/github.com/Shopify/sarama/message_test.go b/vendor/github.com/Shopify/sarama/message_test.go
new file mode 100644
index 000000000..d4a37c22d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/message_test.go
@@ -0,0 +1,181 @@
+package sarama
+
+import (
+ "runtime"
+ "testing"
+ "time"
+)
+
+var (
+ emptyMessage = []byte{
+ 167, 236, 104, 3, // CRC
+ 0x00, // magic version byte
+ 0x00, // attribute flags
+ 0xFF, 0xFF, 0xFF, 0xFF, // key
+ 0xFF, 0xFF, 0xFF, 0xFF} // value
+
+ emptyGzipMessage = []byte{
+ 97, 79, 149, 90, //CRC
+ 0x00, // magic version byte
+ 0x01, // attribute flags
+ 0xFF, 0xFF, 0xFF, 0xFF, // key
+ // value
+ 0x00, 0x00, 0x00, 0x17,
+ 0x1f, 0x8b,
+ 0x08,
+ 0, 0, 9, 110, 136, 0, 255, 1, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}
+
+ emptyGzipMessage18 = []byte{
+ 132, 99, 80, 148, //CRC
+ 0x00, // magic version byte
+ 0x01, // attribute flags
+ 0xFF, 0xFF, 0xFF, 0xFF, // key
+ // value
+ 0x00, 0x00, 0x00, 0x17,
+ 0x1f, 0x8b,
+ 0x08,
+ 0, 0, 0, 0, 0, 0, 255, 1, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}
+
+ emptyLZ4Message = []byte{
+ 132, 219, 238, 101, // CRC
+ 0x01, // version byte
+ 0x03, // attribute flags: lz4
+ 0, 0, 1, 88, 141, 205, 89, 56, // timestamp
+ 0xFF, 0xFF, 0xFF, 0xFF, // key
+ 0x00, 0x00, 0x00, 0x0f, // len
+ 0x04, 0x22, 0x4D, 0x18, // LZ4 magic number
+ 100, // LZ4 flags: version 01, block independent, content checksum
+ 112, 185, 0, 0, 0, 0, // LZ4 data
+ 5, 93, 204, 2, // LZ4 checksum
+ }
+
+ emptyBulkSnappyMessage = []byte{
+ 180, 47, 53, 209, //CRC
+ 0x00, // magic version byte
+ 0x02, // attribute flags
+ 0xFF, 0xFF, 0xFF, 0xFF, // key
+ 0, 0, 0, 42,
+ 130, 83, 78, 65, 80, 80, 89, 0, // SNAPPY magic
+ 0, 0, 0, 1, // min version
+ 0, 0, 0, 1, // default version
+ 0, 0, 0, 22, 52, 0, 0, 25, 1, 16, 14, 227, 138, 104, 118, 25, 15, 13, 1, 8, 1, 0, 0, 62, 26, 0}
+
+ emptyBulkGzipMessage = []byte{
+ 139, 160, 63, 141, //CRC
+ 0x00, // magic version byte
+ 0x01, // attribute flags
+ 0xFF, 0xFF, 0xFF, 0xFF, // key
+ 0x00, 0x00, 0x00, 0x27, // len
+ 0x1f, 0x8b, // Gzip Magic
+ 0x08, // deflate compressed
+ 0, 0, 0, 0, 0, 0, 0, 99, 96, 128, 3, 190, 202, 112, 143, 7, 12, 12, 255, 129, 0, 33, 200, 192, 136, 41, 3, 0, 199, 226, 155, 70, 52, 0, 0, 0}
+
+ emptyBulkLZ4Message = []byte{
+ 246, 12, 188, 129, // CRC
+ 0x01, // Version
+ 0x03, // attribute flags (LZ4)
+ 255, 255, 249, 209, 212, 181, 73, 201, // timestamp
+ 0xFF, 0xFF, 0xFF, 0xFF, // key
+ 0x00, 0x00, 0x00, 0x47, // len
+ 0x04, 0x22, 0x4D, 0x18, // magic number lz4
+ 100, // lz4 flags 01100100
+ // version: 01, block indep: 1, block checksum: 0, content size: 0, content checksum: 1, reserved: 00
+ 112, 185, 52, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 121, 87, 72, 224, 0, 0, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 14, 121, 87, 72, 224, 0, 0, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0,
+ 71, 129, 23, 111, // LZ4 checksum
+ }
+)
+
+func TestMessageEncoding(t *testing.T) {
+ message := Message{}
+ testEncodable(t, "empty", &message, emptyMessage)
+
+ message.Value = []byte{}
+ message.Codec = CompressionGZIP
+ if runtime.Version() == "go1.8" {
+ testEncodable(t, "empty gzip", &message, emptyGzipMessage18)
+ } else {
+ testEncodable(t, "empty gzip", &message, emptyGzipMessage)
+ }
+
+ message.Value = []byte{}
+ message.Codec = CompressionLZ4
+ message.Timestamp = time.Unix(1479847795, 0)
+ message.Version = 1
+ testEncodable(t, "empty lz4", &message, emptyLZ4Message)
+}
+
+func TestMessageDecoding(t *testing.T) {
+ message := Message{}
+ testDecodable(t, "empty", &message, emptyMessage)
+ if message.Codec != CompressionNone {
+ t.Error("Decoding produced compression codec where there was none.")
+ }
+ if message.Key != nil {
+ t.Error("Decoding produced key where there was none.")
+ }
+ if message.Value != nil {
+ t.Error("Decoding produced value where there was none.")
+ }
+ if message.Set != nil {
+ t.Error("Decoding produced set where there was none.")
+ }
+
+ testDecodable(t, "empty gzip", &message, emptyGzipMessage)
+ if message.Codec != CompressionGZIP {
+ t.Error("Decoding produced incorrect compression codec (was gzip).")
+ }
+ if message.Key != nil {
+ t.Error("Decoding produced key where there was none.")
+ }
+ if message.Value == nil || len(message.Value) != 0 {
+ t.Error("Decoding produced nil or content-ful value where there was an empty array.")
+ }
+}
+
+func TestMessageDecodingBulkSnappy(t *testing.T) {
+ message := Message{}
+ testDecodable(t, "bulk snappy", &message, emptyBulkSnappyMessage)
+ if message.Codec != CompressionSnappy {
+ t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionSnappy)
+ }
+ if message.Key != nil {
+ t.Errorf("Decoding produced key %+v, but none was expected.", message.Key)
+ }
+ if message.Set == nil {
+ t.Error("Decoding produced no set, but one was expected.")
+ } else if len(message.Set.Messages) != 2 {
+ t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages))
+ }
+}
+
+func TestMessageDecodingBulkGzip(t *testing.T) {
+ message := Message{}
+ testDecodable(t, "bulk gzip", &message, emptyBulkGzipMessage)
+ if message.Codec != CompressionGZIP {
+ t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionGZIP)
+ }
+ if message.Key != nil {
+ t.Errorf("Decoding produced key %+v, but none was expected.", message.Key)
+ }
+ if message.Set == nil {
+ t.Error("Decoding produced no set, but one was expected.")
+ } else if len(message.Set.Messages) != 2 {
+ t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages))
+ }
+}
+
+func TestMessageDecodingBulkLZ4(t *testing.T) {
+ message := Message{}
+ testDecodable(t, "bulk lz4", &message, emptyBulkLZ4Message)
+ if message.Codec != CompressionLZ4 {
+ t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionLZ4)
+ }
+ if message.Key != nil {
+ t.Errorf("Decoding produced key %+v, but none was expected.", message.Key)
+ }
+ if message.Set == nil {
+ t.Error("Decoding produced no set, but one was expected.")
+ } else if len(message.Set.Messages) != 2 {
+ t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages))
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go
new file mode 100644
index 000000000..9a26b55fd
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metadata_request.go
@@ -0,0 +1,52 @@
+package sarama
+
+type MetadataRequest struct {
+ Topics []string
+}
+
+func (r *MetadataRequest) encode(pe packetEncoder) error {
+ err := pe.putArrayLength(len(r.Topics))
+ if err != nil {
+ return err
+ }
+
+ for i := range r.Topics {
+ err = pe.putString(r.Topics[i])
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *MetadataRequest) decode(pd packetDecoder, version int16) error {
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount == 0 {
+ return nil
+ }
+
+ r.Topics = make([]string, topicCount)
+ for i := range r.Topics {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ r.Topics[i] = topic
+ }
+ return nil
+}
+
+func (r *MetadataRequest) key() int16 {
+ return 3
+}
+
+func (r *MetadataRequest) version() int16 {
+ return 0
+}
+
+func (r *MetadataRequest) requiredVersion() KafkaVersion {
+ return minVersion
+}
diff --git a/vendor/github.com/Shopify/sarama/metadata_request_test.go b/vendor/github.com/Shopify/sarama/metadata_request_test.go
new file mode 100644
index 000000000..44f3146e4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metadata_request_test.go
@@ -0,0 +1,29 @@
+package sarama
+
+import "testing"
+
+var (
+ metadataRequestNoTopics = []byte{
+ 0x00, 0x00, 0x00, 0x00}
+
+ metadataRequestOneTopic = []byte{
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x06, 't', 'o', 'p', 'i', 'c', '1'}
+
+ metadataRequestThreeTopics = []byte{
+ 0x00, 0x00, 0x00, 0x03,
+ 0x00, 0x03, 'f', 'o', 'o',
+ 0x00, 0x03, 'b', 'a', 'r',
+ 0x00, 0x03, 'b', 'a', 'z'}
+)
+
+func TestMetadataRequest(t *testing.T) {
+ request := new(MetadataRequest)
+ testRequest(t, "no topics", request, metadataRequestNoTopics)
+
+ request.Topics = []string{"topic1"}
+ testRequest(t, "one topic", request, metadataRequestOneTopic)
+
+ request.Topics = []string{"foo", "bar", "baz"}
+ testRequest(t, "three topics", request, metadataRequestThreeTopics)
+}
diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go
new file mode 100644
index 000000000..f9d6a4271
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metadata_response.go
@@ -0,0 +1,239 @@
+package sarama
+
+type PartitionMetadata struct {
+ Err KError
+ ID int32
+ Leader int32
+ Replicas []int32
+ Isr []int32
+}
+
+func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ pm.Err = KError(tmp)
+
+ pm.ID, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ pm.Leader, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ pm.Replicas, err = pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+
+ pm.Isr, err = pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) {
+ pe.putInt16(int16(pm.Err))
+ pe.putInt32(pm.ID)
+ pe.putInt32(pm.Leader)
+
+ err = pe.putInt32Array(pm.Replicas)
+ if err != nil {
+ return err
+ }
+
+ err = pe.putInt32Array(pm.Isr)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type TopicMetadata struct {
+ Err KError
+ Name string
+ Partitions []*PartitionMetadata
+}
+
+func (tm *TopicMetadata) decode(pd packetDecoder) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ tm.Err = KError(tmp)
+
+ tm.Name, err = pd.getString()
+ if err != nil {
+ return err
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ tm.Partitions = make([]*PartitionMetadata, n)
+ for i := 0; i < n; i++ {
+ tm.Partitions[i] = new(PartitionMetadata)
+ err = tm.Partitions[i].decode(pd)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (tm *TopicMetadata) encode(pe packetEncoder) (err error) {
+ pe.putInt16(int16(tm.Err))
+
+ err = pe.putString(tm.Name)
+ if err != nil {
+ return err
+ }
+
+ err = pe.putArrayLength(len(tm.Partitions))
+ if err != nil {
+ return err
+ }
+
+ for _, pm := range tm.Partitions {
+ err = pm.encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type MetadataResponse struct {
+ Brokers []*Broker
+ Topics []*TopicMetadata
+}
+
+func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Brokers = make([]*Broker, n)
+ for i := 0; i < n; i++ {
+ r.Brokers[i] = new(Broker)
+ err = r.Brokers[i].decode(pd)
+ if err != nil {
+ return err
+ }
+ }
+
+ n, err = pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Topics = make([]*TopicMetadata, n)
+ for i := 0; i < n; i++ {
+ r.Topics[i] = new(TopicMetadata)
+ err = r.Topics[i].decode(pd)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *MetadataResponse) encode(pe packetEncoder) error {
+ err := pe.putArrayLength(len(r.Brokers))
+ if err != nil {
+ return err
+ }
+ for _, broker := range r.Brokers {
+ err = broker.encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+
+ err = pe.putArrayLength(len(r.Topics))
+ if err != nil {
+ return err
+ }
+ for _, tm := range r.Topics {
+ err = tm.encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *MetadataResponse) key() int16 {
+ return 3
+}
+
+func (r *MetadataResponse) version() int16 {
+ return 0
+}
+
+func (r *MetadataResponse) requiredVersion() KafkaVersion {
+ return minVersion
+}
+
+// testing API
+
+func (r *MetadataResponse) AddBroker(addr string, id int32) {
+ r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr})
+}
+
+func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata {
+ var tmatch *TopicMetadata
+
+ for _, tm := range r.Topics {
+ if tm.Name == topic {
+ tmatch = tm
+ goto foundTopic
+ }
+ }
+
+ tmatch = new(TopicMetadata)
+ tmatch.Name = topic
+ r.Topics = append(r.Topics, tmatch)
+
+foundTopic:
+
+ tmatch.Err = err
+ return tmatch
+}
+
+func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) {
+ tmatch := r.AddTopic(topic, ErrNoError)
+ var pmatch *PartitionMetadata
+
+ for _, pm := range tmatch.Partitions {
+ if pm.ID == partition {
+ pmatch = pm
+ goto foundPartition
+ }
+ }
+
+ pmatch = new(PartitionMetadata)
+ pmatch.ID = partition
+ tmatch.Partitions = append(tmatch.Partitions, pmatch)
+
+foundPartition:
+
+ pmatch.Leader = brokerID
+ pmatch.Replicas = replicas
+ pmatch.Isr = isr
+ pmatch.Err = err
+
+}
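
The AddBroker, AddTopic and AddTopicPartition helpers under "testing API" let a test assemble a MetadataResponse programmatically instead of hand-encoding the wire format. A minimal sketch, assuming it runs inside the sarama package; the topic name, broker ID and address are illustrative:

```go
// Sketch only: assembling a MetadataResponse via the testing API above.
res := new(MetadataResponse)
res.AddBroker("localhost:9092", 1)

// Register partition 0 of "foo" with broker 1 as leader, replicas and ISR {1}.
res.AddTopicPartition("foo", 0, 1, []int32{1}, []int32{1}, ErrNoError)

// Calling it again for the same topic/partition updates the existing entry
// rather than appending a duplicate, thanks to the lookup in AddTopic.
res.AddTopicPartition("foo", 0, 1, []int32{1}, []int32{1}, ErrLeaderNotAvailable)
```
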
diff --git a/vendor/github.com/Shopify/sarama/metadata_response_test.go b/vendor/github.com/Shopify/sarama/metadata_response_test.go
new file mode 100644
index 000000000..ea62a4f1b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metadata_response_test.go
@@ -0,0 +1,139 @@
+package sarama
+
+import "testing"
+
+var (
+ emptyMetadataResponse = []byte{
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+
+ brokersNoTopicsMetadataResponse = []byte{
+ 0x00, 0x00, 0x00, 0x02,
+
+ 0x00, 0x00, 0xab, 0xff,
+ 0x00, 0x09, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't',
+ 0x00, 0x00, 0x00, 0x33,
+
+ 0x00, 0x01, 0x02, 0x03,
+ 0x00, 0x0a, 'g', 'o', 'o', 'g', 'l', 'e', '.', 'c', 'o', 'm',
+ 0x00, 0x00, 0x01, 0x11,
+
+ 0x00, 0x00, 0x00, 0x00}
+
+ topicsNoBrokersMetadataResponse = []byte{
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x02,
+
+ 0x00, 0x00,
+ 0x00, 0x03, 'f', 'o', 'o',
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x04,
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x07,
+ 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00,
+ 0x00, 0x03, 'b', 'a', 'r',
+ 0x00, 0x00, 0x00, 0x00}
+)
+
+func TestEmptyMetadataResponse(t *testing.T) {
+ response := MetadataResponse{}
+
+ testVersionDecodable(t, "empty", &response, emptyMetadataResponse, 0)
+ if len(response.Brokers) != 0 {
+ t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!")
+ }
+ if len(response.Topics) != 0 {
+ t.Error("Decoding produced", len(response.Topics), "topics where there were none!")
+ }
+}
+
+func TestMetadataResponseWithBrokers(t *testing.T) {
+ response := MetadataResponse{}
+
+ testVersionDecodable(t, "brokers, no topics", &response, brokersNoTopicsMetadataResponse, 0)
+ if len(response.Brokers) != 2 {
+ t.Fatal("Decoding produced", len(response.Brokers), "brokers where there were two!")
+ }
+
+ if response.Brokers[0].id != 0xabff {
+ t.Error("Decoding produced invalid broker 0 id.")
+ }
+ if response.Brokers[0].addr != "localhost:51" {
+ t.Error("Decoding produced invalid broker 0 address.")
+ }
+ if response.Brokers[1].id != 0x010203 {
+ t.Error("Decoding produced invalid broker 1 id.")
+ }
+ if response.Brokers[1].addr != "google.com:273" {
+ t.Error("Decoding produced invalid broker 1 address.")
+ }
+
+ if len(response.Topics) != 0 {
+ t.Error("Decoding produced", len(response.Topics), "topics where there were none!")
+ }
+}
+
+func TestMetadataResponseWithTopics(t *testing.T) {
+ response := MetadataResponse{}
+
+ testVersionDecodable(t, "topics, no brokers", &response, topicsNoBrokersMetadataResponse, 0)
+ if len(response.Brokers) != 0 {
+ t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!")
+ }
+
+ if len(response.Topics) != 2 {
+ t.Fatal("Decoding produced", len(response.Topics), "topics where there were two!")
+ }
+
+ if response.Topics[0].Err != ErrNoError {
+ t.Error("Decoding produced invalid topic 0 error.")
+ }
+
+ if response.Topics[0].Name != "foo" {
+ t.Error("Decoding produced invalid topic 0 name.")
+ }
+
+ if len(response.Topics[0].Partitions) != 1 {
+ t.Fatal("Decoding produced invalid partition count for topic 0.")
+ }
+
+ if response.Topics[0].Partitions[0].Err != ErrInvalidMessageSize {
+ t.Error("Decoding produced invalid topic 0 partition 0 error.")
+ }
+
+ if response.Topics[0].Partitions[0].ID != 0x01 {
+ t.Error("Decoding produced invalid topic 0 partition 0 id.")
+ }
+
+ if response.Topics[0].Partitions[0].Leader != 0x07 {
+ t.Error("Decoding produced invalid topic 0 partition 0 leader.")
+ }
+
+ if len(response.Topics[0].Partitions[0].Replicas) != 3 {
+ t.Fatal("Decoding produced invalid topic 0 partition 0 replicas.")
+ }
+ for i := 0; i < 3; i++ {
+ if response.Topics[0].Partitions[0].Replicas[i] != int32(i+1) {
+ t.Error("Decoding produced invalid topic 0 partition 0 replica", i)
+ }
+ }
+
+ if len(response.Topics[0].Partitions[0].Isr) != 0 {
+ t.Error("Decoding produced invalid topic 0 partition 0 isr length.")
+ }
+
+ if response.Topics[1].Err != ErrNoError {
+ t.Error("Decoding produced invalid topic 1 error.")
+ }
+
+ if response.Topics[1].Name != "bar" {
+ t.Error("Decoding produced invalid topic 0 name.")
+ }
+
+ if len(response.Topics[1].Partitions) != 0 {
+ t.Error("Decoding produced invalid partition count for topic 1.")
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go
new file mode 100644
index 000000000..4869708e9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metrics.go
@@ -0,0 +1,51 @@
+package sarama
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library:
+// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution,
+// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements.
+// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
+const (
+ metricsReservoirSize = 1028
+ metricsAlphaFactor = 0.015
+)
+
+func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
+ return r.GetOrRegister(name, func() metrics.Histogram {
+ return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
+ }).(metrics.Histogram)
+}
+
+func getMetricNameForBroker(name string, broker *Broker) string {
+ // Use the broker ID like the Java client does, as it contains no '.' or ':' characters that
+ // could be interpreted as special characters by a monitoring tool (e.g. Graphite)
+ return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
+}
+
+func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter {
+ return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r)
+}
+
+func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram {
+ return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r)
+}
+
+func getMetricNameForTopic(name string, topic string) string {
+ // Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
+ // cf. KAFKA-1902 and KAFKA-2337
+ return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
+}
+
+func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
+ return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
+}
+
+func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
+ return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
+}
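
These helpers are unexported, so they only compose from inside the sarama package. A minimal sketch of how they might be used to record per-topic metrics; the metric names and values here are illustrative, not necessarily the ones Sarama itself registers:

```go
registry := metrics.NewRegistry()

// Meter: count records for a topic; '.' in the topic is rewritten to '_',
// so this registers "record-send-rate-for-topic-my_topic".
getOrRegisterTopicMeter("record-send-rate", "my.topic", registry).Mark(1)

// Histogram: sample a batch size using the exponentially decaying reservoir
// configured above (1028 samples, alpha 0.015).
getOrRegisterTopicHistogram("batch-size", "my.topic", registry).Update(512)
```
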
diff --git a/vendor/github.com/Shopify/sarama/metrics_test.go b/vendor/github.com/Shopify/sarama/metrics_test.go
new file mode 100644
index 000000000..789c0ff33
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metrics_test.go
@@ -0,0 +1,172 @@
+package sarama
+
+import (
+ "testing"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+func TestGetOrRegisterHistogram(t *testing.T) {
+ metricRegistry := metrics.NewRegistry()
+ histogram := getOrRegisterHistogram("name", metricRegistry)
+
+ if histogram == nil {
+ t.Error("Unexpected nil histogram")
+ }
+
+ // Fetch the metric
+ foundHistogram := metricRegistry.Get("name")
+
+ if foundHistogram != histogram {
+ t.Error("Unexpected different histogram", foundHistogram, histogram)
+ }
+
+ // Try to register the metric again
+ sameHistogram := getOrRegisterHistogram("name", metricRegistry)
+
+ if sameHistogram != histogram {
+ t.Error("Unexpected different histogram", sameHistogram, histogram)
+ }
+}
+
+func TestGetMetricNameForBroker(t *testing.T) {
+ metricName := getMetricNameForBroker("name", &Broker{id: 1})
+
+ if metricName != "name-for-broker-1" {
+ t.Error("Unexpected metric name", metricName)
+ }
+}
+
+// Common type and functions for metric validation
+type metricValidator struct {
+ name string
+ validator func(*testing.T, interface{})
+}
+
+type metricValidators []*metricValidator
+
+func newMetricValidators() metricValidators {
+ return make([]*metricValidator, 0, 32)
+}
+
+func (m *metricValidators) register(validator *metricValidator) {
+ *m = append(*m, validator)
+}
+
+func (m *metricValidators) registerForBroker(broker *Broker, validator *metricValidator) {
+ m.register(&metricValidator{getMetricNameForBroker(validator.name, broker), validator.validator})
+}
+
+func (m *metricValidators) registerForGlobalAndTopic(topic string, validator *metricValidator) {
+ m.register(&metricValidator{validator.name, validator.validator})
+ m.register(&metricValidator{getMetricNameForTopic(validator.name, topic), validator.validator})
+}
+
+func (m *metricValidators) registerForAllBrokers(broker *Broker, validator *metricValidator) {
+ m.register(validator)
+ m.registerForBroker(broker, validator)
+}
+
+func (m metricValidators) run(t *testing.T, r metrics.Registry) {
+ for _, metricValidator := range m {
+ metric := r.Get(metricValidator.name)
+ if metric == nil {
+ t.Error("No metric named", metricValidator.name)
+ } else {
+ metricValidator.validator(t, metric)
+ }
+ }
+}
+
+func meterValidator(name string, extraValidator func(*testing.T, metrics.Meter)) *metricValidator {
+ return &metricValidator{
+ name: name,
+ validator: func(t *testing.T, metric interface{}) {
+ if meter, ok := metric.(metrics.Meter); !ok {
+ t.Errorf("Expected meter metric for '%s', got %T", name, metric)
+ } else {
+ extraValidator(t, meter)
+ }
+ },
+ }
+}
+
+func countMeterValidator(name string, expectedCount int) *metricValidator {
+ return meterValidator(name, func(t *testing.T, meter metrics.Meter) {
+ count := meter.Count()
+ if count != int64(expectedCount) {
+ t.Errorf("Expected meter metric '%s' count = %d, got %d", name, expectedCount, count)
+ }
+ })
+}
+
+func minCountMeterValidator(name string, minCount int) *metricValidator {
+ return meterValidator(name, func(t *testing.T, meter metrics.Meter) {
+ count := meter.Count()
+ if count < int64(minCount) {
+ t.Errorf("Expected meter metric '%s' count >= %d, got %d", name, minCount, count)
+ }
+ })
+}
+
+func histogramValidator(name string, extraValidator func(*testing.T, metrics.Histogram)) *metricValidator {
+ return &metricValidator{
+ name: name,
+ validator: func(t *testing.T, metric interface{}) {
+ if histogram, ok := metric.(metrics.Histogram); !ok {
+ t.Errorf("Expected histogram metric for '%s', got %T", name, metric)
+ } else {
+ extraValidator(t, histogram)
+ }
+ },
+ }
+}
+
+func countHistogramValidator(name string, expectedCount int) *metricValidator {
+ return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) {
+ count := histogram.Count()
+ if count != int64(expectedCount) {
+ t.Errorf("Expected histogram metric '%s' count = %d, got %d", name, expectedCount, count)
+ }
+ })
+}
+
+func minCountHistogramValidator(name string, minCount int) *metricValidator {
+ return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) {
+ count := histogram.Count()
+ if count < int64(minCount) {
+ t.Errorf("Expected histogram metric '%s' count >= %d, got %d", name, minCount, count)
+ }
+ })
+}
+
+func minMaxHistogramValidator(name string, expectedMin int, expectedMax int) *metricValidator {
+ return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) {
+ min := int(histogram.Min())
+ if min != expectedMin {
+ t.Errorf("Expected histogram metric '%s' min = %d, got %d", name, expectedMin, min)
+ }
+ max := int(histogram.Max())
+ if max != expectedMax {
+ t.Errorf("Expected histogram metric '%s' max = %d, got %d", name, expectedMax, max)
+ }
+ })
+}
+
+func minValHistogramValidator(name string, minMin int) *metricValidator {
+ return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) {
+ min := int(histogram.Min())
+ if min < minMin {
+ t.Errorf("Expected histogram metric '%s' min >= %d, got %d", name, minMin, min)
+ }
+ })
+}
+
+func maxValHistogramValidator(name string, maxMax int) *metricValidator {
+ return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) {
+ max := int(histogram.Max())
+ if max > maxMax {
+ t.Errorf("Expected histogram metric '%s' max <= %d, got %d", name, maxMax, max)
+ }
+ })
+}
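
The validator helpers above are meant to be collected into a metricValidators list and run once against a registry after the code under test has produced its metrics. A sketch, assuming a *testing.T, a *Broker and a populated metrics.Registry are in scope; the metric names are illustrative:

```go
validators := newMetricValidators()
validators.register(countMeterValidator("request-rate", 3))
validators.register(minValHistogramValidator("request-size", 1))
// Register the same check under its per-broker name as well.
validators.registerForBroker(broker, countMeterValidator("request-rate", 3))

// Each validator looks up its metric in the registry and fails the test if
// the metric is missing or the assertion does not hold.
validators.run(t, metricRegistry)
```
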
diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go
new file mode 100644
index 000000000..0734d34f6
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mockbroker.go
@@ -0,0 +1,324 @@
+package sarama
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "net"
+ "reflect"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+const (
+ expectationTimeout = 500 * time.Millisecond
+)
+
+type requestHandlerFunc func(req *request) (res encoder)
+
+// RequestNotifierFunc is invoked when a mock broker processes a request successfully
+// and provides the number of bytes read and written.
+type RequestNotifierFunc func(bytesRead, bytesWritten int)
+
+// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
+// to facilitate testing of higher level or specialized consumers and producers
+// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
+// but rather provides a facility to do that. It takes care of the TCP
+// transport, request unmarshaling, and response marshaling, and leaves it to the
+// test writer to program MockBroker behaviour that is correct according to the
+// Kafka API protocol.
+//
+// MockBroker is implemented as a TCP server listening on a kernel-selected
+// localhost port that can accept many connections. It reads Kafka requests
+// from those connections and returns responses programmed by the SetHandlerByMap
+// function. If a MockBroker receives a request that it has no programmed
+// response for, then it returns nothing and the request times out.
+//
+// A set of MockRequest builders to define mappings used by MockBroker is
+// provided by Sarama. But users can develop MockRequests of their own and use
+// them along with or instead of the standard ones.
+//
+// When running tests with MockBroker it is strongly recommended to specify
+// a timeout to `go test` so that if the broker hangs waiting for a response,
+// the test panics.
+//
+// It is not necessary to prefix message length or correlation ID to your
+// response bytes, the server does that automatically as a convenience.
+type MockBroker struct {
+ brokerID int32
+ port int32
+ closing chan none
+ stopper chan none
+ expectations chan encoder
+ listener net.Listener
+ t TestReporter
+ latency time.Duration
+ handler requestHandlerFunc
+ notifier RequestNotifierFunc
+ history []RequestResponse
+ lock sync.Mutex
+}
+
+// RequestResponse represents a Request/Response pair processed by MockBroker.
+type RequestResponse struct {
+ Request protocolBody
+ Response encoder
+}
+
+// SetLatency makes broker pause for the specified period every time before
+// replying.
+func (b *MockBroker) SetLatency(latency time.Duration) {
+ b.latency = latency
+}
+
+// SetHandlerByMap defines mapping of Request types to MockResponses. When a
+// request is received by the broker, it looks up the request type in the map
+// and uses the found MockResponse instance to generate an appropriate reply.
+// If the request type is not found in the map then nothing is sent.
+func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
+ b.setHandler(func(req *request) (res encoder) {
+ reqTypeName := reflect.TypeOf(req.body).Elem().Name()
+ mockResponse := handlerMap[reqTypeName]
+ if mockResponse == nil {
+ return nil
+ }
+ return mockResponse.For(req.body)
+ })
+}
+
+// SetNotifier sets a function that will get invoked whenever a request has been
+// processed successfully, and will provide the number of bytes read and written.
+func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
+ b.lock.Lock()
+ b.notifier = notifier
+ b.lock.Unlock()
+}
+
+// BrokerID returns broker ID assigned to the broker.
+func (b *MockBroker) BrokerID() int32 {
+ return b.brokerID
+}
+
+// History returns a slice of RequestResponse pairs in the order they were
+// processed by the broker. Note that in case of multiple connections to the
+// broker the order expected by a test can be different from the order recorded
+// in the history, unless some synchronization is implemented in the test.
+func (b *MockBroker) History() []RequestResponse {
+ b.lock.Lock()
+ history := make([]RequestResponse, len(b.history))
+ copy(history, b.history)
+ b.lock.Unlock()
+ return history
+}
+
+// Port returns the TCP port number the broker is listening for requests on.
+func (b *MockBroker) Port() int32 {
+ return b.port
+}
+
+// Addr returns the broker connection string in the form "
:".
+func (b *MockBroker) Addr() string {
+ return b.listener.Addr().String()
+}
+
+// Close terminates the broker blocking until it stops internal goroutines and
+// releases all resources.
+func (b *MockBroker) Close() {
+ close(b.expectations)
+ if len(b.expectations) > 0 {
+ buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
+ for e := range b.expectations {
+ _, _ = buf.WriteString(spew.Sdump(e))
+ }
+ b.t.Error(buf.String())
+ }
+ close(b.closing)
+ <-b.stopper
+}
+
+// setHandler sets the specified function as the request handler. Whenever
+// a mock broker reads a request from the wire it passes the request to the
+// function and sends back whatever the handler function returns.
+func (b *MockBroker) setHandler(handler requestHandlerFunc) {
+ b.lock.Lock()
+ b.handler = handler
+ b.lock.Unlock()
+}
+
+func (b *MockBroker) serverLoop() {
+ defer close(b.stopper)
+ var err error
+ var conn net.Conn
+
+ go func() {
+ <-b.closing
+ err := b.listener.Close()
+ if err != nil {
+ b.t.Error(err)
+ }
+ }()
+
+ wg := &sync.WaitGroup{}
+ i := 0
+ for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
+ wg.Add(1)
+ go b.handleRequests(conn, i, wg)
+ i++
+ }
+ wg.Wait()
+ Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
+}
+
+func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) {
+ defer wg.Done()
+ defer func() {
+ _ = conn.Close()
+ }()
+ Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
+ var err error
+
+ abort := make(chan none)
+ defer close(abort)
+ go func() {
+ select {
+ case <-b.closing:
+ _ = conn.Close()
+ case <-abort:
+ }
+ }()
+
+ resHeader := make([]byte, 8)
+ for {
+ req, bytesRead, err := decodeRequest(conn)
+ if err != nil {
+ Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
+ b.serverError(err)
+ break
+ }
+
+ if b.latency > 0 {
+ time.Sleep(b.latency)
+ }
+
+ b.lock.Lock()
+ res := b.handler(req)
+ b.history = append(b.history, RequestResponse{req.body, res})
+ b.lock.Unlock()
+
+ if res == nil {
+ Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
+ continue
+ }
+ Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
+
+ encodedRes, err := encode(res, nil)
+ if err != nil {
+ b.serverError(err)
+ break
+ }
+ if len(encodedRes) == 0 {
+ b.lock.Lock()
+ if b.notifier != nil {
+ b.notifier(bytesRead, 0)
+ }
+ b.lock.Unlock()
+ continue
+ }
+
+ binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4))
+ binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID))
+ if _, err = conn.Write(resHeader); err != nil {
+ b.serverError(err)
+ break
+ }
+ if _, err = conn.Write(encodedRes); err != nil {
+ b.serverError(err)
+ break
+ }
+
+ b.lock.Lock()
+ if b.notifier != nil {
+ b.notifier(bytesRead, len(resHeader)+len(encodedRes))
+ }
+ b.lock.Unlock()
+ }
+ Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
+}
+
+func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) {
+ select {
+ case res, ok := <-b.expectations:
+ if !ok {
+ return nil
+ }
+ return res
+ case <-time.After(expectationTimeout):
+ return nil
+ }
+}
+
+func (b *MockBroker) serverError(err error) {
+ isConnectionClosedError := false
+ if _, ok := err.(*net.OpError); ok {
+ isConnectionClosedError = true
+ } else if err == io.EOF {
+ isConnectionClosedError = true
+ } else if err.Error() == "use of closed network connection" {
+ isConnectionClosedError = true
+ }
+
+ if isConnectionClosedError {
+ return
+ }
+
+ b.t.Errorf(err.Error())
+}
+
+// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the
+// test framework and a broker ID. If an error occurs it is
+// simply logged to the TestReporter and the broker exits.
+func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
+ return NewMockBrokerAddr(t, brokerID, "localhost:0")
+}
+
+// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
+// it rather than just some ephemeral port.
+func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
+ var err error
+
+ broker := &MockBroker{
+ closing: make(chan none),
+ stopper: make(chan none),
+ t: t,
+ brokerID: brokerID,
+ expectations: make(chan encoder, 512),
+ }
+ broker.handler = broker.defaultRequestHandler
+
+ broker.listener, err = net.Listen("tcp", addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
+ _, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ tmp, err := strconv.ParseInt(portStr, 10, 32)
+ if err != nil {
+ t.Fatal(err)
+ }
+ broker.port = int32(tmp)
+
+ go broker.serverLoop()
+
+ return broker
+}
+
+func (b *MockBroker) Returns(e encoder) {
+ b.expectations <- e
+}
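
A common pattern is to combine MockBroker with the MockResponse builders from mockresponses.go (added below) so that a real Client can bootstrap against it in a test. A hedged sketch from inside the sarama package; the topic name is made up, and from an external test the same identifiers would carry the sarama. prefix:

```go
broker := NewMockBroker(t, 1)
defer broker.Close()

// Map request type names to canned responses; unmapped requests get no reply.
broker.SetHandlerByMap(map[string]MockResponse{
	"MetadataRequest": NewMockMetadataResponse(t).
		SetBroker(broker.Addr(), broker.BrokerID()).
		SetLeader("my_topic", 0, broker.BrokerID()),
})

client, err := NewClient([]string{broker.Addr()}, nil)
if err != nil {
	t.Fatal(err)
}
_ = client.Close()
```
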
diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go
new file mode 100644
index 000000000..a20314209
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mockresponses.go
@@ -0,0 +1,455 @@
+package sarama
+
+import (
+ "fmt"
+)
+
+// TestReporter has methods matching go's testing.T to avoid importing
+// `testing` in the main part of the library.
+type TestReporter interface {
+ Error(...interface{})
+ Errorf(string, ...interface{})
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+}
+
+// MockResponse is a response builder interface: it defines one method that
+// allows generating a response based on a request body. MockResponses are used
+// to program behavior of MockBroker in tests.
+type MockResponse interface {
+ For(reqBody versionedDecoder) (res encoder)
+}
+
+// MockWrapper is a mock response builder that returns a particular concrete
+// response regardless of the actual request passed to the `For` method.
+type MockWrapper struct {
+ res encoder
+}
+
+func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) {
+ return mw.res
+}
+
+func NewMockWrapper(res encoder) *MockWrapper {
+ return &MockWrapper{res: res}
+}
+
+// MockSequence is a mock response builder that is created from a sequence of
+// concrete responses. Every time a `MockBroker` calls its `For` method
+// the next response from the sequence is returned. When the end of the
+// sequence is reached the last element from the sequence is returned.
+type MockSequence struct {
+ responses []MockResponse
+}
+
+func NewMockSequence(responses ...interface{}) *MockSequence {
+ ms := &MockSequence{}
+ ms.responses = make([]MockResponse, len(responses))
+ for i, res := range responses {
+ switch res := res.(type) {
+ case MockResponse:
+ ms.responses[i] = res
+ case encoder:
+ ms.responses[i] = NewMockWrapper(res)
+ default:
+ panic(fmt.Sprintf("Unexpected response type: %T", res))
+ }
+ }
+ return ms
+}
+
+func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) {
+ res = mc.responses[0].For(reqBody)
+ if len(mc.responses) > 1 {
+ mc.responses = mc.responses[1:]
+ }
+ return res
+}
+
+// MockMetadataResponse is a `MetadataResponse` builder.
+type MockMetadataResponse struct {
+ leaders map[string]map[int32]int32
+ brokers map[string]int32
+ t TestReporter
+}
+
+func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse {
+ return &MockMetadataResponse{
+ leaders: make(map[string]map[int32]int32),
+ brokers: make(map[string]int32),
+ t: t,
+ }
+}
+
+func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse {
+ partitions := mmr.leaders[topic]
+ if partitions == nil {
+ partitions = make(map[int32]int32)
+ mmr.leaders[topic] = partitions
+ }
+ partitions[partition] = brokerID
+ return mmr
+}
+
+func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse {
+ mmr.brokers[addr] = brokerID
+ return mmr
+}
+
+func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder {
+ metadataRequest := reqBody.(*MetadataRequest)
+ metadataResponse := &MetadataResponse{}
+ for addr, brokerID := range mmr.brokers {
+ metadataResponse.AddBroker(addr, brokerID)
+ }
+ if len(metadataRequest.Topics) == 0 {
+ for topic, partitions := range mmr.leaders {
+ for partition, brokerID := range partitions {
+ metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
+ }
+ }
+ return metadataResponse
+ }
+ for _, topic := range metadataRequest.Topics {
+ for partition, brokerID := range mmr.leaders[topic] {
+ metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
+ }
+ }
+ return metadataResponse
+}
+
+// MockOffsetResponse is an `OffsetResponse` builder.
+type MockOffsetResponse struct {
+ offsets map[string]map[int32]map[int64]int64
+ t TestReporter
+}
+
+func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
+ return &MockOffsetResponse{
+ offsets: make(map[string]map[int32]map[int64]int64),
+ t: t,
+ }
+}
+
+func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse {
+ partitions := mor.offsets[topic]
+ if partitions == nil {
+ partitions = make(map[int32]map[int64]int64)
+ mor.offsets[topic] = partitions
+ }
+ times := partitions[partition]
+ if times == nil {
+ times = make(map[int64]int64)
+ partitions[partition] = times
+ }
+ times[time] = offset
+ return mor
+}
+
+func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder {
+ offsetRequest := reqBody.(*OffsetRequest)
+ offsetResponse := &OffsetResponse{}
+ for topic, partitions := range offsetRequest.blocks {
+ for partition, block := range partitions {
+ offset := mor.getOffset(topic, partition, block.time)
+ offsetResponse.AddTopicPartition(topic, partition, offset)
+ }
+ }
+ return offsetResponse
+}
+
+func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 {
+ partitions := mor.offsets[topic]
+ if partitions == nil {
+ mor.t.Errorf("missing topic: %s", topic)
+ }
+ times := partitions[partition]
+ if times == nil {
+ mor.t.Errorf("missing partition: %d", partition)
+ }
+ offset, ok := times[time]
+ if !ok {
+ mor.t.Errorf("missing time: %d", time)
+ }
+ return offset
+}
+
+// MockFetchResponse is a `FetchResponse` builder.
+type MockFetchResponse struct {
+ messages map[string]map[int32]map[int64]Encoder
+ highWaterMarks map[string]map[int32]int64
+ t TestReporter
+ batchSize int
+}
+
+func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
+ return &MockFetchResponse{
+ messages: make(map[string]map[int32]map[int64]Encoder),
+ highWaterMarks: make(map[string]map[int32]int64),
+ t: t,
+ batchSize: batchSize,
+ }
+}
+
+func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
+ partitions := mfr.messages[topic]
+ if partitions == nil {
+ partitions = make(map[int32]map[int64]Encoder)
+ mfr.messages[topic] = partitions
+ }
+ messages := partitions[partition]
+ if messages == nil {
+ messages = make(map[int64]Encoder)
+ partitions[partition] = messages
+ }
+ messages[offset] = msg
+ return mfr
+}
+
+func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse {
+ partitions := mfr.highWaterMarks[topic]
+ if partitions == nil {
+ partitions = make(map[int32]int64)
+ mfr.highWaterMarks[topic] = partitions
+ }
+ partitions[partition] = offset
+ return mfr
+}
+
+func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder {
+ fetchRequest := reqBody.(*FetchRequest)
+ res := &FetchResponse{}
+ for topic, partitions := range fetchRequest.blocks {
+ for partition, block := range partitions {
+ initialOffset := block.fetchOffset
+ offset := initialOffset
+ maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition))
+ for i := 0; i < mfr.batchSize && offset < maxOffset; {
+ msg := mfr.getMessage(topic, partition, offset)
+ if msg != nil {
+ res.AddMessage(topic, partition, nil, msg, offset)
+ i++
+ }
+ offset++
+ }
+ fb := res.GetBlock(topic, partition)
+ if fb == nil {
+ res.AddError(topic, partition, ErrNoError)
+ fb = res.GetBlock(topic, partition)
+ }
+ fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition)
+ }
+ }
+ return res
+}
+
+func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder {
+ partitions := mfr.messages[topic]
+ if partitions == nil {
+ return nil
+ }
+ messages := partitions[partition]
+ if messages == nil {
+ return nil
+ }
+ return messages[offset]
+}
+
+func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int {
+ partitions := mfr.messages[topic]
+ if partitions == nil {
+ return 0
+ }
+ messages := partitions[partition]
+ if messages == nil {
+ return 0
+ }
+ return len(messages)
+}
+
+func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 {
+ partitions := mfr.highWaterMarks[topic]
+ if partitions == nil {
+ return 0
+ }
+ return partitions[partition]
+}
+
+// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder.
+type MockConsumerMetadataResponse struct {
+ coordinators map[string]interface{}
+ t TestReporter
+}
+
+func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse {
+ return &MockConsumerMetadataResponse{
+ coordinators: make(map[string]interface{}),
+ t: t,
+ }
+}
+
+func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse {
+ mr.coordinators[group] = broker
+ return mr
+}
+
+func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse {
+ mr.coordinators[group] = kerror
+ return mr
+}
+
+func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*ConsumerMetadataRequest)
+ group := req.ConsumerGroup
+ res := &ConsumerMetadataResponse{}
+ v := mr.coordinators[group]
+ switch v := v.(type) {
+ case *MockBroker:
+ res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
+ case KError:
+ res.Err = v
+ }
+ return res
+}
+
+// MockOffsetCommitResponse is an `OffsetCommitResponse` builder.
+type MockOffsetCommitResponse struct {
+ errors map[string]map[string]map[int32]KError
+ t TestReporter
+}
+
+func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse {
+ return &MockOffsetCommitResponse{t: t}
+}
+
+func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse {
+ if mr.errors == nil {
+ mr.errors = make(map[string]map[string]map[int32]KError)
+ }
+ topics := mr.errors[group]
+ if topics == nil {
+ topics = make(map[string]map[int32]KError)
+ mr.errors[group] = topics
+ }
+ partitions := topics[topic]
+ if partitions == nil {
+ partitions = make(map[int32]KError)
+ topics[topic] = partitions
+ }
+ partitions[partition] = kerror
+ return mr
+}
+
+func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*OffsetCommitRequest)
+ group := req.ConsumerGroup
+ res := &OffsetCommitResponse{}
+ for topic, partitions := range req.blocks {
+ for partition := range partitions {
+ res.AddError(topic, partition, mr.getError(group, topic, partition))
+ }
+ }
+ return res
+}
+
+func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError {
+ topics := mr.errors[group]
+ if topics == nil {
+ return ErrNoError
+ }
+ partitions := topics[topic]
+ if partitions == nil {
+ return ErrNoError
+ }
+ kerror, ok := partitions[partition]
+ if !ok {
+ return ErrNoError
+ }
+ return kerror
+}
+
+// MockProduceResponse is a `ProduceResponse` builder.
+type MockProduceResponse struct {
+ errors map[string]map[int32]KError
+ t TestReporter
+}
+
+func NewMockProduceResponse(t TestReporter) *MockProduceResponse {
+ return &MockProduceResponse{t: t}
+}
+
+func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse {
+ if mr.errors == nil {
+ mr.errors = make(map[string]map[int32]KError)
+ }
+ partitions := mr.errors[topic]
+ if partitions == nil {
+ partitions = make(map[int32]KError)
+ mr.errors[topic] = partitions
+ }
+ partitions[partition] = kerror
+ return mr
+}
+
+func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*ProduceRequest)
+ res := &ProduceResponse{}
+ for topic, partitions := range req.msgSets {
+ for partition := range partitions {
+ res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
+ }
+ }
+ return res
+}
+
+func (mr *MockProduceResponse) getError(topic string, partition int32) KError {
+ partitions := mr.errors[topic]
+ if partitions == nil {
+ return ErrNoError
+ }
+ kerror, ok := partitions[partition]
+ if !ok {
+ return ErrNoError
+ }
+ return kerror
+}
+
+// MockOffsetFetchResponse is an `OffsetFetchResponse` builder.
+type MockOffsetFetchResponse struct {
+ offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
+ t TestReporter
+}
+
+func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse {
+ return &MockOffsetFetchResponse{t: t}
+}
+
+func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse {
+ if mr.offsets == nil {
+ mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock)
+ }
+ topics := mr.offsets[group]
+ if topics == nil {
+ topics = make(map[string]map[int32]*OffsetFetchResponseBlock)
+ mr.offsets[group] = topics
+ }
+ partitions := topics[topic]
+ if partitions == nil {
+ partitions = make(map[int32]*OffsetFetchResponseBlock)
+ topics[topic] = partitions
+ }
+ partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror}
+ return mr
+}
+
+func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*OffsetFetchRequest)
+ group := req.ConsumerGroup
+ res := &OffsetFetchResponse{}
+ for topic, partitions := range mr.offsets[group] {
+ for partition, block := range partitions {
+ res.AddBlock(topic, partition, block)
+ }
+ }
+ return res
+}
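
For multi-step tests, these builders can be mixed with MockSequence, which replays its responses in order and then repeats the last one. Continuing the MockBroker sketch above (a MockBroker and TestReporter are assumed in scope; topic and offsets are illustrative), a broker programmed for a simple consumer flow might look like:

```go
broker.SetHandlerByMap(map[string]MockResponse{
	"MetadataRequest": NewMockMetadataResponse(t).
		SetBroker(broker.Addr(), broker.BrokerID()).
		SetLeader("my_topic", 0, broker.BrokerID()),
	"OffsetRequest": NewMockOffsetResponse(t).
		SetOffset("my_topic", 0, OffsetOldest, 0).
		SetOffset("my_topic", 0, OffsetNewest, 1),
	// First fetch yields one message, later fetches return the empty response.
	"FetchRequest": NewMockSequence(
		NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 0, StringEncoder("hello")),
		&FetchResponse{},
	),
})
```
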
diff --git a/vendor/github.com/Shopify/sarama/mocks/README.md b/vendor/github.com/Shopify/sarama/mocks/README.md
new file mode 100644
index 000000000..55a6c2e61
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mocks/README.md
@@ -0,0 +1,13 @@
+# sarama/mocks
+
+The `mocks` subpackage includes mock implementations that implement the interfaces of the major sarama types.
+You can use them to test your sarama applications using dependency injection.
+
+The following mock objects are available:
+
+- [Consumer](https://godoc.org/github.com/Shopify/sarama/mocks#Consumer), which will create [PartitionConsumer](https://godoc.org/github.com/Shopify/sarama/mocks#PartitionConsumer) mocks.
+- [AsyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#AsyncProducer)
+- [SyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#SyncProducer)
+
+The mocks allow you to set expectations on them. When you close the mocks, the expectations will be verified,
+and the results will be reported to the `*testing.T` object you provided when creating the mock.
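
As a minimal, hypothetical sketch of the dependency-injection pattern the README describes (function and topic names are made up), a production function that only depends on the `sarama.AsyncProducer` interface can be exercised against the mock:

```go
package myapp

import (
	"testing"

	"github.com/Shopify/sarama"
	"github.com/Shopify/sarama/mocks"
)

// publish is the code under test; it only depends on the interface.
func publish(p sarama.AsyncProducer, value string) {
	p.Input() <- &sarama.ProducerMessage{Topic: "my_topic", Value: sarama.StringEncoder(value)}
}

func TestPublish(t *testing.T) {
	mp := mocks.NewAsyncProducer(t, nil)
	mp.ExpectInputAndSucceed()

	publish(mp, "hello")

	// Close verifies that every expectation was consumed.
	if err := mp.Close(); err != nil {
		t.Error(err)
	}
}
```
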
diff --git a/vendor/github.com/Shopify/sarama/mocks/async_producer.go b/vendor/github.com/Shopify/sarama/mocks/async_producer.go
new file mode 100644
index 000000000..24ae5c0d5
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mocks/async_producer.go
@@ -0,0 +1,174 @@
+package mocks
+
+import (
+ "sync"
+
+ "github.com/Shopify/sarama"
+)
+
+// AsyncProducer implements sarama's Producer interface for testing purposes.
+// Before you can send messages to its Input channel, you have to set expectations
+// so it knows how to handle the input; it returns an error if the number of messages
+// received is bigger than the number of expectations set. You can also set a
+// function in each expectation so that the message value is checked by this function
+// and an error is returned if the match fails.
+type AsyncProducer struct {
+ l sync.Mutex
+ t ErrorReporter
+ expectations []*producerExpectation
+ closed chan struct{}
+ input chan *sarama.ProducerMessage
+ successes chan *sarama.ProducerMessage
+ errors chan *sarama.ProducerError
+ lastOffset int64
+}
+
+// NewAsyncProducer instantiates a new Producer mock. The t argument should
+// be the *testing.T instance of your test method. An error will be written to it if
+// an expectation is violated. The config argument is used to determine whether it
+// should ack successes on the Successes channel.
+func NewAsyncProducer(t ErrorReporter, config *sarama.Config) *AsyncProducer {
+ if config == nil {
+ config = sarama.NewConfig()
+ }
+ mp := &AsyncProducer{
+ t: t,
+ closed: make(chan struct{}, 0),
+ expectations: make([]*producerExpectation, 0),
+ input: make(chan *sarama.ProducerMessage, config.ChannelBufferSize),
+ successes: make(chan *sarama.ProducerMessage, config.ChannelBufferSize),
+ errors: make(chan *sarama.ProducerError, config.ChannelBufferSize),
+ }
+
+ go func() {
+ defer func() {
+ close(mp.successes)
+ close(mp.errors)
+ }()
+
+ for msg := range mp.input {
+ mp.l.Lock()
+ if mp.expectations == nil || len(mp.expectations) == 0 {
+ mp.expectations = nil
+ mp.t.Errorf("No more expectation set on this mock producer to handle the input message.")
+ } else {
+ expectation := mp.expectations[0]
+ mp.expectations = mp.expectations[1:]
+ if expectation.CheckFunction != nil {
+ if val, err := msg.Value.Encode(); err != nil {
+ mp.t.Errorf("Input message encoding failed: %s", err.Error())
+ mp.errors <- &sarama.ProducerError{Err: err, Msg: msg}
+ } else {
+ err = expectation.CheckFunction(val)
+ if err != nil {
+ mp.t.Errorf("Check function returned an error: %s", err.Error())
+ mp.errors <- &sarama.ProducerError{Err: err, Msg: msg}
+ }
+ }
+ }
+ if expectation.Result == errProduceSuccess {
+ mp.lastOffset++
+ if config.Producer.Return.Successes {
+ msg.Offset = mp.lastOffset
+ mp.successes <- msg
+ }
+ } else {
+ if config.Producer.Return.Errors {
+ mp.errors <- &sarama.ProducerError{Err: expectation.Result, Msg: msg}
+ }
+ }
+ }
+ mp.l.Unlock()
+ }
+
+ mp.l.Lock()
+ if len(mp.expectations) > 0 {
+ mp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(mp.expectations))
+ }
+ mp.l.Unlock()
+
+ close(mp.closed)
+ }()
+
+ return mp
+}
+
+////////////////////////////////////////////////
+// Implement Producer interface
+////////////////////////////////////////////////
+
+// AsyncClose corresponds with the AsyncClose method of sarama's Producer implementation.
+// By closing a mock producer, you also tell it that no more input will be provided, so it will
+// write an error to the test state if there are any remaining expectations.
+func (mp *AsyncProducer) AsyncClose() {
+ close(mp.input)
+}
+
+// Close corresponds with the Close method of sarama's Producer implementation.
+// By closing a mock producer, you also tell it that no more input will be provided, so it will
+// write an error to the test state if there are any remaining expectations.
+func (mp *AsyncProducer) Close() error {
+ mp.AsyncClose()
+ <-mp.closed
+ return nil
+}
+
+// Input corresponds with the Input method of sarama's Producer implementation.
+// You have to set expectations on the mock producer before writing messages to the Input
+// channel, so it knows how to handle them. If there are no remaining expectations and
+// a message is written to the Input channel, the mock producer will write an error to the test
+// state object.
+func (mp *AsyncProducer) Input() chan<- *sarama.ProducerMessage {
+ return mp.input
+}
+
+// Successes corresponds with the Successes method of sarama's Producer implementation.
+func (mp *AsyncProducer) Successes() <-chan *sarama.ProducerMessage {
+ return mp.successes
+}
+
+// Errors corresponds with the Errors method of sarama's Producer implementation.
+func (mp *AsyncProducer) Errors() <-chan *sarama.ProducerError {
+ return mp.errors
+}
+
+////////////////////////////////////////////////
+// Setting expectations
+////////////////////////////////////////////////
+
+// ExpectInputWithCheckerFunctionAndSucceed sets an expectation on the mock producer that a message
+// will be provided on the input channel. The mock producer will call the given function to check
+// the message value. If an error is returned it will be made available on the Errors channel
+// otherwise the mock will handle the message as if it produced successfully, i.e. it will make
+// it available on the Successes channel if the Producer.Return.Successes setting is set to true.
+func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndSucceed(cf ValueChecker) {
+ mp.l.Lock()
+ defer mp.l.Unlock()
+ mp.expectations = append(mp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf})
+}
+
+// ExpectInputWithCheckerFunctionAndFail sets an expectation on the mock producer that a message
+// will be provided on the input channel. The mock producer will first call the given function to
+// check the message value. If an error is returned it will be made available on the Errors channel
+// otherwise the mock will handle the message as if it failed to produce successfully. This means
+// it will make a ProducerError available on the Errors channel.
+func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndFail(cf ValueChecker, err error) {
+ mp.l.Lock()
+ defer mp.l.Unlock()
+ mp.expectations = append(mp.expectations, &producerExpectation{Result: err, CheckFunction: cf})
+}
+
+// ExpectInputAndSucceed sets an expectation on the mock producer that a message will be provided
+// on the input channel. The mock producer will handle the message as if it is produced successfully,
+// i.e. it will make it available on the Successes channel if the Producer.Return.Successes setting
+// is set to true.
+func (mp *AsyncProducer) ExpectInputAndSucceed() {
+ mp.ExpectInputWithCheckerFunctionAndSucceed(nil)
+}
+
+// ExpectInputAndFail sets an expectation on the mock producer that a message will be provided
+// on the input channel. The mock producer will handle the message as if it failed to produce
+// successfully. This means it will make a ProducerError available on the Errors channel.
+func (mp *AsyncProducer) ExpectInputAndFail(err error) {
+ mp.ExpectInputWithCheckerFunctionAndFail(nil, err)
+}
diff --git a/vendor/github.com/Shopify/sarama/mocks/async_producer_test.go b/vendor/github.com/Shopify/sarama/mocks/async_producer_test.go
new file mode 100644
index 000000000..b5d92aad8
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mocks/async_producer_test.go
@@ -0,0 +1,132 @@
+package mocks
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+ "strings"
+ "testing"
+
+ "github.com/Shopify/sarama"
+)
+
+func generateRegexpChecker(re string) func([]byte) error {
+ return func(val []byte) error {
+ matched, err := regexp.MatchString(re, string(val))
+ if err != nil {
+ return errors.New("Error while trying to match the input message with the expected pattern: " + err.Error())
+ }
+ if !matched {
+ return fmt.Errorf("No match between input value \"%s\" and expected pattern \"%s\"", val, re)
+ }
+ return nil
+ }
+}
+
+type testReporterMock struct {
+ errors []string
+}
+
+func newTestReporterMock() *testReporterMock {
+ return &testReporterMock{errors: make([]string, 0)}
+}
+
+func (trm *testReporterMock) Errorf(format string, args ...interface{}) {
+ trm.errors = append(trm.errors, fmt.Sprintf(format, args...))
+}
+
+func TestMockAsyncProducerImplementsAsyncProducerInterface(t *testing.T) {
+ var mp interface{} = &AsyncProducer{}
+ if _, ok := mp.(sarama.AsyncProducer); !ok {
+ t.Error("The mock producer should implement the sarama.Producer interface.")
+ }
+}
+
+func TestProducerReturnsExpectationsToChannels(t *testing.T) {
+ config := sarama.NewConfig()
+ config.Producer.Return.Successes = true
+ mp := NewAsyncProducer(t, config)
+
+ mp.ExpectInputAndSucceed()
+ mp.ExpectInputAndSucceed()
+ mp.ExpectInputAndFail(sarama.ErrOutOfBrokers)
+
+ mp.Input() <- &sarama.ProducerMessage{Topic: "test 1"}
+ mp.Input() <- &sarama.ProducerMessage{Topic: "test 2"}
+ mp.Input() <- &sarama.ProducerMessage{Topic: "test 3"}
+
+ msg1 := <-mp.Successes()
+ msg2 := <-mp.Successes()
+ err1 := <-mp.Errors()
+
+ if msg1.Topic != "test 1" {
+ t.Error("Expected message 1 to be returned first")
+ }
+
+ if msg2.Topic != "test 2" {
+ t.Error("Expected message 2 to be returned second")
+ }
+
+ if err1.Msg.Topic != "test 3" || err1.Err != sarama.ErrOutOfBrokers {
+ t.Error("Expected message 3 to be returned as error")
+ }
+
+ if err := mp.Close(); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestProducerWithTooFewExpectations(t *testing.T) {
+ trm := newTestReporterMock()
+ mp := NewAsyncProducer(trm, nil)
+ mp.ExpectInputAndSucceed()
+
+ mp.Input() <- &sarama.ProducerMessage{Topic: "test"}
+ mp.Input() <- &sarama.ProducerMessage{Topic: "test"}
+
+ if err := mp.Close(); err != nil {
+ t.Error(err)
+ }
+
+ if len(trm.errors) != 1 {
+ t.Error("Expected to report an error")
+ }
+}
+
+func TestProducerWithTooManyExpectations(t *testing.T) {
+ trm := newTestReporterMock()
+ mp := NewAsyncProducer(trm, nil)
+ mp.ExpectInputAndSucceed()
+ mp.ExpectInputAndFail(sarama.ErrOutOfBrokers)
+
+ mp.Input() <- &sarama.ProducerMessage{Topic: "test"}
+ if err := mp.Close(); err != nil {
+ t.Error(err)
+ }
+
+ if len(trm.errors) != 1 {
+ t.Error("Expected to report an error")
+ }
+}
+
+func TestProducerWithCheckerFunction(t *testing.T) {
+ trm := newTestReporterMock()
+ mp := NewAsyncProducer(trm, nil)
+ mp.ExpectInputWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes"))
+ mp.ExpectInputWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes$"))
+
+ mp.Input() <- &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
+ mp.Input() <- &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
+ if err := mp.Close(); err != nil {
+ t.Error(err)
+ }
+
+ if len(mp.Errors()) != 1 {
+ t.Error("Expected to report an error")
+ }
+
+ err1 := <-mp.Errors()
+ if !strings.HasPrefix(err1.Err.Error(), "No match") {
+ t.Error("Expected to report a value check error, found: ", err1.Err)
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/mocks/consumer.go b/vendor/github.com/Shopify/sarama/mocks/consumer.go
new file mode 100644
index 000000000..003d4d3e2
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mocks/consumer.go
@@ -0,0 +1,315 @@
+package mocks
+
+import (
+ "sync"
+ "sync/atomic"
+
+ "github.com/Shopify/sarama"
+)
+
+// Consumer implements sarama's Consumer interface for testing purposes.
+// Before you can start consuming from this consumer, you have to register
+// topic/partitions using ExpectConsumePartition, and set expectations on them.
+type Consumer struct {
+ l sync.Mutex
+ t ErrorReporter
+ config *sarama.Config
+ partitionConsumers map[string]map[int32]*PartitionConsumer
+ metadata map[string][]int32
+}
+
+// NewConsumer returns a new mock Consumer instance. The t argument should
+// be the *testing.T instance of your test method. An error will be written to it if
+// an expectation is violated. The config argument can be set to nil.
+func NewConsumer(t ErrorReporter, config *sarama.Config) *Consumer {
+ if config == nil {
+ config = sarama.NewConfig()
+ }
+
+ c := &Consumer{
+ t: t,
+ config: config,
+ partitionConsumers: make(map[string]map[int32]*PartitionConsumer),
+ }
+ return c
+}
+
+///////////////////////////////////////////////////
+// Consumer interface implementation
+///////////////////////////////////////////////////
+
+// ConsumePartition implements the ConsumePartition method from the sarama.Consumer interface.
+// Before you can start consuming a partition, you have to set expectations on it using
+// ExpectConsumePartition. You can only consume a partition once per consumer.
+func (c *Consumer) ConsumePartition(topic string, partition int32, offset int64) (sarama.PartitionConsumer, error) {
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ if c.partitionConsumers[topic] == nil || c.partitionConsumers[topic][partition] == nil {
+ c.t.Errorf("No expectations set for %s/%d", topic, partition)
+ return nil, errOutOfExpectations
+ }
+
+ pc := c.partitionConsumers[topic][partition]
+ if pc.consumed {
+ return nil, sarama.ConfigurationError("The topic/partition is already being consumed")
+ }
+
+ if pc.offset != AnyOffset && pc.offset != offset {
+ c.t.Errorf("Unexpected offset when calling ConsumePartition for %s/%d. Expected %d, got %d.", topic, partition, pc.offset, offset)
+ }
+
+ pc.consumed = true
+ return pc, nil
+}
+
+// Topics returns a list of topics, as registered with SetTopicMetadata
+func (c *Consumer) Topics() ([]string, error) {
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ if c.metadata == nil {
+ c.t.Errorf("Unexpected call to Topics. Initialize the mock's topic metadata with SetMetadata.")
+ return nil, sarama.ErrOutOfBrokers
+ }
+
+ var result []string
+ for topic := range c.metadata {
+ result = append(result, topic)
+ }
+ return result, nil
+}
+
+// Partitions returns the list of partitions for the given topic, as registered with SetTopicMetadata
+func (c *Consumer) Partitions(topic string) ([]int32, error) {
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ if c.metadata == nil {
+ c.t.Errorf("Unexpected call to Partitions. Initialize the mock's topic metadata with SetMetadata.")
+ return nil, sarama.ErrOutOfBrokers
+ }
+ if c.metadata[topic] == nil {
+ return nil, sarama.ErrUnknownTopicOrPartition
+ }
+
+ return c.metadata[topic], nil
+}
+
+func (c *Consumer) HighWaterMarks() map[string]map[int32]int64 {
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ hwms := make(map[string]map[int32]int64, len(c.partitionConsumers))
+ for topic, partitionConsumers := range c.partitionConsumers {
+ hwm := make(map[int32]int64, len(partitionConsumers))
+ for partition, pc := range partitionConsumers {
+ hwm[partition] = pc.HighWaterMarkOffset()
+ }
+ hwms[topic] = hwm
+ }
+
+ return hwms
+}
+
+// Close implements the Close method from the sarama.Consumer interface. It will close
+// all registered PartitionConsumer instances.
+func (c *Consumer) Close() error {
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ for _, partitions := range c.partitionConsumers {
+ for _, partitionConsumer := range partitions {
+ _ = partitionConsumer.Close()
+ }
+ }
+
+ return nil
+}
+
+///////////////////////////////////////////////////
+// Expectation API
+///////////////////////////////////////////////////
+
+// SetTopicMetadata sets the cluster's topic/partition metadata,
+// which will be returned by Topics() and Partitions().
+func (c *Consumer) SetTopicMetadata(metadata map[string][]int32) {
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ c.metadata = metadata
+}
+
+// ExpectConsumePartition will register a topic/partition, so you can set expectations on it.
+// The registered PartitionConsumer will be returned, so you can set expectations
+// on it using method chaining. Once a topic/partition is registered, you are
+// expected to start consuming it using ConsumePartition. If that doesn't happen,
+// an error will be written to the error reporter once the mock consumer is closed. It will
+// also expect the offset passed to ConsumePartition to match the offset registered here,
+// unless it was registered with AnyOffset.
+func (c *Consumer) ExpectConsumePartition(topic string, partition int32, offset int64) *PartitionConsumer {
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ if c.partitionConsumers[topic] == nil {
+ c.partitionConsumers[topic] = make(map[int32]*PartitionConsumer)
+ }
+
+ if c.partitionConsumers[topic][partition] == nil {
+ c.partitionConsumers[topic][partition] = &PartitionConsumer{
+ t: c.t,
+ topic: topic,
+ partition: partition,
+ offset: offset,
+ messages: make(chan *sarama.ConsumerMessage, c.config.ChannelBufferSize),
+ errors: make(chan *sarama.ConsumerError, c.config.ChannelBufferSize),
+ }
+ }
+
+ return c.partitionConsumers[topic][partition]
+}
+
+///////////////////////////////////////////////////
+// PartitionConsumer mock type
+///////////////////////////////////////////////////
+
+// PartitionConsumer implements sarama's PartitionConsumer interface for testing purposes.
+// It is returned by the mock Consumer's ConsumePartition method, but only if it is
+// registered first using the Consumer's ExpectConsumePartition method. Before consuming the
+// Errors and Messages channel, you should specify what values will be provided on these
+// channels using YieldMessage and YieldError.
+type PartitionConsumer struct {
+ highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ l sync.Mutex
+ t ErrorReporter
+ topic string
+ partition int32
+ offset int64
+ messages chan *sarama.ConsumerMessage
+ errors chan *sarama.ConsumerError
+ singleClose sync.Once
+ consumed bool
+ errorsShouldBeDrained bool
+ messagesShouldBeDrained bool
+}
+
+///////////////////////////////////////////////////
+// PartitionConsumer interface implementation
+///////////////////////////////////////////////////
+
+// AsyncClose implements the AsyncClose method from the sarama.PartitionConsumer interface.
+func (pc *PartitionConsumer) AsyncClose() {
+ pc.singleClose.Do(func() {
+ close(pc.messages)
+ close(pc.errors)
+ })
+}
+
+// Close implements the Close method from the sarama.PartitionConsumer interface. It will
+// verify whether the partition consumer was actually started.
+func (pc *PartitionConsumer) Close() error {
+ if !pc.consumed {
+ pc.t.Errorf("Expectations set on %s/%d, but no partition consumer was started.", pc.topic, pc.partition)
+ return errPartitionConsumerNotStarted
+ }
+
+ if pc.errorsShouldBeDrained && len(pc.errors) > 0 {
+ pc.t.Errorf("Expected the errors channel for %s/%d to be drained on close, but found %d errors.", pc.topic, pc.partition, len(pc.errors))
+ }
+
+ if pc.messagesShouldBeDrained && len(pc.messages) > 0 {
+ pc.t.Errorf("Expected the messages channel for %s/%d to be drained on close, but found %d messages.", pc.topic, pc.partition, len(pc.messages))
+ }
+
+ pc.AsyncClose()
+
+ var (
+ closeErr error
+ wg sync.WaitGroup
+ )
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ var errs = make(sarama.ConsumerErrors, 0)
+ for err := range pc.errors {
+ errs = append(errs, err)
+ }
+
+ if len(errs) > 0 {
+ closeErr = errs
+ }
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for range pc.messages {
+ // drain
+ }
+ }()
+
+ wg.Wait()
+ return closeErr
+}
+
+// Errors implements the Errors method from the sarama.PartitionConsumer interface.
+func (pc *PartitionConsumer) Errors() <-chan *sarama.ConsumerError {
+ return pc.errors
+}
+
+// Messages implements the Messages method from the sarama.PartitionConsumer interface.
+func (pc *PartitionConsumer) Messages() <-chan *sarama.ConsumerMessage {
+ return pc.messages
+}
+
+// HighWaterMarkOffset implements the HighWaterMarkOffset method from the sarama.PartitionConsumer interface.
+func (pc *PartitionConsumer) HighWaterMarkOffset() int64 {
+ return atomic.LoadInt64(&pc.highWaterMarkOffset) + 1
+}
+
+///////////////////////////////////////////////////
+// Expectation API
+///////////////////////////////////////////////////
+
+// YieldMessage will yield a message on the Messages channel of this partition consumer
+// when it is consumed. By default, the mock consumer will not verify whether this
+// message was consumed from the Messages channel, because there are legitimate
+// reasons for this not to happen. You can call ExpectMessagesDrainedOnClose so it will
+// verify that the channel is empty on close.
+func (pc *PartitionConsumer) YieldMessage(msg *sarama.ConsumerMessage) {
+ pc.l.Lock()
+ defer pc.l.Unlock()
+
+ msg.Topic = pc.topic
+ msg.Partition = pc.partition
+ msg.Offset = atomic.AddInt64(&pc.highWaterMarkOffset, 1)
+
+ pc.messages <- msg
+}
+
+// YieldError will yield an error on the Errors channel of this partition consumer
+// when it is consumed. By default, the mock consumer will not verify whether this error was
+// consumed from the Errors channel, because there are legitimate reasons for this
+// not to happen. You can call ExpectErrorsDrainedOnClose so it will verify that
+// the channel is empty on close.
+func (pc *PartitionConsumer) YieldError(err error) {
+ pc.errors <- &sarama.ConsumerError{
+ Topic: pc.topic,
+ Partition: pc.partition,
+ Err: err,
+ }
+}
+
+// ExpectMessagesDrainedOnClose sets an expectation on the partition consumer
+// that the messages channel will be fully drained when Close is called. If this
+// expectation is not met, an error is reported to the error reporter.
+func (pc *PartitionConsumer) ExpectMessagesDrainedOnClose() {
+ pc.messagesShouldBeDrained = true
+}
+
+// ExpectErrorsDrainedOnClose sets an expectation on the partition consumer
+// that the errors channel will be fully drained when Close is called. If this
+// expectation is not met, an error is reported to the error reporter.
+func (pc *PartitionConsumer) ExpectErrorsDrainedOnClose() {
+ pc.errorsShouldBeDrained = true
+}
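For reference, a minimal sketch of how the expectation API above is typically wired into a test; the package name, topic, and payload are illustrative assumptions, not part of this change:

package mytest

import (
	"testing"

	"github.com/Shopify/sarama"
	"github.com/Shopify/sarama/mocks"
)

func TestConsumesAllMessages(t *testing.T) {
	mc := mocks.NewConsumer(t, nil)
	defer func() {
		if err := mc.Close(); err != nil {
			t.Error(err)
		}
	}()

	// Register the partition, yield one message, and require the channel to be drained on close.
	pcMock := mc.ExpectConsumePartition("my-topic", 0, sarama.OffsetOldest)
	pcMock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("payload")})
	pcMock.ExpectMessagesDrainedOnClose()

	pc, err := mc.ConsumePartition("my-topic", 0, sarama.OffsetOldest)
	if err != nil {
		t.Fatal(err)
	}
	if msg := <-pc.Messages(); string(msg.Value) != "payload" {
		t.Errorf("unexpected message value: %s", msg.Value)
	}
	if err := pc.Close(); err != nil { // channel was drained, so no expectation failure is reported
		t.Error(err)
	}
}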
diff --git a/vendor/github.com/Shopify/sarama/mocks/consumer_test.go b/vendor/github.com/Shopify/sarama/mocks/consumer_test.go
new file mode 100644
index 000000000..311cfa026
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mocks/consumer_test.go
@@ -0,0 +1,249 @@
+package mocks
+
+import (
+ "sort"
+ "testing"
+
+ "github.com/Shopify/sarama"
+)
+
+func TestMockConsumerImplementsConsumerInterface(t *testing.T) {
+ var c interface{} = &Consumer{}
+ if _, ok := c.(sarama.Consumer); !ok {
+ t.Error("The mock consumer should implement the sarama.Consumer interface.")
+ }
+
+ var pc interface{} = &PartitionConsumer{}
+ if _, ok := pc.(sarama.PartitionConsumer); !ok {
+ t.Error("The mock partitionconsumer should implement the sarama.PartitionConsumer interface.")
+ }
+}
+
+func TestConsumerHandlesExpectations(t *testing.T) {
+ consumer := NewConsumer(t, nil)
+ defer func() {
+ if err := consumer.Close(); err != nil {
+ t.Error(err)
+ }
+ }()
+
+ consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world")})
+ consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers)
+ consumer.ExpectConsumePartition("test", 1, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world again")})
+ consumer.ExpectConsumePartition("other", 0, AnyOffset).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello other")})
+
+ pc_test0, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test0_msg := <-pc_test0.Messages()
+ if test0_msg.Topic != "test" || test0_msg.Partition != 0 || string(test0_msg.Value) != "hello world" {
+ t.Error("Message was not as expected:", test0_msg)
+ }
+ test0_err := <-pc_test0.Errors()
+ if test0_err.Err != sarama.ErrOutOfBrokers {
+ t.Error("Expected sarama.ErrOutOfBrokers, found:", test0_err.Err)
+ }
+
+ pc_test1, err := consumer.ConsumePartition("test", 1, sarama.OffsetOldest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test1_msg := <-pc_test1.Messages()
+ if test1_msg.Topic != "test" || test1_msg.Partition != 1 || string(test1_msg.Value) != "hello world again" {
+ t.Error("Message was not as expected:", test1_msg)
+ }
+
+ pc_other0, err := consumer.ConsumePartition("other", 0, sarama.OffsetNewest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ other0_msg := <-pc_other0.Messages()
+ if other0_msg.Topic != "other" || other0_msg.Partition != 0 || string(other0_msg.Value) != "hello other" {
+ t.Error("Message was not as expected:", other0_msg)
+ }
+}
+
+func TestConsumerReturnsNonconsumedErrorsOnClose(t *testing.T) {
+ consumer := NewConsumer(t, nil)
+ consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers)
+ consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers)
+
+ pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ select {
+ case <-pc.Messages():
+ t.Error("Did not epxect a message on the messages channel.")
+ case err := <-pc.Errors():
+ if err.Err != sarama.ErrOutOfBrokers {
+ t.Error("Expected sarama.ErrOutOfBrokers, found", err)
+ }
+ }
+
+ errs := pc.Close().(sarama.ConsumerErrors)
+ if len(errs) != 1 && errs[0].Err != sarama.ErrOutOfBrokers {
+ t.Error("Expected Close to return the remaining sarama.ErrOutOfBrokers")
+ }
+}
+
+func TestConsumerWithoutExpectationsOnPartition(t *testing.T) {
+ trm := newTestReporterMock()
+ consumer := NewConsumer(trm, nil)
+
+ _, err := consumer.ConsumePartition("test", 1, sarama.OffsetOldest)
+ if err != errOutOfExpectations {
+ t.Error("Expected ConsumePartition to return errOutOfExpectations")
+ }
+
+ if err := consumer.Close(); err != nil {
+ t.Error("No error expected on close, but found:", err)
+ }
+
+ if len(trm.errors) != 1 {
+ t.Errorf("Expected an expectation failure to be set on the error reporter.")
+ }
+}
+
+func TestConsumerWithExpectationsOnUnconsumedPartition(t *testing.T) {
+ trm := newTestReporterMock()
+ consumer := NewConsumer(trm, nil)
+ consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world")})
+
+ if err := consumer.Close(); err != nil {
+ t.Error("No error expected on close, but found:", err)
+ }
+
+ if len(trm.errors) != 1 {
+ t.Errorf("Expected an expectation failure to be set on the error reporter.")
+ }
+}
+
+func TestConsumerWithWrongOffsetExpectation(t *testing.T) {
+ trm := newTestReporterMock()
+ consumer := NewConsumer(trm, nil)
+ consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest)
+
+ _, err := consumer.ConsumePartition("test", 0, sarama.OffsetNewest)
+ if err != nil {
+ t.Error("Did not expect error, found:", err)
+ }
+
+ if len(trm.errors) != 1 {
+ t.Errorf("Expected an expectation failure to be set on the error reporter.")
+ }
+
+ if err := consumer.Close(); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestConsumerViolatesMessagesDrainedExpectation(t *testing.T) {
+ trm := newTestReporterMock()
+ consumer := NewConsumer(trm, nil)
+ pcmock := consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest)
+ pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")})
+ pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")})
+ pcmock.ExpectMessagesDrainedOnClose()
+
+ pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // consume first message, not second one
+ <-pc.Messages()
+
+ if err := consumer.Close(); err != nil {
+ t.Error(err)
+ }
+
+ if len(trm.errors) != 1 {
+ t.Errorf("Expected an expectation failure to be set on the error reporter.")
+ }
+}
+
+func TestConsumerMeetsErrorsDrainedExpectation(t *testing.T) {
+ trm := newTestReporterMock()
+ consumer := NewConsumer(trm, nil)
+
+ pcmock := consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest)
+ pcmock.YieldError(sarama.ErrInvalidMessage)
+ pcmock.YieldError(sarama.ErrInvalidMessage)
+ pcmock.ExpectErrorsDrainedOnClose()
+
+ pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // consume first and second error,
+ <-pc.Errors()
+ <-pc.Errors()
+
+ if err := consumer.Close(); err != nil {
+ t.Error(err)
+ }
+
+ if len(trm.errors) != 0 {
+ t.Errorf("Expected no expectation failures to be set on the error reporter.")
+ }
+}
+
+func TestConsumerTopicMetadata(t *testing.T) {
+ trm := newTestReporterMock()
+ consumer := NewConsumer(trm, nil)
+
+ consumer.SetTopicMetadata(map[string][]int32{
+ "test1": {0, 1, 2, 3},
+ "test2": {0, 1, 2, 3, 4, 5, 6, 7},
+ })
+
+ topics, err := consumer.Topics()
+ if err != nil {
+ t.Error(err)
+ }
+
+ sortedTopics := sort.StringSlice(topics)
+ sortedTopics.Sort()
+ if len(sortedTopics) != 2 || sortedTopics[0] != "test1" || sortedTopics[1] != "test2" {
+ t.Error("Unexpected topics returned:", sortedTopics)
+ }
+
+ partitions1, err := consumer.Partitions("test1")
+ if err != nil {
+ t.Error(err)
+ }
+
+ if len(partitions1) != 4 {
+ t.Error("Unexpected partitions returned:", len(partitions1))
+ }
+
+ partitions2, err := consumer.Partitions("test2")
+ if err != nil {
+ t.Error(err)
+ }
+
+ if len(partitions2) != 8 {
+ t.Error("Unexpected partitions returned:", len(partitions2))
+ }
+
+ if len(trm.errors) != 0 {
+ t.Errorf("Expected no expectation failures to be set on the error reporter.")
+ }
+}
+
+func TestConsumerUnexpectedTopicMetadata(t *testing.T) {
+ trm := newTestReporterMock()
+ consumer := NewConsumer(trm, nil)
+
+ if _, err := consumer.Topics(); err != sarama.ErrOutOfBrokers {
+ t.Error("Expected sarama.ErrOutOfBrokers, found", err)
+ }
+
+ if len(trm.errors) != 1 {
+ t.Errorf("Expected an expectation failure to be set on the error reporter.")
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/mocks/mocks.go b/vendor/github.com/Shopify/sarama/mocks/mocks.go
new file mode 100644
index 000000000..4adb838d9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mocks/mocks.go
@@ -0,0 +1,48 @@
+/*
+Package mocks provides mocks that can be used for testing applications
+that use Sarama. The mock types provided by this package implement the
+interfaces Sarama exports, so you can use them for dependency injection
+in your tests.
+
+All mock instances require you to set expectations on them before you
+can use them. These expectations determine how the mock will behave. If an
+expectation is not met, your test will fail.
+
+NOTE: this package currently does not fall under the API stability
+guarantee of Sarama as it is still considered experimental.
+*/
+package mocks
+
+import (
+ "errors"
+
+ "github.com/Shopify/sarama"
+)
+
+// ErrorReporter is a simple interface that includes the testing.T methods we use to report
+// expectation violations when using the mock objects.
+type ErrorReporter interface {
+ Errorf(string, ...interface{})
+}
+
+// ValueChecker is a function type to be set in each expectation of the producer mocks
+// to check the value passed.
+type ValueChecker func(val []byte) error
+
+var (
+ errProduceSuccess error = nil
+ errOutOfExpectations = errors.New("No more expectations set on mock")
+ errPartitionConsumerNotStarted = errors.New("The partition consumer was never started")
+)
+
+// AnyOffset is a special value for the offset argument of ExpectConsumePartition that
+// matches any offset passed to the corresponding ConsumePartition call.
+const AnyOffset int64 = -1000
+
+type producerExpectation struct {
+ Result error
+ CheckFunction ValueChecker
+}
+
+type consumerExpectation struct {
+ Err error
+ Msg *sarama.ConsumerMessage
+}
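A *testing.T satisfies ErrorReporter directly; the following is a minimal sketch of a custom reporter that records expectation failures instead of failing the test immediately (recordingReporter is a hypothetical name):

package mytest

import "fmt"

// recordingReporter collects expectation failures so a test can assert on them later.
type recordingReporter struct {
	errors []string
}

func (r *recordingReporter) Errorf(format string, args ...interface{}) {
	r.errors = append(r.errors, fmt.Sprintf(format, args...))
}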
diff --git a/vendor/github.com/Shopify/sarama/mocks/sync_producer.go b/vendor/github.com/Shopify/sarama/mocks/sync_producer.go
new file mode 100644
index 000000000..5de79cce8
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mocks/sync_producer.go
@@ -0,0 +1,146 @@
+package mocks
+
+import (
+ "sync"
+
+ "github.com/Shopify/sarama"
+)
+
+// SyncProducer implements sarama's SyncProducer interface for testing purposes.
+// Before you can use it, you have to set expectations on the mock SyncProducer
+// to tell it how to handle calls to SendMessage, so you can easily test success
+// and failure scenarios.
+type SyncProducer struct {
+ l sync.Mutex
+ t ErrorReporter
+ expectations []*producerExpectation
+ lastOffset int64
+}
+
+// NewSyncProducer instantiates a new SyncProducer mock. The t argument should
+// be the *testing.T instance of your test method. An error will be written to it if
+// an expectation is violated. The config argument is currently unused, but is
+// maintained to be compatible with the async Producer.
+func NewSyncProducer(t ErrorReporter, config *sarama.Config) *SyncProducer {
+ return &SyncProducer{
+ t: t,
+ expectations: make([]*producerExpectation, 0),
+ }
+}
+
+////////////////////////////////////////////////
+// Implement SyncProducer interface
+////////////////////////////////////////////////
+
+// SendMessage corresponds with the SendMessage method of sarama's SyncProducer implementation.
+// You have to set expectations on the mock producer before calling SendMessage, so it knows
+// how to handle them. You can set a function in each expectation so that the message value
+// is checked by this function and an error is returned if the check fails.
+// If there are no remaining expectations when SendMessage is called,
+// the mock producer will write an error to the test state object.
+func (sp *SyncProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) {
+ sp.l.Lock()
+ defer sp.l.Unlock()
+
+ if len(sp.expectations) > 0 {
+ expectation := sp.expectations[0]
+ sp.expectations = sp.expectations[1:]
+ if expectation.CheckFunction != nil {
+ val, err := msg.Value.Encode()
+ if err != nil {
+ sp.t.Errorf("Input message encoding failed: %s", err.Error())
+ return -1, -1, err
+ }
+
+ errCheck := expectation.CheckFunction(val)
+ if errCheck != nil {
+ sp.t.Errorf("Check function returned an error: %s", errCheck.Error())
+ return -1, -1, errCheck
+ }
+ }
+ if expectation.Result == errProduceSuccess {
+ sp.lastOffset++
+ msg.Offset = sp.lastOffset
+ return 0, msg.Offset, nil
+ }
+ return -1, -1, expectation.Result
+ }
+ sp.t.Errorf("No more expectation set on this mock producer to handle the input message.")
+ return -1, -1, errOutOfExpectations
+}
+
+// SendMessages corresponds with the SendMessages method of sarama's SyncProducer implementation.
+// You have to set expectations on the mock producer before calling SendMessages, so it knows
+// how to handle them. If there are no remaining expectations when SendMessages is called,
+// the mock producer will write an error to the test state object.
+func (sp *SyncProducer) SendMessages(msgs []*sarama.ProducerMessage) error {
+ sp.l.Lock()
+ defer sp.l.Unlock()
+
+ if len(sp.expectations) >= len(msgs) {
+ expectations := sp.expectations[0:len(msgs)]
+ sp.expectations = sp.expectations[len(msgs):]
+
+ for _, expectation := range expectations {
+ if expectation.Result != errProduceSuccess {
+ return expectation.Result
+ }
+
+ }
+ return nil
+ }
+ sp.t.Errorf("Insufficient expectations set on this mock producer to handle the input messages.")
+ return errOutOfExpectations
+}
+
+// Close corresponds with the Close method of sarama's SyncProducer implementation.
+// By closing a mock syncproducer, you also tell it that no more SendMessage calls will follow,
+// so it will write an error to the test state if there's any remaining expectations.
+func (sp *SyncProducer) Close() error {
+ sp.l.Lock()
+ defer sp.l.Unlock()
+
+ if len(sp.expectations) > 0 {
+ sp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(sp.expectations))
+ }
+
+ return nil
+}
+
+////////////////////////////////////////////////
+// Setting expectations
+////////////////////////////////////////////////
+
+// ExpectSendMessageWithCheckerFunctionAndSucceed sets an expectation on the mock producer that SendMessage
+// will be called. The mock producer will first call the given function to check the message value.
+// It will cascade the error of the function, if any, or handle the message as if it produced
+// successfully, i.e. by returning a valid partition and offset, and a nil error.
+func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndSucceed(cf ValueChecker) {
+ sp.l.Lock()
+ defer sp.l.Unlock()
+ sp.expectations = append(sp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf})
+}
+
+// ExpectSendMessageWithCheckerFunctionAndFail sets an expectation on the mock producer that SendMessage will be
+// called. The mock producer will first call the given function to check the message value.
+// It will cascade the error of the function, if any, or handle the message as if it failed
+// to produce successfully, i.e. by returning the provided error.
+func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndFail(cf ValueChecker, err error) {
+ sp.l.Lock()
+ defer sp.l.Unlock()
+ sp.expectations = append(sp.expectations, &producerExpectation{Result: err, CheckFunction: cf})
+}
+
+// ExpectSendMessageAndSucceed sets an expectation on the mock producer that SendMessage will be
+// called. The mock producer will handle the message as if it produced successfully, i.e. by
+// returning a valid partition and offset, and a nil error.
+func (sp *SyncProducer) ExpectSendMessageAndSucceed() {
+ sp.ExpectSendMessageWithCheckerFunctionAndSucceed(nil)
+}
+
+// ExpectSendMessageAndFail sets an expectation on the mock producer that SendMessage will be
+// called. The mock producer will handle the message as if it failed to produce
+// successfully, i.e. by returning the provided error.
+func (sp *SyncProducer) ExpectSendMessageAndFail(err error) {
+ sp.ExpectSendMessageWithCheckerFunctionAndFail(nil, err)
+}
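A minimal usage sketch for the mock SyncProducer above, validating the produced value with a checker function; the topic and payload are illustrative assumptions:

package mytest

import (
	"fmt"
	"strings"
	"testing"

	"github.com/Shopify/sarama"
	"github.com/Shopify/sarama/mocks"
)

func TestPublishesEvent(t *testing.T) {
	sp := mocks.NewSyncProducer(t, nil)

	// Expect exactly one SendMessage call and validate its encoded value.
	sp.ExpectSendMessageWithCheckerFunctionAndSucceed(func(val []byte) error {
		if !strings.HasPrefix(string(val), `{"event":`) {
			return fmt.Errorf("unexpected payload: %s", val)
		}
		return nil
	})

	msg := &sarama.ProducerMessage{Topic: "events", Value: sarama.StringEncoder(`{"event":"signup"}`)}
	if _, _, err := sp.SendMessage(msg); err != nil {
		t.Error(err)
	}
	if err := sp.Close(); err != nil { // reports an error if expectations remain unused
		t.Error(err)
	}
}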
diff --git a/vendor/github.com/Shopify/sarama/mocks/sync_producer_test.go b/vendor/github.com/Shopify/sarama/mocks/sync_producer_test.go
new file mode 100644
index 000000000..0fdc99877
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mocks/sync_producer_test.go
@@ -0,0 +1,124 @@
+package mocks
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/Shopify/sarama"
+)
+
+func TestMockSyncProducerImplementsSyncProducerInterface(t *testing.T) {
+ var mp interface{} = &SyncProducer{}
+ if _, ok := mp.(sarama.SyncProducer); !ok {
+ t.Error("The mock async producer should implement the sarama.SyncProducer interface.")
+ }
+}
+
+func TestSyncProducerReturnsExpectationsToSendMessage(t *testing.T) {
+ sp := NewSyncProducer(t, nil)
+ defer func() {
+ if err := sp.Close(); err != nil {
+ t.Error(err)
+ }
+ }()
+
+ sp.ExpectSendMessageAndSucceed()
+ sp.ExpectSendMessageAndSucceed()
+ sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers)
+
+ msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
+
+ _, offset, err := sp.SendMessage(msg)
+ if err != nil {
+ t.Errorf("The first message should have been produced successfully, but got %s", err)
+ }
+ if offset != 1 || offset != msg.Offset {
+ t.Errorf("The first message should have been assigned offset 1, but got %d", msg.Offset)
+ }
+
+ _, offset, err = sp.SendMessage(msg)
+ if err != nil {
+ t.Errorf("The second message should have been produced successfully, but got %s", err)
+ }
+ if offset != 2 || offset != msg.Offset {
+ t.Errorf("The second message should have been assigned offset 2, but got %d", offset)
+ }
+
+ _, _, err = sp.SendMessage(msg)
+ if err != sarama.ErrOutOfBrokers {
+ t.Errorf("The third message should not have been produced successfully")
+ }
+
+ if err := sp.Close(); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestSyncProducerWithTooManyExpectations(t *testing.T) {
+ trm := newTestReporterMock()
+
+ sp := NewSyncProducer(trm, nil)
+ sp.ExpectSendMessageAndSucceed()
+ sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers)
+
+ msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
+ if _, _, err := sp.SendMessage(msg); err != nil {
+ t.Error("No error expected on first SendMessage call", err)
+ }
+
+ if err := sp.Close(); err != nil {
+ t.Error(err)
+ }
+
+ if len(trm.errors) != 1 {
+ t.Error("Expected to report an error")
+ }
+}
+
+func TestSyncProducerWithTooFewExpectations(t *testing.T) {
+ trm := newTestReporterMock()
+
+ sp := NewSyncProducer(trm, nil)
+ sp.ExpectSendMessageAndSucceed()
+
+ msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
+ if _, _, err := sp.SendMessage(msg); err != nil {
+ t.Error("No error expected on first SendMessage call", err)
+ }
+ if _, _, err := sp.SendMessage(msg); err != errOutOfExpectations {
+ t.Error("errOutOfExpectations expected on second SendMessage call, found:", err)
+ }
+
+ if err := sp.Close(); err != nil {
+ t.Error(err)
+ }
+
+ if len(trm.errors) != 1 {
+ t.Error("Expected to report an error")
+ }
+}
+
+func TestSyncProducerWithCheckerFunction(t *testing.T) {
+ trm := newTestReporterMock()
+
+ sp := NewSyncProducer(trm, nil)
+ sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes"))
+ sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes$"))
+
+ msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
+ if _, _, err := sp.SendMessage(msg); err != nil {
+ t.Error("No error expected on first SendMessage call, found: ", err)
+ }
+ msg = &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
+ if _, _, err := sp.SendMessage(msg); err == nil || !strings.HasPrefix(err.Error(), "No match") {
+ t.Error("Error during value check expected on second SendMessage call, found:", err)
+ }
+
+ if err := sp.Close(); err != nil {
+ t.Error(err)
+ }
+
+ if len(trm.errors) != 1 {
+ t.Error("Expected to report an error")
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go
new file mode 100644
index 000000000..b21ea634b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_commit_request.go
@@ -0,0 +1,190 @@
+package sarama
+
+// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
+// tells the broker to set the timestamp to the time at which the request was received.
+// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2.
+const ReceiveTime int64 = -1
+
+// GroupGenerationUndefined is a special value for the group generation field of
+// Offset Commit Requests that should be used when a consumer group does not rely
+// on Kafka for partition management.
+const GroupGenerationUndefined = -1
+
+type offsetCommitRequestBlock struct {
+ offset int64
+ timestamp int64
+ metadata string
+}
+
+func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
+ pe.putInt64(b.offset)
+ if version == 1 {
+ pe.putInt64(b.timestamp)
+ } else if b.timestamp != 0 {
+ Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored")
+ }
+
+ return pe.putString(b.metadata)
+}
+
+func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+ if b.offset, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if version == 1 {
+ if b.timestamp, err = pd.getInt64(); err != nil {
+ return err
+ }
+ }
+ b.metadata, err = pd.getString()
+ return err
+}
+
+type OffsetCommitRequest struct {
+ ConsumerGroup string
+ ConsumerGroupGeneration int32 // v1 or later
+ ConsumerID string // v1 or later
+ RetentionTime int64 // v2 or later
+
+ // Version can be:
+ // - 0 (kafka 0.8.1 and later)
+ // - 1 (kafka 0.8.2 and later)
+ // - 2 (kafka 0.9.0 and later)
+ Version int16
+ blocks map[string]map[int32]*offsetCommitRequestBlock
+}
+
+func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
+ if r.Version < 0 || r.Version > 2 {
+ return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
+ }
+
+ if err := pe.putString(r.ConsumerGroup); err != nil {
+ return err
+ }
+
+ if r.Version >= 1 {
+ pe.putInt32(r.ConsumerGroupGeneration)
+ if err := pe.putString(r.ConsumerID); err != nil {
+ return err
+ }
+ } else {
+ if r.ConsumerGroupGeneration != 0 {
+ Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
+ }
+ if r.ConsumerID != "" {
+ Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
+ }
+ }
+
+ if r.Version >= 2 {
+ pe.putInt64(r.RetentionTime)
+ } else if r.RetentionTime != 0 {
+ Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored")
+ }
+
+ if err := pe.putArrayLength(len(r.blocks)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.blocks {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err := block.encode(pe, r.Version); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if r.ConsumerGroup, err = pd.getString(); err != nil {
+ return err
+ }
+
+ if r.Version >= 1 {
+ if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if r.ConsumerID, err = pd.getString(); err != nil {
+ return err
+ }
+ }
+
+ if r.Version >= 2 {
+ if r.RetentionTime, err = pd.getInt64(); err != nil {
+ return err
+ }
+ }
+
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount == 0 {
+ return nil
+ }
+ r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
+ for i := 0; i < topicCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ block := &offsetCommitRequestBlock{}
+ if err := block.decode(pd, r.Version); err != nil {
+ return err
+ }
+ r.blocks[topic][partition] = block
+ }
+ }
+ return nil
+}
+
+func (r *OffsetCommitRequest) key() int16 {
+ return 8
+}
+
+func (r *OffsetCommitRequest) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_8_2_0
+ case 2:
+ return V0_9_0_0
+ default:
+ return minVersion
+ }
+}
+
+func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
+ if r.blocks == nil {
+ r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
+ }
+
+ if r.blocks[topic] == nil {
+ r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
+ }
+
+ r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
+}
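A minimal sketch of building a v2 commit request with AddBlock and sending it over an already-connected coordinator broker; the group, topic, offset, and helper name are illustrative assumptions:

package myapp

import (
	"time"

	"github.com/Shopify/sarama"
)

// commitOffset is a hypothetical helper; broker must already be connected and be
// the coordinator for the consumer group.
func commitOffset(broker *sarama.Broker) (*sarama.OffsetCommitResponse, error) {
	req := &sarama.OffsetCommitRequest{
		Version:                 2, // requires Kafka 0.9.0 or later
		ConsumerGroup:           "my-group",
		ConsumerGroupGeneration: sarama.GroupGenerationUndefined,
		RetentionTime:           int64(24 * time.Hour / time.Millisecond),
	}
	// The timestamp argument is only encoded for v1 requests, so pass 0 here.
	req.AddBlock("my-topic", 0, 42, 0, "checkpoint-metadata")
	return broker.CommitOffset(req)
}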
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request_test.go b/vendor/github.com/Shopify/sarama/offset_commit_request_test.go
new file mode 100644
index 000000000..afc25b7b3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_commit_request_test.go
@@ -0,0 +1,90 @@
+package sarama
+
+import "testing"
+
+var (
+ offsetCommitRequestNoBlocksV0 = []byte{
+ 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
+ 0x00, 0x00, 0x00, 0x00}
+
+ offsetCommitRequestNoBlocksV1 = []byte{
+ 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
+ 0x00, 0x00, 0x11, 0x22,
+ 0x00, 0x04, 'c', 'o', 'n', 's',
+ 0x00, 0x00, 0x00, 0x00}
+
+ offsetCommitRequestNoBlocksV2 = []byte{
+ 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
+ 0x00, 0x00, 0x11, 0x22,
+ 0x00, 0x04, 'c', 'o', 'n', 's',
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x33,
+ 0x00, 0x00, 0x00, 0x00}
+
+ offsetCommitRequestOneBlockV0 = []byte{
+ 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x05, 't', 'o', 'p', 'i', 'c',
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x52, 0x21,
+ 0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF,
+ 0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'}
+
+ offsetCommitRequestOneBlockV1 = []byte{
+ 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
+ 0x00, 0x00, 0x11, 0x22,
+ 0x00, 0x04, 'c', 'o', 'n', 's',
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x05, 't', 'o', 'p', 'i', 'c',
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x52, 0x21,
+ 0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'}
+
+ offsetCommitRequestOneBlockV2 = []byte{
+ 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
+ 0x00, 0x00, 0x11, 0x22,
+ 0x00, 0x04, 'c', 'o', 'n', 's',
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x33,
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x05, 't', 'o', 'p', 'i', 'c',
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x52, 0x21,
+ 0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF,
+ 0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'}
+)
+
+func TestOffsetCommitRequestV0(t *testing.T) {
+ request := new(OffsetCommitRequest)
+ request.Version = 0
+ request.ConsumerGroup = "foobar"
+ testRequest(t, "no blocks v0", request, offsetCommitRequestNoBlocksV0)
+
+ request.AddBlock("topic", 0x5221, 0xDEADBEEF, 0, "metadata")
+ testRequest(t, "one block v0", request, offsetCommitRequestOneBlockV0)
+}
+
+func TestOffsetCommitRequestV1(t *testing.T) {
+ request := new(OffsetCommitRequest)
+ request.ConsumerGroup = "foobar"
+ request.ConsumerID = "cons"
+ request.ConsumerGroupGeneration = 0x1122
+ request.Version = 1
+ testRequest(t, "no blocks v1", request, offsetCommitRequestNoBlocksV1)
+
+ request.AddBlock("topic", 0x5221, 0xDEADBEEF, ReceiveTime, "metadata")
+ testRequest(t, "one block v1", request, offsetCommitRequestOneBlockV1)
+}
+
+func TestOffsetCommitRequestV2(t *testing.T) {
+ request := new(OffsetCommitRequest)
+ request.ConsumerGroup = "foobar"
+ request.ConsumerID = "cons"
+ request.ConsumerGroupGeneration = 0x1122
+ request.RetentionTime = 0x4433
+ request.Version = 2
+ testRequest(t, "no blocks v2", request, offsetCommitRequestNoBlocksV2)
+
+ request.AddBlock("topic", 0x5221, 0xDEADBEEF, 0, "metadata")
+ testRequest(t, "one block v2", request, offsetCommitRequestOneBlockV2)
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go
new file mode 100644
index 000000000..7f277e775
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_commit_response.go
@@ -0,0 +1,85 @@
+package sarama
+
+type OffsetCommitResponse struct {
+ Errors map[string]map[int32]KError
+}
+
+func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
+ if r.Errors == nil {
+ r.Errors = make(map[string]map[int32]KError)
+ }
+ partitions := r.Errors[topic]
+ if partitions == nil {
+ partitions = make(map[int32]KError)
+ r.Errors[topic] = partitions
+ }
+ partitions[partition] = kerror
+}
+
+func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(r.Errors)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.Errors {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, kerror := range partitions {
+ pe.putInt32(partition)
+ pe.putInt16(int16(kerror))
+ }
+ }
+ return nil
+}
+
+func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
+ numTopics, err := pd.getArrayLength()
+ if err != nil || numTopics == 0 {
+ return err
+ }
+
+ r.Errors = make(map[string]map[int32]KError, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numErrors, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Errors[name] = make(map[int32]KError, numErrors)
+
+ for j := 0; j < numErrors; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.Errors[name][id] = KError(tmp)
+ }
+ }
+
+ return nil
+}
+
+func (r *OffsetCommitResponse) key() int16 {
+ return 8
+}
+
+func (r *OffsetCommitResponse) version() int16 {
+ return 0
+}
+
+func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
+ return minVersion
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response_test.go b/vendor/github.com/Shopify/sarama/offset_commit_response_test.go
new file mode 100644
index 000000000..074ec9232
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_commit_response_test.go
@@ -0,0 +1,24 @@
+package sarama
+
+import (
+ "testing"
+)
+
+var (
+ emptyOffsetCommitResponse = []byte{
+ 0x00, 0x00, 0x00, 0x00}
+)
+
+func TestEmptyOffsetCommitResponse(t *testing.T) {
+ response := OffsetCommitResponse{}
+ testResponse(t, "empty", &response, emptyOffsetCommitResponse)
+}
+
+func TestNormalOffsetCommitResponse(t *testing.T) {
+ response := OffsetCommitResponse{}
+ response.AddError("t", 0, ErrNotLeaderForPartition)
+ response.Errors["m"] = make(map[int32]KError)
+ // The encoded form of the response cannot be checked, as it varies due to
+ // unpredictable map traversal order.
+ testResponse(t, "normal", &response, nil)
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go
new file mode 100644
index 000000000..b19fe79ba
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_fetch_request.go
@@ -0,0 +1,81 @@
+package sarama
+
+type OffsetFetchRequest struct {
+ ConsumerGroup string
+ Version int16
+ partitions map[string][]int32
+}
+
+func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
+ if r.Version < 0 || r.Version > 1 {
+ return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
+ }
+
+ if err = pe.putString(r.ConsumerGroup); err != nil {
+ return err
+ }
+ if err = pe.putArrayLength(len(r.partitions)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.partitions {
+ if err = pe.putString(topic); err != nil {
+ return err
+ }
+ if err = pe.putInt32Array(partitions); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ if r.ConsumerGroup, err = pd.getString(); err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if partitionCount == 0 {
+ return nil
+ }
+ r.partitions = make(map[string][]int32)
+ for i := 0; i < partitionCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitions, err := pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+ r.partitions[topic] = partitions
+ }
+ return nil
+}
+
+func (r *OffsetFetchRequest) key() int16 {
+ return 9
+}
+
+func (r *OffsetFetchRequest) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_8_2_0
+ default:
+ return minVersion
+ }
+}
+
+func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
+ if r.partitions == nil {
+ r.partitions = make(map[string][]int32)
+ }
+
+ r.partitions[topic] = append(r.partitions[topic], partitionID)
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request_test.go b/vendor/github.com/Shopify/sarama/offset_fetch_request_test.go
new file mode 100644
index 000000000..025d725c9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_fetch_request_test.go
@@ -0,0 +1,31 @@
+package sarama
+
+import "testing"
+
+var (
+ offsetFetchRequestNoGroupNoPartitions = []byte{
+ 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+
+ offsetFetchRequestNoPartitions = []byte{
+ 0x00, 0x04, 'b', 'l', 'a', 'h',
+ 0x00, 0x00, 0x00, 0x00}
+
+ offsetFetchRequestOnePartition = []byte{
+ 0x00, 0x04, 'b', 'l', 'a', 'h',
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x0D, 't', 'o', 'p', 'i', 'c', 'T', 'h', 'e', 'F', 'i', 'r', 's', 't',
+ 0x00, 0x00, 0x00, 0x01,
+ 0x4F, 0x4F, 0x4F, 0x4F}
+)
+
+func TestOffsetFetchRequest(t *testing.T) {
+ request := new(OffsetFetchRequest)
+ testRequest(t, "no group, no partitions", request, offsetFetchRequestNoGroupNoPartitions)
+
+ request.ConsumerGroup = "blah"
+ testRequest(t, "no partitions", request, offsetFetchRequestNoPartitions)
+
+ request.AddPartition("topicTheFirst", 0x4F4F4F4F)
+ testRequest(t, "one partition", request, offsetFetchRequestOnePartition)
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go
new file mode 100644
index 000000000..323220eac
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_fetch_response.go
@@ -0,0 +1,143 @@
+package sarama
+
+type OffsetFetchResponseBlock struct {
+ Offset int64
+ Metadata string
+ Err KError
+}
+
+func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) {
+ b.Offset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ b.Metadata, err = pd.getString()
+ if err != nil {
+ return err
+ }
+
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ b.Err = KError(tmp)
+
+ return nil
+}
+
+func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) {
+ pe.putInt64(b.Offset)
+
+ err = pe.putString(b.Metadata)
+ if err != nil {
+ return err
+ }
+
+ pe.putInt16(int16(b.Err))
+
+ return nil
+}
+
+type OffsetFetchResponse struct {
+ Blocks map[string]map[int32]*OffsetFetchResponseBlock
+}
+
+func (r *OffsetFetchResponse) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(r.Blocks)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.Blocks {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err := block.encode(pe); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
+ numTopics, err := pd.getArrayLength()
+ if err != nil || numTopics == 0 {
+ return err
+ }
+
+ r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if numBlocks == 0 {
+ r.Blocks[name] = nil
+ continue
+ }
+ r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
+
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(OffsetFetchResponseBlock)
+ err = block.decode(pd)
+ if err != nil {
+ return err
+ }
+ r.Blocks[name][id] = block
+ }
+ }
+
+ return nil
+}
+
+func (r *OffsetFetchResponse) key() int16 {
+ return 9
+}
+
+func (r *OffsetFetchResponse) version() int16 {
+ return 0
+}
+
+func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
+ return minVersion
+}
+
+func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
+ if r.Blocks == nil {
+ return nil
+ }
+
+ if r.Blocks[topic] == nil {
+ return nil
+ }
+
+ return r.Blocks[topic][partition]
+}
+
+func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock)
+ }
+ partitions := r.Blocks[topic]
+ if partitions == nil {
+ partitions = make(map[int32]*OffsetFetchResponseBlock)
+ r.Blocks[topic] = partitions
+ }
+ partitions[partition] = block
+}
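A matching sketch that fetches a committed offset back with a v1 OffsetFetchRequest and reads it via GetBlock; as above, the group, topic, and helper name are illustrative assumptions:

package myapp

import "github.com/Shopify/sarama"

// fetchOffset is a hypothetical helper; broker must be the coordinator for the group.
func fetchOffset(broker *sarama.Broker) (int64, string, error) {
	req := &sarama.OffsetFetchRequest{
		Version:       1, // requires Kafka 0.8.2 or later
		ConsumerGroup: "my-group",
	}
	req.AddPartition("my-topic", 0)

	resp, err := broker.FetchOffset(req)
	if err != nil {
		return 0, "", err
	}
	block := resp.GetBlock("my-topic", 0)
	if block == nil {
		return 0, "", sarama.ErrIncompleteResponse
	}
	if block.Err != sarama.ErrNoError {
		return 0, "", block.Err
	}
	return block.Offset, block.Metadata, nil
}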
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response_test.go b/vendor/github.com/Shopify/sarama/offset_fetch_response_test.go
new file mode 100644
index 000000000..7614ae424
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_fetch_response_test.go
@@ -0,0 +1,22 @@
+package sarama
+
+import "testing"
+
+var (
+ emptyOffsetFetchResponse = []byte{
+ 0x00, 0x00, 0x00, 0x00}
+)
+
+func TestEmptyOffsetFetchResponse(t *testing.T) {
+ response := OffsetFetchResponse{}
+ testResponse(t, "empty", &response, emptyOffsetFetchResponse)
+}
+
+func TestNormalOffsetFetchResponse(t *testing.T) {
+ response := OffsetFetchResponse{}
+ response.AddBlock("t", 0, &OffsetFetchResponseBlock{0, "md", ErrRequestTimedOut})
+ response.Blocks["m"] = nil
+ // The encoded form of the response cannot be checked, as it varies due to
+ // unpredictable map traversal order.
+ testResponse(t, "normal", &response, nil)
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go
new file mode 100644
index 000000000..5e15cdafe
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_manager.go
@@ -0,0 +1,542 @@
+package sarama
+
+import (
+ "sync"
+ "time"
+)
+
+// Offset Manager
+
+// OffsetManager uses Kafka to store and fetch consumed partition offsets.
+type OffsetManager interface {
+ // ManagePartition creates a PartitionOffsetManager on the given topic/partition.
+ // It will return an error if this OffsetManager is already managing the given
+ // topic/partition.
+ ManagePartition(topic string, partition int32) (PartitionOffsetManager, error)
+
+ // Close stops the OffsetManager from managing offsets. It is required to call
+ // this function before an OffsetManager object passes out of scope, as it
+ // will otherwise leak memory. You must call this after all the
+ // PartitionOffsetManagers are closed.
+ Close() error
+}
+
+type offsetManager struct {
+ client Client
+ conf *Config
+ group string
+
+ lock sync.Mutex
+ poms map[string]map[int32]*partitionOffsetManager
+ boms map[*Broker]*brokerOffsetManager
+}
+
+// NewOffsetManagerFromClient creates a new OffsetManager from the given client.
+// It is still necessary to call Close() on the underlying client when finished with the offset manager.
+func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) {
+ // Check that we are not dealing with a closed Client before processing any other arguments
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ om := &offsetManager{
+ client: client,
+ conf: client.Config(),
+ group: group,
+ poms: make(map[string]map[int32]*partitionOffsetManager),
+ boms: make(map[*Broker]*brokerOffsetManager),
+ }
+
+ return om, nil
+}
+
+func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) {
+ pom, err := om.newPartitionOffsetManager(topic, partition)
+ if err != nil {
+ return nil, err
+ }
+
+ om.lock.Lock()
+ defer om.lock.Unlock()
+
+ topicManagers := om.poms[topic]
+ if topicManagers == nil {
+ topicManagers = make(map[int32]*partitionOffsetManager)
+ om.poms[topic] = topicManagers
+ }
+
+ if topicManagers[partition] != nil {
+ return nil, ConfigurationError("That topic/partition is already being managed")
+ }
+
+ topicManagers[partition] = pom
+ return pom, nil
+}
+
+func (om *offsetManager) Close() error {
+ return nil
+}
+
+func (om *offsetManager) refBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
+ om.lock.Lock()
+ defer om.lock.Unlock()
+
+ bom := om.boms[broker]
+ if bom == nil {
+ bom = om.newBrokerOffsetManager(broker)
+ om.boms[broker] = bom
+ }
+
+ bom.refs++
+
+ return bom
+}
+
+func (om *offsetManager) unrefBrokerOffsetManager(bom *brokerOffsetManager) {
+ om.lock.Lock()
+ defer om.lock.Unlock()
+
+ bom.refs--
+
+ if bom.refs == 0 {
+ close(bom.updateSubscriptions)
+ if om.boms[bom.broker] == bom {
+ delete(om.boms, bom.broker)
+ }
+ }
+}
+
+func (om *offsetManager) abandonBroker(bom *brokerOffsetManager) {
+ om.lock.Lock()
+ defer om.lock.Unlock()
+
+ delete(om.boms, bom.broker)
+}
+
+func (om *offsetManager) abandonPartitionOffsetManager(pom *partitionOffsetManager) {
+ om.lock.Lock()
+ defer om.lock.Unlock()
+
+ delete(om.poms[pom.topic], pom.partition)
+ if len(om.poms[pom.topic]) == 0 {
+ delete(om.poms, pom.topic)
+ }
+}
+
+// Partition Offset Manager
+
+// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close()
+// on a partition offset manager to avoid leaks; it will not be garbage-collected automatically when it passes
+// out of scope.
+type PartitionOffsetManager interface {
+ // NextOffset returns the next offset that should be consumed for the managed
+ // partition, accompanied by metadata which can be used to reconstruct the state
+ // of the partition consumer when it resumes. NextOffset() will return
+ // `config.Consumer.Offsets.Initial` and an empty metadata string if no offset
+ // was committed for this partition yet.
+ NextOffset() (int64, string)
+
+ // MarkOffset marks the provided offset, alongside a metadata string
+ // that represents the state of the partition consumer at that point in time. The
+ // metadata string can be used by another consumer to restore that state, so it
+ // can resume consumption.
+ //
+ // To follow upstream conventions, you are expected to mark the offset of the
+ // next message to read, not the last message read. Thus, when calling `MarkOffset`
+ // you should typically add one to the offset of the last consumed message.
+ //
+ // Note: calling MarkOffset does not necessarily commit the offset to the backend
+ // store immediately for efficiency reasons, and it may never be committed if
+ // your application crashes. This means that you may end up processing the same
+ // message twice, and your processing should ideally be idempotent.
+ MarkOffset(offset int64, metadata string)
+
+ // Errors returns a read channel of errors that occur during offset management, if
+ // enabled. By default, errors are logged and not returned over this channel. If
+ // you want to implement any custom error handling, set your config's
+ // Consumer.Return.Errors setting to true, and read from this channel.
+ Errors() <-chan *ConsumerError
+
+ // AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will
+ // return immediately, after which you should wait until the 'errors' channel has
+ // been drained and closed. It is required to call this function, or Close, before
+ // a PartitionOffsetManager object passes out of scope, as it will otherwise leak memory. You
+ // must call this before calling Close on the underlying client.
+ AsyncClose()
+
+ // Close stops the PartitionOffsetManager from managing offsets. It is required to
+ // call this function (or AsyncClose) before a PartitionOffsetManager object
+ // passes out of scope, as it will otherwise leak memory. You must call this
+ // before calling Close on the underlying client.
+ Close() error
+}
+
+type partitionOffsetManager struct {
+ parent *offsetManager
+ topic string
+ partition int32
+
+ lock sync.Mutex
+ offset int64
+ metadata string
+ dirty bool
+ clean sync.Cond
+ broker *brokerOffsetManager
+
+ errors chan *ConsumerError
+ rebalance chan none
+ dying chan none
+}
+
+func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) {
+ pom := &partitionOffsetManager{
+ parent: om,
+ topic: topic,
+ partition: partition,
+ errors: make(chan *ConsumerError, om.conf.ChannelBufferSize),
+ rebalance: make(chan none, 1),
+ dying: make(chan none),
+ }
+ pom.clean.L = &pom.lock
+
+ if err := pom.selectBroker(); err != nil {
+ return nil, err
+ }
+
+ if err := pom.fetchInitialOffset(om.conf.Metadata.Retry.Max); err != nil {
+ return nil, err
+ }
+
+ pom.broker.updateSubscriptions <- pom
+
+ go withRecover(pom.mainLoop)
+
+ return pom, nil
+}
+
+func (pom *partitionOffsetManager) mainLoop() {
+ for {
+ select {
+ case <-pom.rebalance:
+ if err := pom.selectBroker(); err != nil {
+ pom.handleError(err)
+ pom.rebalance <- none{}
+ } else {
+ pom.broker.updateSubscriptions <- pom
+ }
+ case <-pom.dying:
+ if pom.broker != nil {
+ select {
+ case <-pom.rebalance:
+ case pom.broker.updateSubscriptions <- pom:
+ }
+ pom.parent.unrefBrokerOffsetManager(pom.broker)
+ }
+ pom.parent.abandonPartitionOffsetManager(pom)
+ close(pom.errors)
+ return
+ }
+ }
+}
+
+func (pom *partitionOffsetManager) selectBroker() error {
+ if pom.broker != nil {
+ pom.parent.unrefBrokerOffsetManager(pom.broker)
+ pom.broker = nil
+ }
+
+ var broker *Broker
+ var err error
+
+ if err = pom.parent.client.RefreshCoordinator(pom.parent.group); err != nil {
+ return err
+ }
+
+ if broker, err = pom.parent.client.Coordinator(pom.parent.group); err != nil {
+ return err
+ }
+
+ pom.broker = pom.parent.refBrokerOffsetManager(broker)
+ return nil
+}
+
+func (pom *partitionOffsetManager) fetchInitialOffset(retries int) error {
+ request := new(OffsetFetchRequest)
+ request.Version = 1
+ request.ConsumerGroup = pom.parent.group
+ request.AddPartition(pom.topic, pom.partition)
+
+ response, err := pom.broker.broker.FetchOffset(request)
+ if err != nil {
+ return err
+ }
+
+ block := response.GetBlock(pom.topic, pom.partition)
+ if block == nil {
+ return ErrIncompleteResponse
+ }
+
+ switch block.Err {
+ case ErrNoError:
+ pom.offset = block.Offset
+ pom.metadata = block.Metadata
+ return nil
+ case ErrNotCoordinatorForConsumer:
+ if retries <= 0 {
+ return block.Err
+ }
+ if err := pom.selectBroker(); err != nil {
+ return err
+ }
+ return pom.fetchInitialOffset(retries - 1)
+ case ErrOffsetsLoadInProgress:
+ if retries <= 0 {
+ return block.Err
+ }
+ time.Sleep(pom.parent.conf.Metadata.Retry.Backoff)
+ return pom.fetchInitialOffset(retries - 1)
+ default:
+ return block.Err
+ }
+}
+
+func (pom *partitionOffsetManager) handleError(err error) {
+ cErr := &ConsumerError{
+ Topic: pom.topic,
+ Partition: pom.partition,
+ Err: err,
+ }
+
+ if pom.parent.conf.Consumer.Return.Errors {
+ pom.errors <- cErr
+ } else {
+ Logger.Println(cErr)
+ }
+}
+
+func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError {
+ return pom.errors
+}
+
+func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ if offset > pom.offset {
+ pom.offset = offset
+ pom.metadata = metadata
+ pom.dirty = true
+ }
+}
+
+func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ if pom.offset == offset && pom.metadata == metadata {
+ pom.dirty = false
+ pom.clean.Signal()
+ }
+}
+
+func (pom *partitionOffsetManager) NextOffset() (int64, string) {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ if pom.offset >= 0 {
+ return pom.offset, pom.metadata
+ }
+
+ return pom.parent.conf.Consumer.Offsets.Initial, ""
+}
+
+func (pom *partitionOffsetManager) AsyncClose() {
+ go func() {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ for pom.dirty {
+ pom.clean.Wait()
+ }
+
+ close(pom.dying)
+ }()
+}
+
+func (pom *partitionOffsetManager) Close() error {
+ pom.AsyncClose()
+
+ var errors ConsumerErrors
+ for err := range pom.errors {
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return errors
+ }
+ return nil
+}
+
+// Broker Offset Manager
+
+type brokerOffsetManager struct {
+ parent *offsetManager
+ broker *Broker
+ timer *time.Ticker
+ updateSubscriptions chan *partitionOffsetManager
+ subscriptions map[*partitionOffsetManager]none
+ refs int
+}
+
+func (om *offsetManager) newBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
+ bom := &brokerOffsetManager{
+ parent: om,
+ broker: broker,
+ timer: time.NewTicker(om.conf.Consumer.Offsets.CommitInterval),
+ updateSubscriptions: make(chan *partitionOffsetManager),
+ subscriptions: make(map[*partitionOffsetManager]none),
+ }
+
+ go withRecover(bom.mainLoop)
+
+ return bom
+}
+
+func (bom *brokerOffsetManager) mainLoop() {
+ for {
+ select {
+ case <-bom.timer.C:
+ if len(bom.subscriptions) > 0 {
+ bom.flushToBroker()
+ }
+ case s, ok := <-bom.updateSubscriptions:
+ if !ok {
+ bom.timer.Stop()
+ return
+ }
+ if _, ok := bom.subscriptions[s]; ok {
+ delete(bom.subscriptions, s)
+ } else {
+ bom.subscriptions[s] = none{}
+ }
+ }
+ }
+}
+
+func (bom *brokerOffsetManager) flushToBroker() {
+ request := bom.constructRequest()
+ if request == nil {
+ return
+ }
+
+ response, err := bom.broker.CommitOffset(request)
+
+ if err != nil {
+ bom.abort(err)
+ return
+ }
+
+ for s := range bom.subscriptions {
+ if request.blocks[s.topic] == nil || request.blocks[s.topic][s.partition] == nil {
+ continue
+ }
+
+ var err KError
+ var ok bool
+
+ if response.Errors[s.topic] == nil {
+ s.handleError(ErrIncompleteResponse)
+ delete(bom.subscriptions, s)
+ s.rebalance <- none{}
+ continue
+ }
+ if err, ok = response.Errors[s.topic][s.partition]; !ok {
+ s.handleError(ErrIncompleteResponse)
+ delete(bom.subscriptions, s)
+ s.rebalance <- none{}
+ continue
+ }
+
+ switch err {
+ case ErrNoError:
+ block := request.blocks[s.topic][s.partition]
+ s.updateCommitted(block.offset, block.metadata)
+ case ErrNotLeaderForPartition, ErrLeaderNotAvailable,
+ ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer:
+ // not a critical error, we just need to redispatch
+ delete(bom.subscriptions, s)
+ s.rebalance <- none{}
+ case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize:
+ // nothing we can do about this, just tell the user and carry on
+ s.handleError(err)
+ case ErrOffsetsLoadInProgress:
+ // nothing wrong but we didn't commit, we'll get it next time round
+ break
+ case ErrUnknownTopicOrPartition:
+ // let the user know *and* try redispatching - if topic-auto-create is
+ // enabled, redispatching should trigger a metadata request and create the
+ // topic; if not then re-dispatching won't help, but we've let the user
+ // know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706)
+ fallthrough
+ default:
+ // dunno, tell the user and try redispatching
+ s.handleError(err)
+ delete(bom.subscriptions, s)
+ s.rebalance <- none{}
+ }
+ }
+}
+
+func (bom *brokerOffsetManager) constructRequest() *OffsetCommitRequest {
+ var r *OffsetCommitRequest
+ var perPartitionTimestamp int64
+ if bom.parent.conf.Consumer.Offsets.Retention == 0 {
+ perPartitionTimestamp = ReceiveTime
+ r = &OffsetCommitRequest{
+ Version: 1,
+ ConsumerGroup: bom.parent.group,
+ ConsumerGroupGeneration: GroupGenerationUndefined,
+ }
+ } else {
+ r = &OffsetCommitRequest{
+ Version: 2,
+ RetentionTime: int64(bom.parent.conf.Consumer.Offsets.Retention / time.Millisecond),
+ ConsumerGroup: bom.parent.group,
+ ConsumerGroupGeneration: GroupGenerationUndefined,
+ }
+
+ }
+
+ for s := range bom.subscriptions {
+ s.lock.Lock()
+ if s.dirty {
+ r.AddBlock(s.topic, s.partition, s.offset, perPartitionTimestamp, s.metadata)
+ }
+ s.lock.Unlock()
+ }
+
+ if len(r.blocks) > 0 {
+ return r
+ }
+
+ return nil
+}
+
+func (bom *brokerOffsetManager) abort(err error) {
+ _ = bom.broker.Close() // we don't care about the error this might return, we already have one
+ bom.parent.abandonBroker(bom)
+
+ for pom := range bom.subscriptions {
+ pom.handleError(err)
+ pom.rebalance <- none{}
+ }
+
+ for s := range bom.updateSubscriptions {
+ if _, ok := bom.subscriptions[s]; !ok {
+ s.handleError(err)
+ s.rebalance <- none{}
+ }
+ }
+
+ bom.subscriptions = make(map[*partitionOffsetManager]none)
+}
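A minimal end-to-end sketch of the offset management workflow described above; the broker address, group, and topic are illustrative assumptions:

package myapp

import (
	"log"

	"github.com/Shopify/sarama"
)

func manageOffsets() error {
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		return err
	}
	defer client.Close() // close the client last, after the offset managers

	om, err := sarama.NewOffsetManagerFromClient("my-group", client)
	if err != nil {
		return err
	}

	pom, err := om.ManagePartition("my-topic", 0)
	if err != nil {
		return err
	}

	// Resume from the last committed position, or Consumer.Offsets.Initial if none exists.
	offset, metadata := pom.NextOffset()
	log.Printf("resuming my-topic/0 at offset %d (metadata %q)", offset, metadata)

	// After processing a message msg, commit the offset of the next message to read:
	//   pom.MarkOffset(msg.Offset+1, "")

	// Close partition offset managers before the offset manager.
	if err := pom.Close(); err != nil {
		return err
	}
	return om.Close()
}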
diff --git a/vendor/github.com/Shopify/sarama/offset_manager_test.go b/vendor/github.com/Shopify/sarama/offset_manager_test.go
new file mode 100644
index 000000000..c111a5a63
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_manager_test.go
@@ -0,0 +1,369 @@
+package sarama
+
+import (
+ "testing"
+ "time"
+)
+
+func initOffsetManager(t *testing.T) (om OffsetManager,
+ testClient Client, broker, coordinator *MockBroker) {
+
+ config := NewConfig()
+ config.Metadata.Retry.Max = 1
+ config.Consumer.Offsets.CommitInterval = 1 * time.Millisecond
+ config.Version = V0_9_0_0
+
+ broker = NewMockBroker(t, 1)
+ coordinator = NewMockBroker(t, 2)
+
+ seedMeta := new(MetadataResponse)
+ seedMeta.AddBroker(coordinator.Addr(), coordinator.BrokerID())
+ seedMeta.AddTopicPartition("my_topic", 0, 1, []int32{}, []int32{}, ErrNoError)
+ seedMeta.AddTopicPartition("my_topic", 1, 1, []int32{}, []int32{}, ErrNoError)
+ broker.Returns(seedMeta)
+
+ var err error
+ testClient, err = NewClient([]string{broker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ broker.Returns(&ConsumerMetadataResponse{
+ CoordinatorID: coordinator.BrokerID(),
+ CoordinatorHost: "127.0.0.1",
+ CoordinatorPort: coordinator.Port(),
+ })
+
+ om, err = NewOffsetManagerFromClient("group", testClient)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return om, testClient, broker, coordinator
+}
+
+func initPartitionOffsetManager(t *testing.T, om OffsetManager,
+ coordinator *MockBroker, initialOffset int64, metadata string) PartitionOffsetManager {
+
+ fetchResponse := new(OffsetFetchResponse)
+ fetchResponse.AddBlock("my_topic", 0, &OffsetFetchResponseBlock{
+ Err: ErrNoError,
+ Offset: initialOffset,
+ Metadata: metadata,
+ })
+ coordinator.Returns(fetchResponse)
+
+ pom, err := om.ManagePartition("my_topic", 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return pom
+}
+
+func TestNewOffsetManager(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+ seedBroker.Returns(new(MetadataResponse))
+
+ testClient, err := NewClient([]string{seedBroker.Addr()}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = NewOffsetManagerFromClient("group", testClient)
+ if err != nil {
+ t.Error(err)
+ }
+
+ safeClose(t, testClient)
+
+ _, err = NewOffsetManagerFromClient("group", testClient)
+ if err != ErrClosedClient {
+ t.Errorf("Error expected for closed client; actual value: %v", err)
+ }
+
+ seedBroker.Close()
+}
+
+// Test recovery from ErrNotCoordinatorForConsumer
+// on first fetchInitialOffset call
+func TestOffsetManagerFetchInitialFail(t *testing.T) {
+ om, testClient, broker, coordinator := initOffsetManager(t)
+
+ // Error on first fetchInitialOffset call
+ responseBlock := OffsetFetchResponseBlock{
+ Err: ErrNotCoordinatorForConsumer,
+ Offset: 5,
+ Metadata: "test_meta",
+ }
+
+ fetchResponse := new(OffsetFetchResponse)
+ fetchResponse.AddBlock("my_topic", 0, &responseBlock)
+ coordinator.Returns(fetchResponse)
+
+ // Refresh coordinator
+ newCoordinator := NewMockBroker(t, 3)
+ broker.Returns(&ConsumerMetadataResponse{
+ CoordinatorID: newCoordinator.BrokerID(),
+ CoordinatorHost: "127.0.0.1",
+ CoordinatorPort: newCoordinator.Port(),
+ })
+
+ // Second fetchInitialOffset call is fine
+ fetchResponse2 := new(OffsetFetchResponse)
+ responseBlock2 := responseBlock
+ responseBlock2.Err = ErrNoError
+ fetchResponse2.AddBlock("my_topic", 0, &responseBlock2)
+ newCoordinator.Returns(fetchResponse2)
+
+ pom, err := om.ManagePartition("my_topic", 0)
+ if err != nil {
+ t.Error(err)
+ }
+
+ broker.Close()
+ coordinator.Close()
+ newCoordinator.Close()
+ safeClose(t, pom)
+ safeClose(t, om)
+ safeClose(t, testClient)
+}
+
+// Test fetchInitialOffset retry on ErrOffsetsLoadInProgress
+func TestOffsetManagerFetchInitialLoadInProgress(t *testing.T) {
+ om, testClient, broker, coordinator := initOffsetManager(t)
+
+ // Error on first fetchInitialOffset call
+ responseBlock := OffsetFetchResponseBlock{
+ Err: ErrOffsetsLoadInProgress,
+ Offset: 5,
+ Metadata: "test_meta",
+ }
+
+ fetchResponse := new(OffsetFetchResponse)
+ fetchResponse.AddBlock("my_topic", 0, &responseBlock)
+ coordinator.Returns(fetchResponse)
+
+ // Second fetchInitialOffset call is fine
+ fetchResponse2 := new(OffsetFetchResponse)
+ responseBlock2 := responseBlock
+ responseBlock2.Err = ErrNoError
+ fetchResponse2.AddBlock("my_topic", 0, &responseBlock2)
+ coordinator.Returns(fetchResponse2)
+
+ pom, err := om.ManagePartition("my_topic", 0)
+ if err != nil {
+ t.Error(err)
+ }
+
+ broker.Close()
+ coordinator.Close()
+ safeClose(t, pom)
+ safeClose(t, om)
+ safeClose(t, testClient)
+}
+
+func TestPartitionOffsetManagerInitialOffset(t *testing.T) {
+ om, testClient, broker, coordinator := initOffsetManager(t)
+ testClient.Config().Consumer.Offsets.Initial = OffsetOldest
+
+ // Kafka returns -1 if no offset has been stored for this partition yet.
+ pom := initPartitionOffsetManager(t, om, coordinator, -1, "")
+
+ offset, meta := pom.NextOffset()
+ if offset != OffsetOldest {
+ t.Errorf("Expected offset 5. Actual: %v", offset)
+ }
+ if meta != "" {
+ t.Errorf("Expected metadata to be empty. Actual: %q", meta)
+ }
+
+ safeClose(t, pom)
+ safeClose(t, om)
+ broker.Close()
+ coordinator.Close()
+ safeClose(t, testClient)
+}
+
+func TestPartitionOffsetManagerNextOffset(t *testing.T) {
+ om, testClient, broker, coordinator := initOffsetManager(t)
+ pom := initPartitionOffsetManager(t, om, coordinator, 5, "test_meta")
+
+ offset, meta := pom.NextOffset()
+ if offset != 5 {
+ t.Errorf("Expected offset 5. Actual: %v", offset)
+ }
+ if meta != "test_meta" {
+ t.Errorf("Expected metadata \"test_meta\". Actual: %q", meta)
+ }
+
+ safeClose(t, pom)
+ safeClose(t, om)
+ broker.Close()
+ coordinator.Close()
+ safeClose(t, testClient)
+}
+
+func TestPartitionOffsetManagerMarkOffset(t *testing.T) {
+ om, testClient, broker, coordinator := initOffsetManager(t)
+ pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
+
+ ocResponse := new(OffsetCommitResponse)
+ ocResponse.AddError("my_topic", 0, ErrNoError)
+ coordinator.Returns(ocResponse)
+
+ pom.MarkOffset(100, "modified_meta")
+ offset, meta := pom.NextOffset()
+
+ if offset != 100 {
+ t.Errorf("Expected offset 100. Actual: %v", offset)
+ }
+ if meta != "modified_meta" {
+ t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
+ }
+
+ safeClose(t, pom)
+ safeClose(t, om)
+ safeClose(t, testClient)
+ broker.Close()
+ coordinator.Close()
+}
+
+func TestPartitionOffsetManagerMarkOffsetWithRetention(t *testing.T) {
+ om, testClient, broker, coordinator := initOffsetManager(t)
+ testClient.Config().Consumer.Offsets.Retention = time.Hour
+
+ pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
+
+ ocResponse := new(OffsetCommitResponse)
+ ocResponse.AddError("my_topic", 0, ErrNoError)
+ handler := func(req *request) (res encoder) {
+ if req.body.version() != 2 {
+ t.Errorf("Expected to be using version 2. Actual: %v", req.body.version())
+ }
+ offsetCommitRequest := req.body.(*OffsetCommitRequest)
+ if offsetCommitRequest.RetentionTime != (60 * 60 * 1000) {
+ t.Errorf("Expected an hour retention time. Actual: %v", offsetCommitRequest.RetentionTime)
+ }
+ return ocResponse
+ }
+ coordinator.setHandler(handler)
+
+ pom.MarkOffset(100, "modified_meta")
+ offset, meta := pom.NextOffset()
+
+ if offset != 100 {
+ t.Errorf("Expected offset 100. Actual: %v", offset)
+ }
+ if meta != "modified_meta" {
+ t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
+ }
+
+ safeClose(t, pom)
+ safeClose(t, om)
+ safeClose(t, testClient)
+ broker.Close()
+ coordinator.Close()
+}
+
+func TestPartitionOffsetManagerCommitErr(t *testing.T) {
+ om, testClient, broker, coordinator := initOffsetManager(t)
+ pom := initPartitionOffsetManager(t, om, coordinator, 5, "meta")
+
+ // Error on one partition
+ ocResponse := new(OffsetCommitResponse)
+ ocResponse.AddError("my_topic", 0, ErrOffsetOutOfRange)
+ ocResponse.AddError("my_topic", 1, ErrNoError)
+ coordinator.Returns(ocResponse)
+
+ newCoordinator := NewMockBroker(t, 3)
+
+ // For RefreshCoordinator()
+ broker.Returns(&ConsumerMetadataResponse{
+ CoordinatorID: newCoordinator.BrokerID(),
+ CoordinatorHost: "127.0.0.1",
+ CoordinatorPort: newCoordinator.Port(),
+ })
+
+ // Nothing in response.Errors at all
+ ocResponse2 := new(OffsetCommitResponse)
+ newCoordinator.Returns(ocResponse2)
+
+ // For RefreshCoordinator()
+ broker.Returns(&ConsumerMetadataResponse{
+ CoordinatorID: newCoordinator.BrokerID(),
+ CoordinatorHost: "127.0.0.1",
+ CoordinatorPort: newCoordinator.Port(),
+ })
+
+ // Error on the wrong partition for this pom
+ ocResponse3 := new(OffsetCommitResponse)
+ ocResponse3.AddError("my_topic", 1, ErrNoError)
+ newCoordinator.Returns(ocResponse3)
+
+ // For RefreshCoordinator()
+ broker.Returns(&ConsumerMetadataResponse{
+ CoordinatorID: newCoordinator.BrokerID(),
+ CoordinatorHost: "127.0.0.1",
+ CoordinatorPort: newCoordinator.Port(),
+ })
+
+ // ErrUnknownTopicOrPartition/ErrNotLeaderForPartition/ErrLeaderNotAvailable block
+ ocResponse4 := new(OffsetCommitResponse)
+ ocResponse4.AddError("my_topic", 0, ErrUnknownTopicOrPartition)
+ newCoordinator.Returns(ocResponse4)
+
+ // For RefreshCoordinator()
+ broker.Returns(&ConsumerMetadataResponse{
+ CoordinatorID: newCoordinator.BrokerID(),
+ CoordinatorHost: "127.0.0.1",
+ CoordinatorPort: newCoordinator.Port(),
+ })
+
+ // Normal error response
+ ocResponse5 := new(OffsetCommitResponse)
+ ocResponse5.AddError("my_topic", 0, ErrNoError)
+ newCoordinator.Returns(ocResponse5)
+
+ pom.MarkOffset(100, "modified_meta")
+
+ err := pom.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ broker.Close()
+ coordinator.Close()
+ newCoordinator.Close()
+ safeClose(t, om)
+ safeClose(t, testClient)
+}
+
+// Test of recovery from abort
+func TestAbortPartitionOffsetManager(t *testing.T) {
+ om, testClient, broker, coordinator := initOffsetManager(t)
+ pom := initPartitionOffsetManager(t, om, coordinator, 5, "meta")
+
+ // this triggers an error in the CommitOffset request,
+ // which leads to the abort call
+ coordinator.Close()
+
+ // Response to refresh coordinator request
+ newCoordinator := NewMockBroker(t, 3)
+ broker.Returns(&ConsumerMetadataResponse{
+ CoordinatorID: newCoordinator.BrokerID(),
+ CoordinatorHost: "127.0.0.1",
+ CoordinatorPort: newCoordinator.Port(),
+ })
+
+ ocResponse := new(OffsetCommitResponse)
+ ocResponse.AddError("my_topic", 0, ErrNoError)
+ newCoordinator.Returns(ocResponse)
+
+ pom.MarkOffset(100, "modified_meta")
+
+ safeClose(t, pom)
+ safeClose(t, om)
+ broker.Close()
+ safeClose(t, testClient)
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go
new file mode 100644
index 000000000..6c2696016
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_request.go
@@ -0,0 +1,132 @@
+package sarama
+
+type offsetRequestBlock struct {
+ time int64
+ maxOffsets int32 // Only used in version 0
+}
+
+func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error {
+ pe.putInt64(int64(b.time))
+ if version == 0 {
+ pe.putInt32(b.maxOffsets)
+ }
+
+ return nil
+}
+
+func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+ if b.time, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if version == 0 {
+ if b.maxOffsets, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type OffsetRequest struct {
+ Version int16
+ blocks map[string]map[int32]*offsetRequestBlock
+}
+
+func (r *OffsetRequest) encode(pe packetEncoder) error {
+ pe.putInt32(-1) // replica ID is always -1 for clients
+ err := pe.putArrayLength(len(r.blocks))
+ if err != nil {
+ return err
+ }
+ for topic, partitions := range r.blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err = block.encode(pe, r.Version); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (r *OffsetRequest) decode(pd packetDecoder, version int16) error {
+ r.Version = version
+
+ // Ignore replica ID
+ if _, err := pd.getInt32(); err != nil {
+ return err
+ }
+ blockCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if blockCount == 0 {
+ return nil
+ }
+ r.blocks = make(map[string]map[int32]*offsetRequestBlock)
+ for i := 0; i < blockCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.blocks[topic] = make(map[int32]*offsetRequestBlock)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ block := &offsetRequestBlock{}
+ if err := block.decode(pd, version); err != nil {
+ return err
+ }
+ r.blocks[topic][partition] = block
+ }
+ }
+ return nil
+}
+
+func (r *OffsetRequest) key() int16 {
+ return 2
+}
+
+func (r *OffsetRequest) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_10_1_0
+ default:
+ return minVersion
+ }
+}
+
+func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) {
+ if r.blocks == nil {
+ r.blocks = make(map[string]map[int32]*offsetRequestBlock)
+ }
+
+ if r.blocks[topic] == nil {
+ r.blocks[topic] = make(map[int32]*offsetRequestBlock)
+ }
+
+ tmp := new(offsetRequestBlock)
+ tmp.time = time
+ if r.Version == 0 {
+ tmp.maxOffsets = maxOffsets
+ }
+
+ r.blocks[topic][partitionID] = tmp
+}
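
OffsetRequest implements Kafka's ListOffsets API (request key 2); version 1 drops the maxOffsets field and requires a 0.10.1 broker. Callers rarely build it by hand, since the high-level Client wraps it, but both paths are sketched below (the broker address and topic name are placeholders):

    package main

    import (
        "log"

        "github.com/Shopify/sarama"
    )

    func main() {
        client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()

        // High-level path: the client issues the OffsetRequest for you and returns
        // the next offset that will be produced to the partition.
        newest, err := client.GetOffset("example-topic", 0, sarama.OffsetNewest)
        if err != nil {
            log.Fatal(err)
        }
        log.Println("newest offset:", newest)

        // Low-level path: roughly the request the client builds under the hood.
        req := &sarama.OffsetRequest{}
        req.AddBlock("example-topic", 0, sarama.OffsetNewest, 1) // maxOffsets is only honoured by version 0
        _ = req // it would be sent to the partition's leader broker
    }
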
diff --git a/vendor/github.com/Shopify/sarama/offset_request_test.go b/vendor/github.com/Shopify/sarama/offset_request_test.go
new file mode 100644
index 000000000..9ce562c99
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_request_test.go
@@ -0,0 +1,43 @@
+package sarama
+
+import "testing"
+
+var (
+ offsetRequestNoBlocks = []byte{
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x00, 0x00, 0x00, 0x00}
+
+ offsetRequestOneBlock = []byte{
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x03, 'f', 'o', 'o',
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x02}
+
+ offsetRequestOneBlockV1 = []byte{
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x03, 'b', 'a', 'r',
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}
+)
+
+func TestOffsetRequest(t *testing.T) {
+ request := new(OffsetRequest)
+ testRequest(t, "no blocks", request, offsetRequestNoBlocks)
+
+ request.AddBlock("foo", 4, 1, 2)
+ testRequest(t, "one block", request, offsetRequestOneBlock)
+}
+
+func TestOffsetRequestV1(t *testing.T) {
+ request := new(OffsetRequest)
+ request.Version = 1
+ testRequest(t, "no blocks", request, offsetRequestNoBlocks)
+
+ request.AddBlock("bar", 4, 1, 2) // Last argument is ignored for V1
+ testRequest(t, "one block", request, offsetRequestOneBlockV1)
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go
new file mode 100644
index 000000000..9a9cfe96f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_response.go
@@ -0,0 +1,174 @@
+package sarama
+
+type OffsetResponseBlock struct {
+ Err KError
+ Offsets []int64 // Version 0
+ Offset int64 // Version 1
+ Timestamp int64 // Version 1
+}
+
+func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ b.Err = KError(tmp)
+
+ if version == 0 {
+ b.Offsets, err = pd.getInt64Array()
+
+ return err
+ }
+
+ b.Timestamp, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ b.Offset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ // For backwards compatibility put the offset in the offsets array too
+ b.Offsets = []int64{b.Offset}
+
+ return nil
+}
+
+func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+ pe.putInt16(int16(b.Err))
+
+ if version == 0 {
+ return pe.putInt64Array(b.Offsets)
+ }
+
+ pe.putInt64(b.Timestamp)
+ pe.putInt64(b.Offset)
+
+ return nil
+}
+
+type OffsetResponse struct {
+ Version int16
+ Blocks map[string]map[int32]*OffsetResponseBlock
+}
+
+func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) {
+ numTopics, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks)
+
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(OffsetResponseBlock)
+ err = block.decode(pd, version)
+ if err != nil {
+ return err
+ }
+ r.Blocks[name][id] = block
+ }
+ }
+
+ return nil
+}
+
+func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock {
+ if r.Blocks == nil {
+ return nil
+ }
+
+ if r.Blocks[topic] == nil {
+ return nil
+ }
+
+ return r.Blocks[topic][partition]
+}
+
+func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
+ if err = pe.putArrayLength(len(r.Blocks)); err != nil {
+ return err
+ }
+
+ for topic, partitions := range r.Blocks {
+ if err = pe.putString(topic); err != nil {
+ return err
+ }
+ if err = pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err = block.encode(pe, r.version()); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (r *OffsetResponse) key() int16 {
+ return 2
+}
+
+func (r *OffsetResponse) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_10_1_0
+ default:
+ return minVersion
+ }
+}
+
+// testing API
+
+func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*OffsetResponseBlock)
+ }
+ byTopic, ok := r.Blocks[topic]
+ if !ok {
+ byTopic = make(map[int32]*OffsetResponseBlock)
+ r.Blocks[topic] = byTopic
+ }
+ byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset}
+}
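
The response mirrors the request's versioning: version 0 carries a list of offsets per partition, while version 1 carries a single offset plus a timestamp, which decode also mirrors into Offsets for backwards compatibility. A small sketch of reading a block either way (the response here is fabricated with the testing helper rather than fetched from a broker):

    package main

    import (
        "fmt"

        "github.com/Shopify/sarama"
    )

    // readOffset pulls an offset out of a block regardless of whether it was decoded from a
    // v0 response (Offsets slice) or a v1 response (single Offset mirrored into Offsets).
    func readOffset(block *sarama.OffsetResponseBlock) (int64, bool) {
        if block == nil || block.Err != sarama.ErrNoError {
            return 0, false
        }
        if len(block.Offsets) > 0 {
            return block.Offsets[0], true
        }
        return block.Offset, true
    }

    func main() {
        // In real code the response comes back from a broker; here we fabricate one
        // using the AddTopicPartition testing helper defined above.
        resp := &sarama.OffsetResponse{}
        resp.AddTopicPartition("example-topic", 0, 42)
        if offset, ok := readOffset(resp.GetBlock("example-topic", 0)); ok {
            fmt.Println("offset:", offset)
        }
    }
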
diff --git a/vendor/github.com/Shopify/sarama/offset_response_test.go b/vendor/github.com/Shopify/sarama/offset_response_test.go
new file mode 100644
index 000000000..0df6c9f3e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_response_test.go
@@ -0,0 +1,111 @@
+package sarama
+
+import "testing"
+
+var (
+ emptyOffsetResponse = []byte{
+ 0x00, 0x00, 0x00, 0x00}
+
+ normalOffsetResponse = []byte{
+ 0x00, 0x00, 0x00, 0x02,
+
+ 0x00, 0x01, 'a',
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x01, 'z',
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x02,
+ 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06}
+
+ normalOffsetResponseV1 = []byte{
+ 0x00, 0x00, 0x00, 0x02,
+
+ 0x00, 0x01, 'a',
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x01, 'z',
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x02,
+ 0x00, 0x00,
+ 0x00, 0x00, 0x01, 0x58, 0x1A, 0xE6, 0x48, 0x86,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06}
+)
+
+func TestEmptyOffsetResponse(t *testing.T) {
+ response := OffsetResponse{}
+
+ testVersionDecodable(t, "empty", &response, emptyOffsetResponse, 0)
+ if len(response.Blocks) != 0 {
+ t.Error("Decoding produced", len(response.Blocks), "topics where there were none.")
+ }
+
+ response = OffsetResponse{}
+
+ testVersionDecodable(t, "empty", &response, emptyOffsetResponse, 1)
+ if len(response.Blocks) != 0 {
+ t.Error("Decoding produced", len(response.Blocks), "topics where there were none.")
+ }
+}
+
+func TestNormalOffsetResponse(t *testing.T) {
+ response := OffsetResponse{}
+
+ testVersionDecodable(t, "normal", &response, normalOffsetResponse, 0)
+
+ if len(response.Blocks) != 2 {
+ t.Fatal("Decoding produced", len(response.Blocks), "topics where there were two.")
+ }
+
+ if len(response.Blocks["a"]) != 0 {
+ t.Fatal("Decoding produced", len(response.Blocks["a"]), "partitions for topic 'a' where there were none.")
+ }
+
+ if len(response.Blocks["z"]) != 1 {
+ t.Fatal("Decoding produced", len(response.Blocks["z"]), "partitions for topic 'z' where there was one.")
+ }
+
+ if response.Blocks["z"][2].Err != ErrNoError {
+ t.Fatal("Decoding produced invalid error for topic z partition 2.")
+ }
+
+ if len(response.Blocks["z"][2].Offsets) != 2 {
+ t.Fatal("Decoding produced invalid number of offsets for topic z partition 2.")
+ }
+
+ if response.Blocks["z"][2].Offsets[0] != 5 || response.Blocks["z"][2].Offsets[1] != 6 {
+ t.Fatal("Decoding produced invalid offsets for topic z partition 2.")
+ }
+}
+
+func TestNormalOffsetResponseV1(t *testing.T) {
+ response := OffsetResponse{}
+
+ testVersionDecodable(t, "normal", &response, normalOffsetResponseV1, 1)
+
+ if len(response.Blocks) != 2 {
+ t.Fatal("Decoding produced", len(response.Blocks), "topics where there were two.")
+ }
+
+ if len(response.Blocks["a"]) != 0 {
+ t.Fatal("Decoding produced", len(response.Blocks["a"]), "partitions for topic 'a' where there were none.")
+ }
+
+ if len(response.Blocks["z"]) != 1 {
+ t.Fatal("Decoding produced", len(response.Blocks["z"]), "partitions for topic 'z' where there was one.")
+ }
+
+ if response.Blocks["z"][2].Err != ErrNoError {
+ t.Fatal("Decoding produced invalid error for topic z partition 2.")
+ }
+
+ if response.Blocks["z"][2].Timestamp != 1477920049286 {
+ t.Fatal("Decoding produced invalid timestamp for topic z partition 2.", response.Blocks["z"][2].Timestamp)
+ }
+
+ if response.Blocks["z"][2].Offset != 6 {
+ t.Fatal("Decoding produced invalid offsets for topic z partition 2.")
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go
new file mode 100644
index 000000000..28670c0e6
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/packet_decoder.go
@@ -0,0 +1,45 @@
+package sarama
+
+// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules.
+// Types implementing Decoder only need to worry about calling methods like GetString,
+// not about how a string is represented in Kafka.
+type packetDecoder interface {
+ // Primitives
+ getInt8() (int8, error)
+ getInt16() (int16, error)
+ getInt32() (int32, error)
+ getInt64() (int64, error)
+ getArrayLength() (int, error)
+
+ // Collections
+ getBytes() ([]byte, error)
+ getString() (string, error)
+ getInt32Array() ([]int32, error)
+ getInt64Array() ([]int64, error)
+ getStringArray() ([]string, error)
+
+ // Subsets
+ remaining() int
+ getSubset(length int) (packetDecoder, error)
+
+ // Stacks, see PushDecoder
+ push(in pushDecoder) error
+ pop() error
+}
+
+// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
+// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
+// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
+// depend upon have been decoded.
+type pushDecoder interface {
+ // Saves the offset into the input buffer as the location to actually read the calculated value when able.
+ saveOffset(in int)
+
+ // Returns the length of data to reserve for the input of this decoder (eg 4 bytes for a CRC32).
+ reserveLength() int
+
+ // Indicates that all required data is now available to calculate and check the field.
+ // SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
+ // of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
+ check(curOffset int, buf []byte) error
+}
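
The push/pop contract is easiest to see with a concrete pushDecoder. The sketch below is purely illustrative and not part of the vendored code (it would only compile inside package sarama, since the interfaces are unexported); it mirrors what the library's own lengthField helper does for message-set framing: reserve a fixed-width slot when pushed, then verify it against everything decoded since, once popped.

    package sarama

    import (
        "encoding/binary"
        "fmt"
    )

    // lengthCheck32 reserves a 4-byte slot and, when popped, verifies that the int32 stored
    // there matches the number of bytes decoded between the saved offset and curOffset.
    type lengthCheck32 struct {
        startOffset int
    }

    func (l *lengthCheck32) saveOffset(in int) { l.startOffset = in }

    func (l *lengthCheck32) reserveLength() int { return 4 }

    func (l *lengthCheck32) check(curOffset int, buf []byte) error {
        claimed := int32(binary.BigEndian.Uint32(buf[l.startOffset:]))
        decoded := int32(curOffset - l.startOffset - 4)
        if claimed != decoded {
            return fmt.Errorf("length field claims %d bytes but %d were decoded", claimed, decoded)
        }
        return nil
    }
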
diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go
new file mode 100644
index 000000000..27a10f6d4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/packet_encoder.go
@@ -0,0 +1,50 @@
+package sarama
+
+import "github.com/rcrowley/go-metrics"
+
+// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
+// Types implementing Encoder only need to worry about calling methods like PutString,
+// not about how a string is represented in Kafka.
+type packetEncoder interface {
+ // Primitives
+ putInt8(in int8)
+ putInt16(in int16)
+ putInt32(in int32)
+ putInt64(in int64)
+ putArrayLength(in int) error
+
+ // Collections
+ putBytes(in []byte) error
+ putRawBytes(in []byte) error
+ putString(in string) error
+ putStringArray(in []string) error
+ putInt32Array(in []int32) error
+ putInt64Array(in []int64) error
+
+ // Provide the current offset to record the batch size metric
+ offset() int
+
+ // Stacks, see PushEncoder
+ push(in pushEncoder)
+ pop() error
+
+ // To record metrics when provided
+ metricRegistry() metrics.Registry
+}
+
+// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
+// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
+// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
+// depend upon have been written.
+type pushEncoder interface {
+ // Saves the offset into the input buffer as the location to actually write the calculated value when able.
+ saveOffset(in int)
+
+ // Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32).
+ reserveLength() int
+
+ // Indicates that all required data is now available to calculate and write the field.
+ // SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
+ // of data to the saved offset, based on the data between the saved offset and curOffset.
+ run(curOffset int, buf []byte) error
+}
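
The encoder side is symmetric: a pushEncoder reserves space up front and fills it in once the dependent bytes have been written. produce_request.go below does exactly this around each message set via pe.push(&lengthField{}) followed by pe.pop(). An illustrative (non-library) length writer, assuming the interface exactly as declared above and again only compilable inside package sarama:

    package sarama

    import "encoding/binary"

    // sizeField32 reserves 4 bytes and, when popped, writes the number of bytes encoded
    // after it into that slot as a big-endian int32.
    type sizeField32 struct {
        startOffset int
    }

    func (s *sizeField32) saveOffset(in int) { s.startOffset = in }

    func (s *sizeField32) reserveLength() int { return 4 }

    func (s *sizeField32) run(curOffset int, buf []byte) error {
        binary.BigEndian.PutUint32(buf[s.startOffset:], uint32(curOffset-s.startOffset-4))
        return nil
    }
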
diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go
new file mode 100644
index 000000000..972932728
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/partitioner.go
@@ -0,0 +1,135 @@
+package sarama
+
+import (
+ "hash"
+ "hash/fnv"
+ "math/rand"
+ "time"
+)
+
+// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
+// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
+// as simple default implementations.
+type Partitioner interface {
+ // Partition takes a message and partition count and chooses a partition
+ Partition(message *ProducerMessage, numPartitions int32) (int32, error)
+
+ // RequiresConsistency indicates to the user of the partitioner whether the
+ // mapping of key->partition is consistent or not. Specifically, if a
+ // partitioner requires consistency then it must be allowed to choose from all
+ // partitions (even ones known to be unavailable), and its choice must be
+ // respected by the caller. The obvious example is the HashPartitioner.
+ RequiresConsistency() bool
+}
+
+// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
+type PartitionerConstructor func(topic string) Partitioner
+
+type manualPartitioner struct{}
+
+// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
+// ProducerMessage's Partition field as the partition to produce to.
+func NewManualPartitioner(topic string) Partitioner {
+ return new(manualPartitioner)
+}
+
+func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ return message.Partition, nil
+}
+
+func (p *manualPartitioner) RequiresConsistency() bool {
+ return true
+}
+
+type randomPartitioner struct {
+ generator *rand.Rand
+}
+
+// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
+func NewRandomPartitioner(topic string) Partitioner {
+ p := new(randomPartitioner)
+ p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
+ return p
+}
+
+func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ return int32(p.generator.Intn(int(numPartitions))), nil
+}
+
+func (p *randomPartitioner) RequiresConsistency() bool {
+ return false
+}
+
+type roundRobinPartitioner struct {
+ partition int32
+}
+
+// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
+func NewRoundRobinPartitioner(topic string) Partitioner {
+ return &roundRobinPartitioner{}
+}
+
+func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ if p.partition >= numPartitions {
+ p.partition = 0
+ }
+ ret := p.partition
+ p.partition++
+ return ret, nil
+}
+
+func (p *roundRobinPartitioner) RequiresConsistency() bool {
+ return false
+}
+
+type hashPartitioner struct {
+ random Partitioner
+ hasher hash.Hash32
+}
+
+// NewCustomHashPartitioner is a wrapper around NewHashPartitioner that allows the use of a custom hasher.
+// The argument is a constructor function returning an instance that implements the hash.Hash32 interface.
+// Passing a constructor ensures each partition dispatcher gets its own hasher, avoiding concurrency issues from sharing an instance.
+func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor {
+ return func(topic string) Partitioner {
+ p := new(hashPartitioner)
+ p.random = NewRandomPartitioner(topic)
+ p.hasher = hasher()
+ return p
+ }
+}
+
+// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
+// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
+// modulus the number of partitions. This ensures that messages with the same key always end up on the
+// same partition.
+func NewHashPartitioner(topic string) Partitioner {
+ p := new(hashPartitioner)
+ p.random = NewRandomPartitioner(topic)
+ p.hasher = fnv.New32a()
+ return p
+}
+
+func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ if message.Key == nil {
+ return p.random.Partition(message, numPartitions)
+ }
+ bytes, err := message.Key.Encode()
+ if err != nil {
+ return -1, err
+ }
+ p.hasher.Reset()
+ _, err = p.hasher.Write(bytes)
+ if err != nil {
+ return -1, err
+ }
+ partition := int32(p.hasher.Sum32()) % numPartitions
+ if partition < 0 {
+ partition = -partition
+ }
+ return partition, nil
+}
+
+func (p *hashPartitioner) RequiresConsistency() bool {
+ return true
+}
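
Beyond the Example functions in the test file, the custom-hasher constructor is normally wired in through the producer configuration. A sketch follows; the broker address and topic are placeholders, and Return.Successes is enabled because the SyncProducer in at least some sarama versions requires it (it is harmless otherwise):

    package main

    import (
        "hash/fnv"
        "log"

        "github.com/Shopify/sarama"
    )

    func main() {
        config := sarama.NewConfig()
        config.Producer.Return.Successes = true // needed by the SyncProducer in some sarama versions

        // Pass the hasher's constructor, not an instance: every partition dispatcher
        // then gets its own hash.Hash32 and no instance is shared across goroutines.
        config.Producer.Partitioner = sarama.NewCustomHashPartitioner(fnv.New32a)

        producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
        if err != nil {
            log.Fatal(err)
        }
        defer producer.Close()

        msg := &sarama.ProducerMessage{
            Topic: "example-topic",
            Key:   sarama.StringEncoder("user-42"), // messages sharing a key land on the same partition
            Value: sarama.StringEncoder("hello"),
        }
        partition, offset, err := producer.SendMessage(msg)
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("sent to partition %d at offset %d", partition, offset)
    }
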
diff --git a/vendor/github.com/Shopify/sarama/partitioner_test.go b/vendor/github.com/Shopify/sarama/partitioner_test.go
new file mode 100644
index 000000000..83376431f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/partitioner_test.go
@@ -0,0 +1,265 @@
+package sarama
+
+import (
+ "crypto/rand"
+ "hash/fnv"
+ "log"
+ "testing"
+)
+
+func assertPartitioningConsistent(t *testing.T, partitioner Partitioner, message *ProducerMessage, numPartitions int32) {
+ choice, err := partitioner.Partition(message, numPartitions)
+ if err != nil {
+ t.Error(partitioner, err)
+ }
+ if choice < 0 || choice >= numPartitions {
+ t.Error(partitioner, "returned partition", choice, "outside of range for", message)
+ }
+ for i := 1; i < 50; i++ {
+ newChoice, err := partitioner.Partition(message, numPartitions)
+ if err != nil {
+ t.Error(partitioner, err)
+ }
+ if newChoice != choice {
+ t.Error(partitioner, "returned partition", newChoice, "inconsistent with", choice, ".")
+ }
+ }
+}
+
+func TestRandomPartitioner(t *testing.T) {
+ partitioner := NewRandomPartitioner("mytopic")
+
+ choice, err := partitioner.Partition(nil, 1)
+ if err != nil {
+ t.Error(partitioner, err)
+ }
+ if choice != 0 {
+ t.Error("Returned non-zero partition when only one available.")
+ }
+
+ for i := 1; i < 50; i++ {
+ choice, err := partitioner.Partition(nil, 50)
+ if err != nil {
+ t.Error(partitioner, err)
+ }
+ if choice < 0 || choice >= 50 {
+ t.Error("Returned partition", choice, "outside of range.")
+ }
+ }
+}
+
+func TestRoundRobinPartitioner(t *testing.T) {
+ partitioner := NewRoundRobinPartitioner("mytopic")
+
+ choice, err := partitioner.Partition(nil, 1)
+ if err != nil {
+ t.Error(partitioner, err)
+ }
+ if choice != 0 {
+ t.Error("Returned non-zero partition when only one available.")
+ }
+
+ var i int32
+ for i = 1; i < 50; i++ {
+ choice, err := partitioner.Partition(nil, 7)
+ if err != nil {
+ t.Error(partitioner, err)
+ }
+ if choice != i%7 {
+ t.Error("Returned partition", choice, "expecting", i%7)
+ }
+ }
+}
+
+func TestNewHashPartitionerWithHasher(t *testing.T) {
+ // use the current default hasher fnv.New32a()
+ partitioner := NewCustomHashPartitioner(fnv.New32a)("mytopic")
+
+ choice, err := partitioner.Partition(&ProducerMessage{}, 1)
+ if err != nil {
+ t.Error(partitioner, err)
+ }
+ if choice != 0 {
+ t.Error("Returned non-zero partition when only one available.")
+ }
+
+ for i := 1; i < 50; i++ {
+ choice, err := partitioner.Partition(&ProducerMessage{}, 50)
+ if err != nil {
+ t.Error(partitioner, err)
+ }
+ if choice < 0 || choice >= 50 {
+ t.Error("Returned partition", choice, "outside of range for nil key.")
+ }
+ }
+
+ buf := make([]byte, 256)
+ for i := 1; i < 50; i++ {
+ if _, err := rand.Read(buf); err != nil {
+ t.Error(err)
+ }
+ assertPartitioningConsistent(t, partitioner, &ProducerMessage{Key: ByteEncoder(buf)}, 50)
+ }
+}
+
+func TestHashPartitionerWithHasherMinInt32(t *testing.T) {
+ // use the current default hasher fnv.New32a()
+ partitioner := NewCustomHashPartitioner(fnv.New32a)("mytopic")
+
+ msg := ProducerMessage{}
+ // "1468509572224" generates 2147483648 (uint32) result from Sum32 function
+ // which is -2147483648 or int32's min value
+ msg.Key = StringEncoder("1468509572224")
+
+ choice, err := partitioner.Partition(&msg, 50)
+ if err != nil {
+ t.Error(partitioner, err)
+ }
+ if choice < 0 || choice >= 50 {
+ t.Error("Returned partition", choice, "outside of range for nil key.")
+ }
+}
+
+func TestHashPartitioner(t *testing.T) {
+ partitioner := NewHashPartitioner("mytopic")
+
+ choice, err := partitioner.Partition(&ProducerMessage{}, 1)
+ if err != nil {
+ t.Error(partitioner, err)
+ }
+ if choice != 0 {
+ t.Error("Returned non-zero partition when only one available.")
+ }
+
+ for i := 1; i < 50; i++ {
+ choice, err := partitioner.Partition(&ProducerMessage{}, 50)
+ if err != nil {
+ t.Error(partitioner, err)
+ }
+ if choice < 0 || choice >= 50 {
+ t.Error("Returned partition", choice, "outside of range for nil key.")
+ }
+ }
+
+ buf := make([]byte, 256)
+ for i := 1; i < 50; i++ {
+ if _, err := rand.Read(buf); err != nil {
+ t.Error(err)
+ }
+ assertPartitioningConsistent(t, partitioner, &ProducerMessage{Key: ByteEncoder(buf)}, 50)
+ }
+}
+
+func TestHashPartitionerMinInt32(t *testing.T) {
+ partitioner := NewHashPartitioner("mytopic")
+
+ msg := ProducerMessage{}
+ // "1468509572224" generates 2147483648 (uint32) result from Sum32 function
+ // which is -2147483648 or int32's min value
+ msg.Key = StringEncoder("1468509572224")
+
+ choice, err := partitioner.Partition(&msg, 50)
+ if err != nil {
+ t.Error(partitioner, err)
+ }
+ if choice < 0 || choice >= 50 {
+ t.Error("Returned partition", choice, "outside of range for nil key.")
+ }
+}
+
+func TestManualPartitioner(t *testing.T) {
+ partitioner := NewManualPartitioner("mytopic")
+
+ choice, err := partitioner.Partition(&ProducerMessage{}, 1)
+ if err != nil {
+ t.Error(partitioner, err)
+ }
+ if choice != 0 {
+ t.Error("Returned non-zero partition when only one available.")
+ }
+
+ for i := int32(1); i < 50; i++ {
+ choice, err := partitioner.Partition(&ProducerMessage{Partition: i}, 50)
+ if err != nil {
+ t.Error(partitioner, err)
+ }
+ if choice != i {
+ t.Error("Returned partition not the same as the input partition")
+ }
+ }
+}
+
+// By default, Sarama uses the message's key to consistently assign a partition to
+// a message using hashing. If no key is set, a random partition will be chosen.
+// This example shows how you can partition messages randomly, even when a key is set,
+// by overriding Config.Producer.Partitioner.
+func ExamplePartitioner_random() {
+ config := NewConfig()
+ config.Producer.Partitioner = NewRandomPartitioner
+
+ producer, err := NewSyncProducer([]string{"localhost:9092"}, config)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer func() {
+ if err := producer.Close(); err != nil {
+ log.Println("Failed to close producer:", err)
+ }
+ }()
+
+ msg := &ProducerMessage{Topic: "test", Key: StringEncoder("key is set"), Value: StringEncoder("test")}
+ partition, offset, err := producer.SendMessage(msg)
+ if err != nil {
+ log.Fatalln("Failed to produce message to kafka cluster.")
+ }
+
+ log.Printf("Produced message to partition %d with offset %d", partition, offset)
+}
+
+// This example shows how to assign partitions to your messages manually.
+func ExamplePartitioner_manual() {
+ config := NewConfig()
+
+ // First, we tell the producer that we are going to partition ourselves.
+ config.Producer.Partitioner = NewManualPartitioner
+
+ producer, err := NewSyncProducer([]string{"localhost:9092"}, config)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer func() {
+ if err := producer.Close(); err != nil {
+ log.Println("Failed to close producer:", err)
+ }
+ }()
+
+ // Now, we set the Partition field of the ProducerMessage struct.
+ msg := &ProducerMessage{Topic: "test", Partition: 6, Value: StringEncoder("test")}
+
+ partition, offset, err := producer.SendMessage(msg)
+ if err != nil {
+ log.Fatalln("Failed to produce message to kafka cluster.")
+ }
+
+ if partition != 6 {
+ log.Fatal("Message should have been produced to partition 6!")
+ }
+
+ log.Printf("Produced message to partition %d with offset %d", partition, offset)
+}
+
+// This example shows how to set a different partitioner depending on the topic.
+func ExamplePartitioner_per_topic() {
+ config := NewConfig()
+ config.Producer.Partitioner = func(topic string) Partitioner {
+ switch topic {
+ case "access_log", "error_log":
+ return NewRandomPartitioner(topic)
+
+ default:
+ return NewHashPartitioner(topic)
+ }
+ }
+
+ // ...
+}
diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go
new file mode 100644
index 000000000..fd5ea0f91
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/prep_encoder.go
@@ -0,0 +1,121 @@
+package sarama
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+type prepEncoder struct {
+ length int
+}
+
+// primitives
+
+func (pe *prepEncoder) putInt8(in int8) {
+ pe.length++
+}
+
+func (pe *prepEncoder) putInt16(in int16) {
+ pe.length += 2
+}
+
+func (pe *prepEncoder) putInt32(in int32) {
+ pe.length += 4
+}
+
+func (pe *prepEncoder) putInt64(in int64) {
+ pe.length += 8
+}
+
+func (pe *prepEncoder) putArrayLength(in int) error {
+ if in > math.MaxInt32 {
+ return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
+ }
+ pe.length += 4
+ return nil
+}
+
+// arrays
+
+func (pe *prepEncoder) putBytes(in []byte) error {
+ pe.length += 4
+ if in == nil {
+ return nil
+ }
+ if len(in) > math.MaxInt32 {
+ return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
+ }
+ pe.length += len(in)
+ return nil
+}
+
+func (pe *prepEncoder) putRawBytes(in []byte) error {
+ if len(in) > math.MaxInt32 {
+ return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
+ }
+ pe.length += len(in)
+ return nil
+}
+
+func (pe *prepEncoder) putString(in string) error {
+ pe.length += 2
+ if len(in) > math.MaxInt16 {
+ return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))}
+ }
+ pe.length += len(in)
+ return nil
+}
+
+func (pe *prepEncoder) putStringArray(in []string) error {
+ err := pe.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+
+ for _, str := range in {
+ if err := pe.putString(str); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (pe *prepEncoder) putInt32Array(in []int32) error {
+ err := pe.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ pe.length += 4 * len(in)
+ return nil
+}
+
+func (pe *prepEncoder) putInt64Array(in []int64) error {
+ err := pe.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ pe.length += 8 * len(in)
+ return nil
+}
+
+func (pe *prepEncoder) offset() int {
+ return pe.length
+}
+
+// stackable
+
+func (pe *prepEncoder) push(in pushEncoder) {
+ pe.length += in.reserveLength()
+}
+
+func (pe *prepEncoder) pop() error {
+ return nil
+}
+
+// we do not record metrics during the prep encoder pass
+func (pe *prepEncoder) metricRegistry() metrics.Registry {
+ return nil
+}
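
prepEncoder is the sizing half of a two-pass encode: a request is first walked with a prepEncoder to learn its exact wire length, then written for real into a buffer of that size. A minimal sketch of the sizing pass, assuming the package's unexported encoder interface (a type exposing encode(packetEncoder) error) and the encode() helper that exist elsewhere in the library; this is illustrative only and not part of the vendored code:

    package sarama

    // encodedLength runs only the sizing pass; the library's encode() helper does this
    // first and then runs a second pass that writes into a buffer of exactly this size.
    func encodedLength(e encoder) (int, error) {
        var prep prepEncoder
        if err := e.encode(&prep); err != nil {
            return 0, err
        }
        return prep.length, nil
    }
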
diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go
new file mode 100644
index 000000000..40dc80151
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_request.go
@@ -0,0 +1,209 @@
+package sarama
+
+import "github.com/rcrowley/go-metrics"
+
+// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
+// it must see before responding. Any of the constants defined here are valid. On broker versions
+// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
+// acknowledgements) but in 0.8.2.0 and later this will return an error (it has been replaced
+// by setting the `min.insync.replicas` value in the broker's configuration).
+type RequiredAcks int16
+
+const (
+ // NoResponse doesn't send any response, the TCP ACK is all you get.
+ NoResponse RequiredAcks = 0
+ // WaitForLocal waits for only the local commit to succeed before responding.
+ WaitForLocal RequiredAcks = 1
+ // WaitForAll waits for all in-sync replicas to commit before responding.
+ // The minimum number of in-sync replicas is configured on the broker via
+ // the `min.insync.replicas` configuration key.
+ WaitForAll RequiredAcks = -1
+)
+
+type ProduceRequest struct {
+ RequiredAcks RequiredAcks
+ Timeout int32
+ Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10
+ msgSets map[string]map[int32]*MessageSet
+}
+
+func (r *ProduceRequest) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.RequiredAcks))
+ pe.putInt32(r.Timeout)
+ err := pe.putArrayLength(len(r.msgSets))
+ if err != nil {
+ return err
+ }
+ metricRegistry := pe.metricRegistry()
+ var batchSizeMetric metrics.Histogram
+ var compressionRatioMetric metrics.Histogram
+ if metricRegistry != nil {
+ batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
+ compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
+ }
+
+ totalRecordCount := int64(0)
+ for topic, partitions := range r.msgSets {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+ topicRecordCount := int64(0)
+ var topicCompressionRatioMetric metrics.Histogram
+ if metricRegistry != nil {
+ topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
+ }
+ for id, msgSet := range partitions {
+ startOffset := pe.offset()
+ pe.putInt32(id)
+ pe.push(&lengthField{})
+ err = msgSet.encode(pe)
+ if err != nil {
+ return err
+ }
+ err = pe.pop()
+ if err != nil {
+ return err
+ }
+ if metricRegistry != nil {
+ for _, messageBlock := range msgSet.Messages {
+ // Is this a fake "message" wrapping real messages?
+ if messageBlock.Msg.Set != nil {
+ topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
+ } else {
+ // A single uncompressed message
+ topicRecordCount++
+ }
+ // Better be safe than sorry when computing the compression ratio
+ if messageBlock.Msg.compressedSize != 0 {
+ compressionRatio := float64(len(messageBlock.Msg.Value)) /
+ float64(messageBlock.Msg.compressedSize)
+ // Histograms do not support decimal values, so multiply by 100 for better precision
+ intCompressionRatio := int64(100 * compressionRatio)
+ compressionRatioMetric.Update(intCompressionRatio)
+ topicCompressionRatioMetric.Update(intCompressionRatio)
+ }
+ }
+ batchSize := int64(pe.offset() - startOffset)
+ batchSizeMetric.Update(batchSize)
+ getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize)
+ }
+ }
+ if topicRecordCount > 0 {
+ getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount)
+ getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount)
+ totalRecordCount += topicRecordCount
+ }
+ }
+ if totalRecordCount > 0 {
+ metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount)
+ getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount)
+ }
+
+ return nil
+}
+
+func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
+ requiredAcks, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.RequiredAcks = RequiredAcks(requiredAcks)
+ if r.Timeout, err = pd.getInt32(); err != nil {
+ return err
+ }
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount == 0 {
+ return nil
+ }
+ r.msgSets = make(map[string]map[int32]*MessageSet)
+ for i := 0; i < topicCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.msgSets[topic] = make(map[int32]*MessageSet)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ messageSetSize, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ msgSetDecoder, err := pd.getSubset(int(messageSetSize))
+ if err != nil {
+ return err
+ }
+ msgSet := &MessageSet{}
+ err = msgSet.decode(msgSetDecoder)
+ if err != nil {
+ return err
+ }
+ r.msgSets[topic][partition] = msgSet
+ }
+ }
+ return nil
+}
+
+func (r *ProduceRequest) key() int16 {
+ return 0
+}
+
+func (r *ProduceRequest) version() int16 {
+ return r.Version
+}
+
+func (r *ProduceRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_9_0_0
+ case 2:
+ return V0_10_0_0
+ default:
+ return minVersion
+ }
+}
+
+func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
+ if r.msgSets == nil {
+ r.msgSets = make(map[string]map[int32]*MessageSet)
+ }
+
+ if r.msgSets[topic] == nil {
+ r.msgSets[topic] = make(map[int32]*MessageSet)
+ }
+
+ set := r.msgSets[topic][partition]
+
+ if set == nil {
+ set = new(MessageSet)
+ r.msgSets[topic][partition] = set
+ }
+
+ set.addMessage(msg)
+}
+
+func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
+ if r.msgSets == nil {
+ r.msgSets = make(map[string]map[int32]*MessageSet)
+ }
+
+ if r.msgSets[topic] == nil {
+ r.msgSets[topic] = make(map[int32]*MessageSet)
+ }
+
+ r.msgSets[topic][partition] = set
+}
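
RequiredAcks and Timeout come straight from the producer configuration. A short sketch of requesting full-ISR acknowledgement; the broker address and topic are placeholders, and Return.Successes is only needed because the SyncProducer is used:

    package main

    import (
        "log"
        "time"

        "github.com/Shopify/sarama"
    )

    func main() {
        config := sarama.NewConfig()

        // Wait for the full in-sync replica set to acknowledge every produce request;
        // how many replicas that is comes from the broker's min.insync.replicas setting.
        config.Producer.RequiredAcks = sarama.WaitForAll
        config.Producer.Timeout = 10 * time.Second // broker-side limit for gathering those acks
        config.Producer.Return.Successes = true    // required by the SyncProducer in some sarama versions

        producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
        if err != nil {
            log.Fatal(err)
        }
        defer producer.Close()

        msg := &sarama.ProducerMessage{Topic: "example-topic", Value: sarama.StringEncoder("durable write")}
        if _, _, err := producer.SendMessage(msg); err != nil {
            log.Fatal(err)
        }
    }
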
diff --git a/vendor/github.com/Shopify/sarama/produce_request_test.go b/vendor/github.com/Shopify/sarama/produce_request_test.go
new file mode 100644
index 000000000..21f4ba5b1
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_request_test.go
@@ -0,0 +1,47 @@
+package sarama
+
+import (
+ "testing"
+)
+
+var (
+ produceRequestEmpty = []byte{
+ 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+
+ produceRequestHeader = []byte{
+ 0x01, 0x23,
+ 0x00, 0x00, 0x04, 0x44,
+ 0x00, 0x00, 0x00, 0x00}
+
+ produceRequestOneMessage = []byte{
+ 0x01, 0x23,
+ 0x00, 0x00, 0x04, 0x44,
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x05, 't', 'o', 'p', 'i', 'c',
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0xAD,
+ 0x00, 0x00, 0x00, 0x1C,
+ // messageSet
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x10,
+ // message
+ 0x23, 0x96, 0x4a, 0xf7, // CRC
+ 0x00,
+ 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x00, 0x00, 0x00, 0x02, 0x00, 0xEE}
+)
+
+func TestProduceRequest(t *testing.T) {
+ request := new(ProduceRequest)
+ testRequest(t, "empty", request, produceRequestEmpty)
+
+ request.RequiredAcks = 0x123
+ request.Timeout = 0x444
+ testRequest(t, "header", request, produceRequestHeader)
+
+ request.AddMessage("topic", 0xAD, &Message{Codec: CompressionNone, Key: nil, Value: []byte{0x00, 0xEE}})
+ testRequest(t, "one message", request, produceRequestOneMessage)
+}
diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go
new file mode 100644
index 000000000..3f05dd9fb
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_response.go
@@ -0,0 +1,159 @@
+package sarama
+
+import "time"
+
+type ProduceResponseBlock struct {
+ Err KError
+ Offset int64
+ // only provided if Version >= 2 and the broker is configured with `LogAppendTime`
+ Timestamp time.Time
+}
+
+func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ b.Err = KError(tmp)
+
+ b.Offset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ if version >= 2 {
+ if millis, err := pd.getInt64(); err != nil {
+ return err
+ } else if millis != -1 {
+ b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
+ }
+ }
+
+ return nil
+}
+
+type ProduceResponse struct {
+ Blocks map[string]map[int32]*ProduceResponseBlock
+ Version int16
+ ThrottleTime time.Duration // only provided if Version >= 1
+}
+
+func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ numTopics, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks)
+
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(ProduceResponseBlock)
+ err = block.decode(pd, version)
+ if err != nil {
+ return err
+ }
+ r.Blocks[name][id] = block
+ }
+ }
+
+ if r.Version >= 1 {
+ millis, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ r.ThrottleTime = time.Duration(millis) * time.Millisecond
+ }
+
+ return nil
+}
+
+func (r *ProduceResponse) encode(pe packetEncoder) error {
+ err := pe.putArrayLength(len(r.Blocks))
+ if err != nil {
+ return err
+ }
+ for topic, partitions := range r.Blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+ for id, prb := range partitions {
+ pe.putInt32(id)
+ pe.putInt16(int16(prb.Err))
+ pe.putInt64(prb.Offset)
+ }
+ }
+ if r.Version >= 1 {
+ pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+ }
+ return nil
+}
+
+func (r *ProduceResponse) key() int16 {
+ return 0
+}
+
+func (r *ProduceResponse) version() int16 {
+ return r.Version
+}
+
+func (r *ProduceResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_9_0_0
+ case 2:
+ return V0_10_0_0
+ default:
+ return minVersion
+ }
+}
+
+func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
+ if r.Blocks == nil {
+ return nil
+ }
+
+ if r.Blocks[topic] == nil {
+ return nil
+ }
+
+ return r.Blocks[topic][partition]
+}
+
+// Testing API
+
+func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
+ }
+ byTopic, ok := r.Blocks[topic]
+ if !ok {
+ byTopic = make(map[int32]*ProduceResponseBlock)
+ r.Blocks[topic] = byTopic
+ }
+ byTopic[partition] = &ProduceResponseBlock{Err: err}
+}
diff --git a/vendor/github.com/Shopify/sarama/produce_response_test.go b/vendor/github.com/Shopify/sarama/produce_response_test.go
new file mode 100644
index 000000000..f71709fe8
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_response_test.go
@@ -0,0 +1,67 @@
+package sarama
+
+import "testing"
+
+var (
+ produceResponseNoBlocks = []byte{
+ 0x00, 0x00, 0x00, 0x00}
+
+ produceResponseManyBlocks = []byte{
+ 0x00, 0x00, 0x00, 0x02,
+
+ 0x00, 0x03, 'f', 'o', 'o',
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x03, 'b', 'a', 'r',
+ 0x00, 0x00, 0x00, 0x02,
+
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,
+
+ 0x00, 0x00, 0x00, 0x02,
+ 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+)
+
+func TestProduceResponse(t *testing.T) {
+ response := ProduceResponse{}
+
+ testVersionDecodable(t, "no blocks", &response, produceResponseNoBlocks, 0)
+ if len(response.Blocks) != 0 {
+ t.Error("Decoding produced", len(response.Blocks), "topics where there were none")
+ }
+
+ testVersionDecodable(t, "many blocks", &response, produceResponseManyBlocks, 0)
+ if len(response.Blocks) != 2 {
+ t.Error("Decoding produced", len(response.Blocks), "topics where there were 2")
+ }
+ if len(response.Blocks["foo"]) != 0 {
+ t.Error("Decoding produced", len(response.Blocks["foo"]), "partitions for 'foo' where there were none")
+ }
+ if len(response.Blocks["bar"]) != 2 {
+ t.Error("Decoding produced", len(response.Blocks["bar"]), "partitions for 'bar' where there were two")
+ }
+ block := response.GetBlock("bar", 1)
+ if block == nil {
+ t.Error("Decoding did not produce a block for bar/1")
+ } else {
+ if block.Err != ErrNoError {
+ t.Error("Decoding failed for bar/1/Err, got:", int16(block.Err))
+ }
+ if block.Offset != 0xFF {
+ t.Error("Decoding failed for bar/1/Offset, got:", block.Offset)
+ }
+ }
+ block = response.GetBlock("bar", 2)
+ if block == nil {
+ t.Error("Decoding did not produce a block for bar/2")
+ } else {
+ if block.Err != ErrInvalidMessage {
+ t.Error("Decoding failed for bar/2/Err, got:", int16(block.Err))
+ }
+ if block.Offset != 0 {
+ t.Error("Decoding failed for bar/2/Offset, got:", block.Offset)
+ }
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go
new file mode 100644
index 000000000..158d9c475
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_set.go
@@ -0,0 +1,176 @@
+package sarama
+
+import "time"
+
+type partitionSet struct {
+ msgs []*ProducerMessage
+ setToSend *MessageSet
+ bufferBytes int
+}
+
+type produceSet struct {
+ parent *asyncProducer
+ msgs map[string]map[int32]*partitionSet
+
+ bufferBytes int
+ bufferCount int
+}
+
+func newProduceSet(parent *asyncProducer) *produceSet {
+ return &produceSet{
+ msgs: make(map[string]map[int32]*partitionSet),
+ parent: parent,
+ }
+}
+
+func (ps *produceSet) add(msg *ProducerMessage) error {
+ var err error
+ var key, val []byte
+
+ if msg.Key != nil {
+ if key, err = msg.Key.Encode(); err != nil {
+ return err
+ }
+ }
+
+ if msg.Value != nil {
+ if val, err = msg.Value.Encode(); err != nil {
+ return err
+ }
+ }
+
+ partitions := ps.msgs[msg.Topic]
+ if partitions == nil {
+ partitions = make(map[int32]*partitionSet)
+ ps.msgs[msg.Topic] = partitions
+ }
+
+ set := partitions[msg.Partition]
+ if set == nil {
+ set = &partitionSet{setToSend: new(MessageSet)}
+ partitions[msg.Partition] = set
+ }
+
+ set.msgs = append(set.msgs, msg)
+ msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
+ if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+ if msg.Timestamp.IsZero() {
+ msgToSend.Timestamp = time.Now()
+ } else {
+ msgToSend.Timestamp = msg.Timestamp
+ }
+ msgToSend.Version = 1
+ }
+ set.setToSend.addMessage(msgToSend)
+
+ size := producerMessageOverhead + len(key) + len(val)
+ set.bufferBytes += size
+ ps.bufferBytes += size
+ ps.bufferCount++
+
+ return nil
+}
+
+func (ps *produceSet) buildRequest() *ProduceRequest {
+ req := &ProduceRequest{
+ RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
+ Timeout: int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
+ }
+ if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+ req.Version = 2
+ }
+
+ for topic, partitionSet := range ps.msgs {
+ for partition, set := range partitionSet {
+ if ps.parent.conf.Producer.Compression == CompressionNone {
+ req.AddSet(topic, partition, set.setToSend)
+ } else {
+ // When compression is enabled, the entire set for each partition is compressed
+ // and sent as the payload of a single fake "message" with the appropriate codec
+ // set and no key. When the server sees a message with a compression codec, it
+ // decompresses the payload and treats the result as its message set.
+ payload, err := encode(set.setToSend, ps.parent.conf.MetricRegistry)
+ if err != nil {
+ Logger.Println(err) // if this happens, it's basically our fault.
+ panic(err)
+ }
+ compMsg := &Message{
+ Codec: ps.parent.conf.Producer.Compression,
+ Key: nil,
+ Value: payload,
+ Set: set.setToSend, // Provide the underlying message set for accurate metrics
+ }
+ if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+ compMsg.Version = 1
+ compMsg.Timestamp = set.setToSend.Messages[0].Msg.Timestamp
+ }
+ req.AddMessage(topic, partition, compMsg)
+ }
+ }
+ }
+
+ return req
+}
+
+func (ps *produceSet) eachPartition(cb func(topic string, partition int32, msgs []*ProducerMessage)) {
+ for topic, partitionSet := range ps.msgs {
+ for partition, set := range partitionSet {
+ cb(topic, partition, set.msgs)
+ }
+ }
+}
+
+func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
+ if ps.msgs[topic] == nil {
+ return nil
+ }
+ set := ps.msgs[topic][partition]
+ if set == nil {
+ return nil
+ }
+ ps.bufferBytes -= set.bufferBytes
+ ps.bufferCount -= len(set.msgs)
+ delete(ps.msgs[topic], partition)
+ return set.msgs
+}
+
+func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
+ switch {
+ // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
+ case ps.bufferBytes+msg.byteSize() >= int(MaxRequestSize-(10*1024)):
+ return true
+ // Would we overflow the size-limit of a compressed message-batch for this partition?
+ case ps.parent.conf.Producer.Compression != CompressionNone &&
+ ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
+ ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize() >= ps.parent.conf.Producer.MaxMessageBytes:
+ return true
+ // Would we overflow simply in number of messages?
+ case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
+ return true
+ default:
+ return false
+ }
+}
+
+func (ps *produceSet) readyToFlush() bool {
+ switch {
+ // If we don't have any messages, nothing else matters
+ case ps.empty():
+ return false
+ // If all three config values are 0, we always flush as-fast-as-possible
+ case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
+ return true
+ // If we've passed the message trigger-point
+ case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
+ return true
+ // If we've passed the byte trigger-point
+ case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
+ return true
+ default:
+ return false
+ }
+}
+
+func (ps *produceSet) empty() bool {
+ return ps.bufferCount == 0
+}
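For reference, the flush trigger points checked in readyToFlush and wouldOverflow above correspond to the producer flush settings on the client Config. A minimal sketch of tuning them (the values below are illustrative, not defaults):

    package main

    import (
        "fmt"
        "time"

        "github.com/Shopify/sarama"
    )

    func main() {
        config := sarama.NewConfig()
        config.Producer.Flush.Messages = 500                     // message-count trigger checked by readyToFlush
        config.Producer.Flush.Bytes = 1 << 20                    // byte-count trigger (~1 MiB)
        config.Producer.Flush.Frequency = 500 * time.Millisecond // time-based trigger handled by the async producer
        config.Producer.Flush.MaxMessages = 1000                 // hard cap checked by wouldOverflow
        fmt.Println(config.Producer.Flush)
    }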
diff --git a/vendor/github.com/Shopify/sarama/produce_set_test.go b/vendor/github.com/Shopify/sarama/produce_set_test.go
new file mode 100644
index 000000000..d016a10b7
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_set_test.go
@@ -0,0 +1,185 @@
+package sarama
+
+import (
+ "testing"
+ "time"
+)
+
+func makeProduceSet() (*asyncProducer, *produceSet) {
+ parent := &asyncProducer{
+ conf: NewConfig(),
+ }
+ return parent, newProduceSet(parent)
+}
+
+func safeAddMessage(t *testing.T, ps *produceSet, msg *ProducerMessage) {
+ if err := ps.add(msg); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestProduceSetInitial(t *testing.T) {
+ _, ps := makeProduceSet()
+
+ if !ps.empty() {
+ t.Error("New produceSet should be empty")
+ }
+
+ if ps.readyToFlush() {
+ t.Error("Empty produceSet must never be ready to flush")
+ }
+}
+
+func TestProduceSetAddingMessages(t *testing.T) {
+ parent, ps := makeProduceSet()
+ parent.conf.Producer.Flush.MaxMessages = 1000
+
+ msg := &ProducerMessage{Key: StringEncoder(TestMessage), Value: StringEncoder(TestMessage)}
+ safeAddMessage(t, ps, msg)
+
+ if ps.empty() {
+ t.Error("set shouldn't be empty when a message is added")
+ }
+
+ if !ps.readyToFlush() {
+ t.Error("by default set should be ready to flush when any message is in place")
+ }
+
+ for i := 0; i < 999; i++ {
+ if ps.wouldOverflow(msg) {
+ t.Error("set shouldn't fill up after only", i+1, "messages")
+ }
+ safeAddMessage(t, ps, msg)
+ }
+
+ if !ps.wouldOverflow(msg) {
+ t.Error("set should be full after 1000 messages")
+ }
+}
+
+func TestProduceSetPartitionTracking(t *testing.T) {
+ _, ps := makeProduceSet()
+
+ m1 := &ProducerMessage{Topic: "t1", Partition: 0}
+ m2 := &ProducerMessage{Topic: "t1", Partition: 1}
+ m3 := &ProducerMessage{Topic: "t2", Partition: 0}
+ safeAddMessage(t, ps, m1)
+ safeAddMessage(t, ps, m2)
+ safeAddMessage(t, ps, m3)
+
+ seenT1P0 := false
+ seenT1P1 := false
+ seenT2P0 := false
+
+ ps.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
+ if len(msgs) != 1 {
+ t.Error("Wrong message count")
+ }
+
+ if topic == "t1" && partition == 0 {
+ seenT1P0 = true
+ } else if topic == "t1" && partition == 1 {
+ seenT1P1 = true
+ } else if topic == "t2" && partition == 0 {
+ seenT2P0 = true
+ }
+ })
+
+ if !seenT1P0 {
+ t.Error("Didn't see t1p0")
+ }
+ if !seenT1P1 {
+ t.Error("Didn't see t1p1")
+ }
+ if !seenT2P0 {
+ t.Error("Didn't see t2p0")
+ }
+
+ if len(ps.dropPartition("t1", 1)) != 1 {
+ t.Error("Got wrong messages back from dropping partition")
+ }
+
+ if ps.bufferCount != 2 {
+ t.Error("Incorrect buffer count after dropping partition")
+ }
+}
+
+func TestProduceSetRequestBuilding(t *testing.T) {
+ parent, ps := makeProduceSet()
+ parent.conf.Producer.RequiredAcks = WaitForAll
+ parent.conf.Producer.Timeout = 10 * time.Second
+
+ msg := &ProducerMessage{
+ Topic: "t1",
+ Partition: 0,
+ Key: StringEncoder(TestMessage),
+ Value: StringEncoder(TestMessage),
+ }
+ for i := 0; i < 10; i++ {
+ safeAddMessage(t, ps, msg)
+ }
+ msg.Partition = 1
+ for i := 0; i < 10; i++ {
+ safeAddMessage(t, ps, msg)
+ }
+ msg.Topic = "t2"
+ for i := 0; i < 10; i++ {
+ safeAddMessage(t, ps, msg)
+ }
+
+ req := ps.buildRequest()
+
+ if req.RequiredAcks != WaitForAll {
+ t.Error("RequiredAcks not set properly")
+ }
+
+ if req.Timeout != 10000 {
+ t.Error("Timeout not set properly")
+ }
+
+ if len(req.msgSets) != 2 {
+ t.Error("Wrong number of topics in request")
+ }
+}
+
+func TestProduceSetCompressedRequestBuilding(t *testing.T) {
+ parent, ps := makeProduceSet()
+ parent.conf.Producer.RequiredAcks = WaitForAll
+ parent.conf.Producer.Timeout = 10 * time.Second
+ parent.conf.Producer.Compression = CompressionGZIP
+ parent.conf.Version = V0_10_0_0
+
+ msg := &ProducerMessage{
+ Topic: "t1",
+ Partition: 0,
+ Key: StringEncoder(TestMessage),
+ Value: StringEncoder(TestMessage),
+ Timestamp: time.Now(),
+ }
+ for i := 0; i < 10; i++ {
+ safeAddMessage(t, ps, msg)
+ }
+
+ req := ps.buildRequest()
+
+ if req.Version != 2 {
+ t.Error("Wrong request version")
+ }
+
+ for _, msgBlock := range req.msgSets["t1"][0].Messages {
+ msg := msgBlock.Msg
+ err := msg.decodeSet()
+ if err != nil {
+ t.Error("Failed to decode set from payload")
+ }
+ for _, compMsgBlock := range msg.Set.Messages {
+ compMsg := compMsgBlock.Msg
+ if compMsg.Version != 1 {
+ t.Error("Wrong compressed message version")
+ }
+ }
+ if msg.Version != 1 {
+ t.Error("Wrong compressed parent message version")
+ }
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go
new file mode 100644
index 000000000..3cf93533a
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/real_decoder.go
@@ -0,0 +1,260 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "math"
+)
+
+var errInvalidArrayLength = PacketDecodingError{"invalid array length"}
+var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"}
+var errInvalidStringLength = PacketDecodingError{"invalid string length"}
+var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"}
+
+type realDecoder struct {
+ raw []byte
+ off int
+ stack []pushDecoder
+}
+
+// primitives
+
+func (rd *realDecoder) getInt8() (int8, error) {
+ if rd.remaining() < 1 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int8(rd.raw[rd.off])
+ rd.off++
+ return tmp, nil
+}
+
+func (rd *realDecoder) getInt16() (int16, error) {
+ if rd.remaining() < 2 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:]))
+ rd.off += 2
+ return tmp, nil
+}
+
+func (rd *realDecoder) getInt32() (int32, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+ return tmp, nil
+}
+
+func (rd *realDecoder) getInt64() (int64, error) {
+ if rd.remaining() < 8 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+ rd.off += 8
+ return tmp, nil
+}
+
+func (rd *realDecoder) getArrayLength() (int, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+ if tmp > rd.remaining() {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ } else if tmp > 2*math.MaxUint16 {
+ return -1, errInvalidArrayLength
+ }
+ return tmp, nil
+}
+
+// collections
+
+func (rd *realDecoder) getBytes() ([]byte, error) {
+ tmp, err := rd.getInt32()
+
+ if err != nil {
+ return nil, err
+ }
+
+ n := int(tmp)
+
+ switch {
+ case n < -1:
+ return nil, errInvalidByteSliceLength
+ case n == -1:
+ return nil, nil
+ case n == 0:
+ return make([]byte, 0), nil
+ case n > rd.remaining():
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+
+ tmpStr := rd.raw[rd.off : rd.off+n]
+ rd.off += n
+ return tmpStr, nil
+}
+
+func (rd *realDecoder) getString() (string, error) {
+ tmp, err := rd.getInt16()
+
+ if err != nil {
+ return "", err
+ }
+
+ n := int(tmp)
+
+ switch {
+ case n < -1:
+ return "", errInvalidStringLength
+ case n == -1:
+ return "", nil
+ case n == 0:
+ return "", nil
+ case n > rd.remaining():
+ rd.off = len(rd.raw)
+ return "", ErrInsufficientData
+ }
+
+ tmpStr := string(rd.raw[rd.off : rd.off+n])
+ rd.off += n
+ return tmpStr, nil
+}
+
+func (rd *realDecoder) getInt32Array() ([]int32, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+ n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+
+ if rd.remaining() < 4*n {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+
+ if n == 0 {
+ return nil, nil
+ }
+
+ if n < 0 {
+ return nil, errInvalidArrayLength
+ }
+
+ ret := make([]int32, n)
+ for i := range ret {
+ ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+ }
+ return ret, nil
+}
+
+func (rd *realDecoder) getInt64Array() ([]int64, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+ n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+
+ if rd.remaining() < 8*n {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+
+ if n == 0 {
+ return nil, nil
+ }
+
+ if n < 0 {
+ return nil, errInvalidArrayLength
+ }
+
+ ret := make([]int64, n)
+ for i := range ret {
+ ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+ rd.off += 8
+ }
+ return ret, nil
+}
+
+func (rd *realDecoder) getStringArray() ([]string, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+ n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+
+ if n == 0 {
+ return nil, nil
+ }
+
+ if n < 0 {
+ return nil, errInvalidArrayLength
+ }
+
+ ret := make([]string, n)
+ for i := range ret {
+ str, err := rd.getString()
+ if err != nil {
+ return nil, err
+ }
+
+ ret[i] = str
+ }
+ return ret, nil
+}
+
+// subsets
+
+func (rd *realDecoder) remaining() int {
+ return len(rd.raw) - rd.off
+}
+
+func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
+ if length < 0 {
+ return nil, errInvalidSubsetSize
+ } else if length > rd.remaining() {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+
+ start := rd.off
+ rd.off += length
+ return &realDecoder{raw: rd.raw[start:rd.off]}, nil
+}
+
+// stacks
+
+func (rd *realDecoder) push(in pushDecoder) error {
+ in.saveOffset(rd.off)
+
+ reserve := in.reserveLength()
+ if rd.remaining() < reserve {
+ rd.off = len(rd.raw)
+ return ErrInsufficientData
+ }
+
+ rd.stack = append(rd.stack, in)
+
+ rd.off += reserve
+
+ return nil
+}
+
+func (rd *realDecoder) pop() error {
+ // this is go's ugly pop pattern (the inverse of append)
+ in := rd.stack[len(rd.stack)-1]
+ rd.stack = rd.stack[:len(rd.stack)-1]
+
+ return in.check(rd.off, rd.raw)
+}
diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go
new file mode 100644
index 000000000..ced4267c3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/real_encoder.go
@@ -0,0 +1,129 @@
+package sarama
+
+import (
+ "encoding/binary"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+type realEncoder struct {
+ raw []byte
+ off int
+ stack []pushEncoder
+ registry metrics.Registry
+}
+
+// primitives
+
+func (re *realEncoder) putInt8(in int8) {
+ re.raw[re.off] = byte(in)
+ re.off++
+}
+
+func (re *realEncoder) putInt16(in int16) {
+ binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in))
+ re.off += 2
+}
+
+func (re *realEncoder) putInt32(in int32) {
+ binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
+ re.off += 4
+}
+
+func (re *realEncoder) putInt64(in int64) {
+ binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in))
+ re.off += 8
+}
+
+func (re *realEncoder) putArrayLength(in int) error {
+ re.putInt32(int32(in))
+ return nil
+}
+
+// collection
+
+func (re *realEncoder) putRawBytes(in []byte) error {
+ copy(re.raw[re.off:], in)
+ re.off += len(in)
+ return nil
+}
+
+func (re *realEncoder) putBytes(in []byte) error {
+ if in == nil {
+ re.putInt32(-1)
+ return nil
+ }
+ re.putInt32(int32(len(in)))
+ copy(re.raw[re.off:], in)
+ re.off += len(in)
+ return nil
+}
+
+func (re *realEncoder) putString(in string) error {
+ re.putInt16(int16(len(in)))
+ copy(re.raw[re.off:], in)
+ re.off += len(in)
+ return nil
+}
+
+func (re *realEncoder) putStringArray(in []string) error {
+ err := re.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+
+ for _, val := range in {
+ if err := re.putString(val); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (re *realEncoder) putInt32Array(in []int32) error {
+ err := re.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ for _, val := range in {
+ re.putInt32(val)
+ }
+ return nil
+}
+
+func (re *realEncoder) putInt64Array(in []int64) error {
+ err := re.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ for _, val := range in {
+ re.putInt64(val)
+ }
+ return nil
+}
+
+func (re *realEncoder) offset() int {
+ return re.off
+}
+
+// stacks
+
+func (re *realEncoder) push(in pushEncoder) {
+ in.saveOffset(re.off)
+ re.off += in.reserveLength()
+ re.stack = append(re.stack, in)
+}
+
+func (re *realEncoder) pop() error {
+ // this is go's ugly pop pattern (the inverse of append)
+ in := re.stack[len(re.stack)-1]
+ re.stack = re.stack[:len(re.stack)-1]
+
+ return in.run(re.off, re.raw)
+}
+
+// we do record metrics during the real encoder pass
+func (re *realEncoder) metricRegistry() metrics.Registry {
+ return re.registry
+}
diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go
new file mode 100644
index 000000000..73310ca87
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/request.go
@@ -0,0 +1,119 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+)
+
+type protocolBody interface {
+ encoder
+ versionedDecoder
+ key() int16
+ version() int16
+ requiredVersion() KafkaVersion
+}
+
+type request struct {
+ correlationID int32
+ clientID string
+ body protocolBody
+}
+
+func (r *request) encode(pe packetEncoder) (err error) {
+ pe.push(&lengthField{})
+ pe.putInt16(r.body.key())
+ pe.putInt16(r.body.version())
+ pe.putInt32(r.correlationID)
+ err = pe.putString(r.clientID)
+ if err != nil {
+ return err
+ }
+ err = r.body.encode(pe)
+ if err != nil {
+ return err
+ }
+ return pe.pop()
+}
+
+func (r *request) decode(pd packetDecoder) (err error) {
+ var key int16
+ if key, err = pd.getInt16(); err != nil {
+ return err
+ }
+ var version int16
+ if version, err = pd.getInt16(); err != nil {
+ return err
+ }
+ if r.correlationID, err = pd.getInt32(); err != nil {
+ return err
+ }
+ r.clientID, err = pd.getString()
+
+ r.body = allocateBody(key, version)
+ if r.body == nil {
+ return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
+ }
+ return r.body.decode(pd, version)
+}
+
+func decodeRequest(r io.Reader) (req *request, bytesRead int, err error) {
+ lengthBytes := make([]byte, 4)
+ if _, err := io.ReadFull(r, lengthBytes); err != nil {
+ return nil, bytesRead, err
+ }
+ bytesRead += len(lengthBytes)
+
+ length := int32(binary.BigEndian.Uint32(lengthBytes))
+ if length <= 4 || length > MaxRequestSize {
+ return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
+ }
+
+ encodedReq := make([]byte, length)
+ if _, err := io.ReadFull(r, encodedReq); err != nil {
+ return nil, bytesRead, err
+ }
+ bytesRead += len(encodedReq)
+
+ req = &request{}
+ if err := decode(encodedReq, req); err != nil {
+ return nil, bytesRead, err
+ }
+ return req, bytesRead, nil
+}
+
+func allocateBody(key, version int16) protocolBody {
+ switch key {
+ case 0:
+ return &ProduceRequest{}
+ case 1:
+ return &FetchRequest{}
+ case 2:
+ return &OffsetRequest{Version: version}
+ case 3:
+ return &MetadataRequest{}
+ case 8:
+ return &OffsetCommitRequest{Version: version}
+ case 9:
+ return &OffsetFetchRequest{}
+ case 10:
+ return &ConsumerMetadataRequest{}
+ case 11:
+ return &JoinGroupRequest{}
+ case 12:
+ return &HeartbeatRequest{}
+ case 13:
+ return &LeaveGroupRequest{}
+ case 14:
+ return &SyncGroupRequest{}
+ case 15:
+ return &DescribeGroupsRequest{}
+ case 16:
+ return &ListGroupsRequest{}
+ case 17:
+ return &SaslHandshakeRequest{}
+ case 18:
+ return &ApiVersionsRequest{}
+ }
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/request_test.go b/vendor/github.com/Shopify/sarama/request_test.go
new file mode 100644
index 000000000..bd9cef4eb
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/request_test.go
@@ -0,0 +1,98 @@
+package sarama
+
+import (
+ "bytes"
+ "reflect"
+ "testing"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+type testRequestBody struct {
+}
+
+func (s *testRequestBody) key() int16 {
+ return 0x666
+}
+
+func (s *testRequestBody) version() int16 {
+ return 0xD2
+}
+
+func (s *testRequestBody) encode(pe packetEncoder) error {
+ return pe.putString("abc")
+}
+
+// not specific to request tests, just helper functions for testing structures that
+// implement the encoder or decoder interfaces that needed somewhere to live
+
+func testEncodable(t *testing.T, name string, in encoder, expect []byte) {
+ packet, err := encode(in, nil)
+ if err != nil {
+ t.Error(err)
+ } else if !bytes.Equal(packet, expect) {
+ t.Error("Encoding", name, "failed\ngot ", packet, "\nwant", expect)
+ }
+}
+
+func testDecodable(t *testing.T, name string, out decoder, in []byte) {
+ err := decode(in, out)
+ if err != nil {
+ t.Error("Decoding", name, "failed:", err)
+ }
+}
+
+func testVersionDecodable(t *testing.T, name string, out versionedDecoder, in []byte, version int16) {
+ err := versionedDecode(in, out, version)
+ if err != nil {
+ t.Error("Decoding", name, "version", version, "failed:", err)
+ }
+}
+
+func testRequest(t *testing.T, name string, rb protocolBody, expected []byte) {
+ packet := testRequestEncode(t, name, rb, expected)
+ testRequestDecode(t, name, rb, packet)
+}
+
+func testRequestEncode(t *testing.T, name string, rb protocolBody, expected []byte) []byte {
+ req := &request{correlationID: 123, clientID: "foo", body: rb}
+ packet, err := encode(req, nil)
+ headerSize := 14 + len("foo")
+ if err != nil {
+ t.Error(err)
+ } else if !bytes.Equal(packet[headerSize:], expected) {
+ t.Error("Encoding", name, "failed\ngot ", packet[headerSize:], "\nwant", expected)
+ }
+ return packet
+}
+
+func testRequestDecode(t *testing.T, name string, rb protocolBody, packet []byte) {
+ decoded, n, err := decodeRequest(bytes.NewReader(packet))
+ if err != nil {
+ t.Error("Failed to decode request", err)
+ } else if decoded.correlationID != 123 || decoded.clientID != "foo" {
+ t.Errorf("Decoded header %q is not valid: %+v", name, decoded)
+ } else if !reflect.DeepEqual(rb, decoded.body) {
+ t.Error(spew.Sprintf("Decoded request %q does not match the encoded one\nencoded: %+v\ndecoded: %+v", name, rb, decoded.body))
+ } else if n != len(packet) {
+ t.Errorf("Decoded request %q bytes: %d does not match the encoded one: %d\n", name, n, len(packet))
+ }
+}
+
+func testResponse(t *testing.T, name string, res protocolBody, expected []byte) {
+ encoded, err := encode(res, nil)
+ if err != nil {
+ t.Error(err)
+ } else if expected != nil && !bytes.Equal(encoded, expected) {
+ t.Error("Encoding", name, "failed\ngot ", encoded, "\nwant", expected)
+ }
+
+ decoded := reflect.New(reflect.TypeOf(res).Elem()).Interface().(versionedDecoder)
+ if err := versionedDecode(encoded, decoded, res.version()); err != nil {
+ t.Error("Decoding", name, "failed:", err)
+ }
+
+ if !reflect.DeepEqual(decoded, res) {
+ t.Errorf("Decoded response does not match the encoded one\nencoded: %#v\ndecoded: %#v", res, decoded)
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go
new file mode 100644
index 000000000..f3f4d27d6
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/response_header.go
@@ -0,0 +1,21 @@
+package sarama
+
+import "fmt"
+
+type responseHeader struct {
+ length int32
+ correlationID int32
+}
+
+func (r *responseHeader) decode(pd packetDecoder) (err error) {
+ r.length, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+ if r.length <= 4 || r.length > MaxResponseSize {
+ return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)}
+ }
+
+ r.correlationID, err = pd.getInt32()
+ return err
+}
diff --git a/vendor/github.com/Shopify/sarama/response_header_test.go b/vendor/github.com/Shopify/sarama/response_header_test.go
new file mode 100644
index 000000000..8f9fdb80c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/response_header_test.go
@@ -0,0 +1,21 @@
+package sarama
+
+import "testing"
+
+var (
+ responseHeaderBytes = []byte{
+ 0x00, 0x00, 0x0f, 0x00,
+ 0x0a, 0xbb, 0xcc, 0xff}
+)
+
+func TestResponseHeader(t *testing.T) {
+ header := responseHeader{}
+
+ testDecodable(t, "response header", &header, responseHeaderBytes)
+ if header.length != 0xf00 {
+ t.Error("Decoding header length failed, got", header.length)
+ }
+ if header.correlationID != 0x0abbccff {
+ t.Error("Decoding header correlation id failed, got", header.correlationID)
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go
new file mode 100644
index 000000000..7d5dc60d3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sarama.go
@@ -0,0 +1,99 @@
+/*
+Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level
+API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level
+API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation.
+
+To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel
+and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases.
+The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be
+useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees
+depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the
+SyncProducer can still sometimes be lost.
+
+To consume messages, use the Consumer. Note that Sarama's Consumer implementation does not currently support automatic
+consumer-group rebalancing and offset tracking. For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the
+https://github.com/wvanbergen/kafka library builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9
+and later), the https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
+
+For lower-level needs, the Broker and Request/Response objects permit precise control over each connection
+and message sent on the wire; the Client provides higher-level metadata management that is shared between
+the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up
+exactly with the protocol fields documented by Kafka at
+https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
+
+Metrics are exposed through https://github.com/rcrowley/go-metrics library in a local registry.
+
+Broker related metrics:
+
+ +----------------------------------------------+------------+---------------------------------------------------------------+
+ | Name | Type | Description |
+ +----------------------------------------------+------------+---------------------------------------------------------------+
+ | incoming-byte-rate | meter | Bytes/second read off all brokers |
+ | incoming-byte-rate-for-broker- | meter | Bytes/second read off a given broker |
+ | outgoing-byte-rate | meter | Bytes/second written off all brokers |
+ | outgoing-byte-rate-for-broker- | meter | Bytes/second written off a given broker |
+ | request-rate | meter | Requests/second sent to all brokers |
+ | request-rate-for-broker- | meter | Requests/second sent to a given broker |
+ | request-size | histogram | Distribution of the request size in bytes for all brokers |
+ | request-size-for-broker- | histogram | Distribution of the request size in bytes for a given broker |
+ | request-latency-in-ms | histogram | Distribution of the request latency in ms for all brokers |
+ | request-latency-in-ms-for-broker- | histogram | Distribution of the request latency in ms for a given broker |
+ | response-rate | meter | Responses/second received from all brokers |
+ | response-rate-for-broker- | meter | Responses/second received from a given broker |
+ | response-size | histogram | Distribution of the response size in bytes for all brokers |
+ | response-size-for-broker- | histogram | Distribution of the response size in bytes for a given broker |
+ +----------------------------------------------+------------+---------------------------------------------------------------+
+
+Note that we do not gather specific metrics for seed brokers, but they are part of the "all brokers" metrics.
+
+Producer related metrics:
+
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+ | Name | Type | Description |
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+ | batch-size | histogram | Distribution of the number of bytes sent per partition per request for all topics |
+ | batch-size-for-topic- | histogram | Distribution of the number of bytes sent per partition per request for a given topic |
+ | record-send-rate | meter | Records/second sent to all topics |
+ | record-send-rate-for-topic- | meter | Records/second sent to a given topic |
+ | records-per-request | histogram | Distribution of the number of records sent per request for all topics |
+ | records-per-request-for-topic- | histogram | Distribution of the number of records sent per request for a given topic |
+ | compression-ratio | histogram | Distribution of the compression ratio times 100 of record batches for all topics |
+ | compression-ratio-for-topic- | histogram | Distribution of the compression ratio times 100 of record batches for a given topic |
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+
+*/
+package sarama
+
+import (
+ "io/ioutil"
+ "log"
+)
+
+// Logger is the instance of a StdLogger interface that Sarama writes connection
+// management events to. By default it is set to discard all log messages via ioutil.Discard,
+// but you can set it to redirect wherever you want.
+var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)
+
+// StdLogger is used to log error messages.
+type StdLogger interface {
+ Print(v ...interface{})
+ Printf(format string, v ...interface{})
+ Println(v ...interface{})
+}
+
+// PanicHandler is called for recovering from panics spawned internally to the library (and thus
+// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
+var PanicHandler func(interface{})
+
+// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
+// to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned
+// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
+// to process.
+var MaxRequestSize int32 = 100 * 1024 * 1024
+
+// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
+// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
+// protect the client from running out of memory. Please note that brokers do not have any natural limit on
+// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
+// (see https://issues.apache.org/jira/browse/KAFKA-2063).
+var MaxResponseSize int32 = 100 * 1024 * 1024
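As a rough companion to the metrics tables above, the registry referenced by the client configuration (conf.MetricRegistry, used when encoding requests) can be inspected with the go-metrics API. A minimal sketch, assuming a local client has been running and that the meter names match the tables:

    package main

    import (
        "fmt"

        "github.com/Shopify/sarama"
        metrics "github.com/rcrowley/go-metrics"
    )

    func main() {
        config := sarama.NewConfig()
        // ... use the config with a producer or consumer, then walk its registry:
        config.MetricRegistry.Each(func(name string, metric interface{}) {
            if meter, ok := metric.(metrics.Meter); ok {
                fmt.Printf("%s: %.1f events/s\n", name, meter.RateMean())
            }
        })
    }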
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
new file mode 100644
index 000000000..fbbc8947b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
@@ -0,0 +1,33 @@
+package sarama
+
+type SaslHandshakeRequest struct {
+ Mechanism string
+}
+
+func (r *SaslHandshakeRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.Mechanism); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) {
+ if r.Mechanism, err = pd.getString(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *SaslHandshakeRequest) key() int16 {
+ return 17
+}
+
+func (r *SaslHandshakeRequest) version() int16 {
+ return 0
+}
+
+func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion {
+ return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request_test.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request_test.go
new file mode 100644
index 000000000..806e628fd
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_request_test.go
@@ -0,0 +1,17 @@
+package sarama
+
+import "testing"
+
+var (
+ baseSaslRequest = []byte{
+ 0, 3, 'f', 'o', 'o', // Mechanism
+ }
+)
+
+func TestSaslHandshakeRequest(t *testing.T) {
+ var request *SaslHandshakeRequest
+
+ request = new(SaslHandshakeRequest)
+ request.Mechanism = "foo"
+ testRequest(t, "basic", request, baseSaslRequest)
+}
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
new file mode 100644
index 000000000..ef290d4bc
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
@@ -0,0 +1,38 @@
+package sarama
+
+type SaslHandshakeResponse struct {
+ Err KError
+ EnabledMechanisms []string
+}
+
+func (r *SaslHandshakeResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ return pe.putStringArray(r.EnabledMechanisms)
+}
+
+func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.Err = KError(kerr)
+
+ if r.EnabledMechanisms, err = pd.getStringArray(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *SaslHandshakeResponse) key() int16 {
+ return 17
+}
+
+func (r *SaslHandshakeResponse) version() int16 {
+ return 0
+}
+
+func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion {
+ return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response_test.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response_test.go
new file mode 100644
index 000000000..1fd4c79e0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_response_test.go
@@ -0,0 +1,24 @@
+package sarama
+
+import "testing"
+
+var (
+ saslHandshakeResponse = []byte{
+ 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x03, 'f', 'o', 'o',
+ }
+)
+
+func TestSaslHandshakeResponse(t *testing.T) {
+ var response *SaslHandshakeResponse
+
+ response = new(SaslHandshakeResponse)
+ testVersionDecodable(t, "no error", response, saslHandshakeResponse, 0)
+ if response.Err != ErrNoError {
+ t.Error("Decoding error failed: no error expected but found", response.Err)
+ }
+ if response.EnabledMechanisms[0] != "foo" {
+ t.Error("Decoding error failed: expected 'foo' but found", response.EnabledMechanisms)
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go
new file mode 100644
index 000000000..fe207080e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_group_request.go
@@ -0,0 +1,100 @@
+package sarama
+
+type SyncGroupRequest struct {
+ GroupId string
+ GenerationId int32
+ MemberId string
+ GroupAssignments map[string][]byte
+}
+
+func (r *SyncGroupRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.GroupId); err != nil {
+ return err
+ }
+
+ pe.putInt32(r.GenerationId)
+
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil {
+ return err
+ }
+ for memberId, memberAssignment := range r.GroupAssignments {
+ if err := pe.putString(memberId); err != nil {
+ return err
+ }
+ if err := pe.putBytes(memberAssignment); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+ if r.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+ if r.GenerationId, err = pd.getInt32(); err != nil {
+ return
+ }
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ r.GroupAssignments = make(map[string][]byte)
+ for i := 0; i < n; i++ {
+ memberId, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ memberAssignment, err := pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ r.GroupAssignments[memberId] = memberAssignment
+ }
+
+ return nil
+}
+
+func (r *SyncGroupRequest) key() int16 {
+ return 14
+}
+
+func (r *SyncGroupRequest) version() int16 {
+ return 0
+}
+
+func (r *SyncGroupRequest) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
+
+func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) {
+ if r.GroupAssignments == nil {
+ r.GroupAssignments = make(map[string][]byte)
+ }
+
+ r.GroupAssignments[memberId] = memberAssignment
+}
+
+func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error {
+ bin, err := encode(memberAssignment, nil)
+ if err != nil {
+ return err
+ }
+
+ r.AddGroupAssignment(memberId, bin)
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/sync_group_request_test.go b/vendor/github.com/Shopify/sarama/sync_group_request_test.go
new file mode 100644
index 000000000..3f537ef9f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_group_request_test.go
@@ -0,0 +1,38 @@
+package sarama
+
+import "testing"
+
+var (
+ emptySyncGroupRequest = []byte{
+ 0, 3, 'f', 'o', 'o', // Group ID
+ 0x00, 0x01, 0x02, 0x03, // Generation ID
+ 0, 3, 'b', 'a', 'z', // Member ID
+ 0, 0, 0, 0, // no assignments
+ }
+
+ populatedSyncGroupRequest = []byte{
+ 0, 3, 'f', 'o', 'o', // Group ID
+ 0x00, 0x01, 0x02, 0x03, // Generation ID
+ 0, 3, 'b', 'a', 'z', // Member ID
+ 0, 0, 0, 1, // one assignment
+ 0, 3, 'b', 'a', 'z', // Member ID
+ 0, 0, 0, 3, 'f', 'o', 'o', // Member assignment
+ }
+)
+
+func TestSyncGroupRequest(t *testing.T) {
+ var request *SyncGroupRequest
+
+ request = new(SyncGroupRequest)
+ request.GroupId = "foo"
+ request.GenerationId = 66051
+ request.MemberId = "baz"
+ testRequest(t, "empty", request, emptySyncGroupRequest)
+
+ request = new(SyncGroupRequest)
+ request.GroupId = "foo"
+ request.GenerationId = 66051
+ request.MemberId = "baz"
+ request.AddGroupAssignment("baz", []byte("foo"))
+ testRequest(t, "populated", request, populatedSyncGroupRequest)
+}
diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go
new file mode 100644
index 000000000..194b382b4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_group_response.go
@@ -0,0 +1,41 @@
+package sarama
+
+type SyncGroupResponse struct {
+ Err KError
+ MemberAssignment []byte
+}
+
+func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
+ assignment := new(ConsumerGroupMemberAssignment)
+ err := decode(r.MemberAssignment, assignment)
+ return assignment, err
+}
+
+func (r *SyncGroupResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ return pe.putBytes(r.MemberAssignment)
+}
+
+func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.Err = KError(kerr)
+
+ r.MemberAssignment, err = pd.getBytes()
+ return
+}
+
+func (r *SyncGroupResponse) key() int16 {
+ return 14
+}
+
+func (r *SyncGroupResponse) version() int16 {
+ return 0
+}
+
+func (r *SyncGroupResponse) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sync_group_response_test.go b/vendor/github.com/Shopify/sarama/sync_group_response_test.go
new file mode 100644
index 000000000..6fb708858
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_group_response_test.go
@@ -0,0 +1,40 @@
+package sarama
+
+import (
+ "reflect"
+ "testing"
+)
+
+var (
+ syncGroupResponseNoError = []byte{
+ 0x00, 0x00, // No error
+ 0, 0, 0, 3, 0x01, 0x02, 0x03, // Member assignment data
+ }
+
+ syncGroupResponseWithError = []byte{
+ 0, 27, // ErrRebalanceInProgress
+ 0, 0, 0, 0, // No member assignment data
+ }
+)
+
+func TestSyncGroupResponse(t *testing.T) {
+ var response *SyncGroupResponse
+
+ response = new(SyncGroupResponse)
+ testVersionDecodable(t, "no error", response, syncGroupResponseNoError, 0)
+ if response.Err != ErrNoError {
+ t.Error("Decoding Err failed: no error expected but found", response.Err)
+ }
+ if !reflect.DeepEqual(response.MemberAssignment, []byte{0x01, 0x02, 0x03}) {
+ t.Error("Decoding MemberAssignment failed, found:", response.MemberAssignment)
+ }
+
+ response = new(SyncGroupResponse)
+ testVersionDecodable(t, "no error", response, syncGroupResponseWithError, 0)
+ if response.Err != ErrRebalanceInProgress {
+ t.Error("Decoding Err failed: ErrRebalanceInProgress expected but found", response.Err)
+ }
+ if !reflect.DeepEqual(response.MemberAssignment, []byte{}) {
+ t.Error("Decoding MemberAssignment failed, found:", response.MemberAssignment)
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go
new file mode 100644
index 000000000..dd096b6db
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_producer.go
@@ -0,0 +1,164 @@
+package sarama
+
+import "sync"
+
+// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct
+// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer
+// to avoid leaks: it may not be garbage-collected automatically when it passes out of scope.
+//
+// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
+// durability guarantee provided when a message is acknowledged depends on the configured value of `Producer.RequiredAcks`.
+// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
+//
+// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to
+// be set to true in its configuration.
+type SyncProducer interface {
+
+ // SendMessage produces a given message, and returns only when it either has
+ // succeeded or failed to produce. It will return the partition and the offset
+ // of the produced message, or an error if the message failed to produce.
+ SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
+
+ // SendMessages produces a given set of messages, and returns only when all
+ // messages in the set have either succeeded or failed. Note that messages
+ // can succeed and fail individually; if some succeed and some fail,
+ // SendMessages will return an error.
+ SendMessages(msgs []*ProducerMessage) error
+
+ // Close shuts down the producer and waits for any buffered messages to be
+ // flushed. You must call this function before a producer object passes out of
+ // scope, as it may otherwise leak memory. You must call this before calling
+ // Close on the underlying client.
+ Close() error
+}
+
+type syncProducer struct {
+ producer *asyncProducer
+ wg sync.WaitGroup
+}
+
+// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
+func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
+ if config == nil {
+ config = NewConfig()
+ config.Producer.Return.Successes = true
+ }
+
+ if err := verifyProducerConfig(config); err != nil {
+ return nil, err
+ }
+
+ p, err := NewAsyncProducer(addrs, config)
+ if err != nil {
+ return nil, err
+ }
+ return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewSyncProducerFromClient(client Client) (SyncProducer, error) {
+ if err := verifyProducerConfig(client.Config()); err != nil {
+ return nil, err
+ }
+
+ p, err := NewAsyncProducerFromClient(client)
+ if err != nil {
+ return nil, err
+ }
+ return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer {
+ sp := &syncProducer{producer: p}
+
+ sp.wg.Add(2)
+ go withRecover(sp.handleSuccesses)
+ go withRecover(sp.handleErrors)
+
+ return sp
+}
+
+func verifyProducerConfig(config *Config) error {
+ if !config.Producer.Return.Errors {
+ return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer")
+ }
+ if !config.Producer.Return.Successes {
+ return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer")
+ }
+ return nil
+}
+
+func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) {
+ oldMetadata := msg.Metadata
+ defer func() {
+ msg.Metadata = oldMetadata
+ }()
+
+ expectation := make(chan *ProducerError, 1)
+ msg.Metadata = expectation
+ sp.producer.Input() <- msg
+
+ if err := <-expectation; err != nil {
+ return -1, -1, err.Err
+ }
+
+ return msg.Partition, msg.Offset, nil
+}
+
+func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error {
+ savedMetadata := make([]interface{}, len(msgs))
+ for i := range msgs {
+ savedMetadata[i] = msgs[i].Metadata
+ }
+ defer func() {
+ for i := range msgs {
+ msgs[i].Metadata = savedMetadata[i]
+ }
+ }()
+
+ expectations := make(chan chan *ProducerError, len(msgs))
+ go func() {
+ for _, msg := range msgs {
+ expectation := make(chan *ProducerError, 1)
+ msg.Metadata = expectation
+ sp.producer.Input() <- msg
+ expectations <- expectation
+ }
+ close(expectations)
+ }()
+
+ var errors ProducerErrors
+ for expectation := range expectations {
+ if err := <-expectation; err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ if len(errors) > 0 {
+ return errors
+ }
+ return nil
+}
+
+func (sp *syncProducer) handleSuccesses() {
+ defer sp.wg.Done()
+ for msg := range sp.producer.Successes() {
+ expectation := msg.Metadata.(chan *ProducerError)
+ expectation <- nil
+ }
+}
+
+func (sp *syncProducer) handleErrors() {
+ defer sp.wg.Done()
+ for err := range sp.producer.Errors() {
+ expectation := err.Msg.Metadata.(chan *ProducerError)
+ expectation <- err
+ }
+}
+
+func (sp *syncProducer) Close() error {
+ sp.producer.AsyncClose()
+ sp.wg.Wait()
+ return nil
+}
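The test file that follows includes ExampleSyncProducer for single messages; the batch path is similar. A minimal sketch of SendMessages with per-message error inspection, assuming a broker at localhost:9092:

    package main

    import (
        "log"

        "github.com/Shopify/sarama"
    )

    func main() {
        producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, nil)
        if err != nil {
            log.Fatalln(err)
        }
        defer producer.Close()

        msgs := []*sarama.ProducerMessage{
            {Topic: "my_topic", Value: sarama.StringEncoder("one")},
            {Topic: "my_topic", Value: sarama.StringEncoder("two")},
        }
        // On partial failure, SendMessages returns a ProducerErrors slice.
        if err := producer.SendMessages(msgs); err != nil {
            if perrs, ok := err.(sarama.ProducerErrors); ok {
                for _, perr := range perrs {
                    log.Printf("failed %q: %v", perr.Msg.Topic, perr.Err)
                }
            }
        }
    }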
diff --git a/vendor/github.com/Shopify/sarama/sync_producer_test.go b/vendor/github.com/Shopify/sarama/sync_producer_test.go
new file mode 100644
index 000000000..cb97548db
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_producer_test.go
@@ -0,0 +1,199 @@
+package sarama
+
+import (
+ "log"
+ "sync"
+ "testing"
+)
+
+func TestSyncProducer(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+ leader := NewMockBroker(t, 2)
+
+ metadataResponse := new(MetadataResponse)
+ metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
+ metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
+ seedBroker.Returns(metadataResponse)
+
+ prodSuccess := new(ProduceResponse)
+ prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
+ for i := 0; i < 10; i++ {
+ leader.Returns(prodSuccess)
+ }
+
+ producer, err := NewSyncProducer([]string{seedBroker.Addr()}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for i := 0; i < 10; i++ {
+ msg := &ProducerMessage{
+ Topic: "my_topic",
+ Value: StringEncoder(TestMessage),
+ Metadata: "test",
+ }
+
+ partition, offset, err := producer.SendMessage(msg)
+
+ if partition != 0 || msg.Partition != partition {
+ t.Error("Unexpected partition")
+ }
+ if offset != 0 || msg.Offset != offset {
+ t.Error("Unexpected offset")
+ }
+ if str, ok := msg.Metadata.(string); !ok || str != "test" {
+ t.Error("Unexpected metadata")
+ }
+ if err != nil {
+ t.Error(err)
+ }
+ }
+
+ safeClose(t, producer)
+ leader.Close()
+ seedBroker.Close()
+}
+
+func TestSyncProducerBatch(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+ leader := NewMockBroker(t, 2)
+
+ metadataResponse := new(MetadataResponse)
+ metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
+ metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
+ seedBroker.Returns(metadataResponse)
+
+ prodSuccess := new(ProduceResponse)
+ prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
+ leader.Returns(prodSuccess)
+
+ config := NewConfig()
+ config.Producer.Flush.Messages = 3
+ config.Producer.Return.Successes = true
+ producer, err := NewSyncProducer([]string{seedBroker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = producer.SendMessages([]*ProducerMessage{
+ {
+ Topic: "my_topic",
+ Value: StringEncoder(TestMessage),
+ Metadata: "test",
+ },
+ {
+ Topic: "my_topic",
+ Value: StringEncoder(TestMessage),
+ Metadata: "test",
+ },
+ {
+ Topic: "my_topic",
+ Value: StringEncoder(TestMessage),
+ Metadata: "test",
+ },
+ })
+
+ if err != nil {
+ t.Error(err)
+ }
+
+ safeClose(t, producer)
+ leader.Close()
+ seedBroker.Close()
+}
+
+func TestConcurrentSyncProducer(t *testing.T) {
+ seedBroker := NewMockBroker(t, 1)
+ leader := NewMockBroker(t, 2)
+
+ metadataResponse := new(MetadataResponse)
+ metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
+ metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
+ seedBroker.Returns(metadataResponse)
+
+ prodSuccess := new(ProduceResponse)
+ prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
+ leader.Returns(prodSuccess)
+
+ config := NewConfig()
+ config.Producer.Flush.Messages = 100
+ config.Producer.Return.Successes = true
+ producer, err := NewSyncProducer([]string{seedBroker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ wg := sync.WaitGroup{}
+
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go func() {
+ msg := &ProducerMessage{Topic: "my_topic", Value: StringEncoder(TestMessage)}
+ partition, _, err := producer.SendMessage(msg)
+ if partition != 0 {
+ t.Error("Unexpected partition")
+ }
+ if err != nil {
+ t.Error(err)
+ }
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+
+ safeClose(t, producer)
+ leader.Close()
+ seedBroker.Close()
+}
+
+func TestSyncProducerToNonExistingTopic(t *testing.T) {
+ broker := NewMockBroker(t, 1)
+
+ metadataResponse := new(MetadataResponse)
+ metadataResponse.AddBroker(broker.Addr(), broker.BrokerID())
+ metadataResponse.AddTopicPartition("my_topic", 0, broker.BrokerID(), nil, nil, ErrNoError)
+ broker.Returns(metadataResponse)
+
+ config := NewConfig()
+ config.Metadata.Retry.Max = 0
+ config.Producer.Retry.Max = 0
+ config.Producer.Return.Successes = true
+
+ producer, err := NewSyncProducer([]string{broker.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ metadataResponse = new(MetadataResponse)
+ metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition)
+ broker.Returns(metadataResponse)
+
+ _, _, err = producer.SendMessage(&ProducerMessage{Topic: "unknown"})
+ if err != ErrUnknownTopicOrPartition {
+ t.Error("Uxpected ErrUnknownTopicOrPartition, found:", err)
+ }
+
+ safeClose(t, producer)
+ broker.Close()
+}
+
+// This example shows the basic usage pattern of the SyncProducer.
+func ExampleSyncProducer() {
+ producer, err := NewSyncProducer([]string{"localhost:9092"}, nil)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ defer func() {
+ if err := producer.Close(); err != nil {
+ log.Fatalln(err)
+ }
+ }()
+
+ msg := &ProducerMessage{Topic: "my_topic", Value: StringEncoder("testing 123")}
+ partition, offset, err := producer.SendMessage(msg)
+ if err != nil {
+ log.Printf("FAILED to send message: %s\n", err)
+ } else {
+ log.Printf("> message sent to partition %d at offset %d\n", partition, offset)
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/tools/README.md b/vendor/github.com/Shopify/sarama/tools/README.md
new file mode 100644
index 000000000..3464c4ad8
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/tools/README.md
@@ -0,0 +1,10 @@
+# Sarama tools
+
+This folder contains applications that are useful for exploring your Kafka cluster, or for instrumentation.
+Some of these tools mirror tools that ship with Kafka, but these tools do not require installing the JVM to function.
+
+- [kafka-console-producer](./kafka-console-producer): a command line tool to produce a single message to your Kafka cluster.
+- [kafka-console-partitionconsumer](./kafka-console-partitionconsumer): (deprecated) a command line tool to consume a single partition of a topic on your Kafka cluster.
+- [kafka-console-consumer](./kafka-console-consumer): a command line tool to consume arbitrary partitions of a topic on your Kafka cluster.
+
+To install all tools, run `go get github.com/Shopify/sarama/tools/...`
diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore b/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore
new file mode 100644
index 000000000..67da9dfa9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore
@@ -0,0 +1,2 @@
+kafka-console-consumer
+kafka-console-consumer.test
diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md b/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md
new file mode 100644
index 000000000..4e77f0b70
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md
@@ -0,0 +1,29 @@
+# kafka-console-consumer
+
+A simple command line tool to consume partitions of a topic and print the
+messages on the standard output.
+
+### Installation
+
+ go get github.com/Shopify/sarama/tools/kafka-console-consumer
+
+### Usage
+
+ # Minimum invocation
+ kafka-console-consumer -topic=test -brokers=kafka1:9092
+
+ # It will pick up a KAFKA_PEERS environment variable
+ export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092
+ kafka-console-consumer -topic=test
+
+ # You can specify the offset you want to start at. It can be either
+ # `oldest` or `newest`. The default is `newest`.
+ kafka-console-consumer -topic=test -offset=oldest
+ kafka-console-consumer -topic=test -offset=newest
+
+ # You can specify the partition(s) you want to consume as a comma-separated
+ # list. The default is `all`.
+ kafka-console-consumer -topic=test -partitions=1,2,3
+
+ # Display all command line options
+ kafka-console-consumer -help
diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go b/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go
new file mode 100644
index 000000000..0f1eb89a9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go
@@ -0,0 +1,145 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "os/signal"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/Shopify/sarama"
+)
+
+var (
+ brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster")
+ topic = flag.String("topic", "", "REQUIRED: the topic to consume")
+ partitions = flag.String("partitions", "all", "The partitions to consume, can be 'all' or comma-separated numbers")
+ offset = flag.String("offset", "newest", "The offset to start with. Can be `oldest`, `newest`")
+ verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging")
+ bufferSize = flag.Int("buffer-size", 256, "The buffer size of the message channel.")
+
+ logger = log.New(os.Stderr, "", log.LstdFlags)
+)
+
+func main() {
+ flag.Parse()
+
+ if *brokerList == "" {
+ printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.")
+ }
+
+ if *topic == "" {
+ printUsageErrorAndExit("-topic is required")
+ }
+
+ if *verbose {
+ sarama.Logger = logger
+ }
+
+ var initialOffset int64
+ switch *offset {
+ case "oldest":
+ initialOffset = sarama.OffsetOldest
+ case "newest":
+ initialOffset = sarama.OffsetNewest
+ default:
+ printUsageErrorAndExit("-offset should be `oldest` or `newest`")
+ }
+
+ c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil)
+ if err != nil {
+ printErrorAndExit(69, "Failed to start consumer: %s", err)
+ }
+
+ partitionList, err := getPartitions(c)
+ if err != nil {
+ printErrorAndExit(69, "Failed to get the list of partitions: %s", err)
+ }
+
+ var (
+ messages = make(chan *sarama.ConsumerMessage, *bufferSize)
+ closing = make(chan struct{})
+ wg sync.WaitGroup
+ )
+
+ go func() {
+ signals := make(chan os.Signal, 1)
+ signal.Notify(signals, os.Kill, os.Interrupt)
+ <-signals
+ logger.Println("Initiating shutdown of consumer...")
+ close(closing)
+ }()
+
+ for _, partition := range partitionList {
+ pc, err := c.ConsumePartition(*topic, partition, initialOffset)
+ if err != nil {
+ printErrorAndExit(69, "Failed to start consumer for partition %d: %s", partition, err)
+ }
+
+ go func(pc sarama.PartitionConsumer) {
+ <-closing
+ pc.AsyncClose()
+ }(pc)
+
+ wg.Add(1)
+ go func(pc sarama.PartitionConsumer) {
+ defer wg.Done()
+ for message := range pc.Messages() {
+ messages <- message
+ }
+ }(pc)
+ }
+
+ go func() {
+ for msg := range messages {
+ fmt.Printf("Partition:\t%d\n", msg.Partition)
+ fmt.Printf("Offset:\t%d\n", msg.Offset)
+ fmt.Printf("Key:\t%s\n", string(msg.Key))
+ fmt.Printf("Value:\t%s\n", string(msg.Value))
+ fmt.Println()
+ }
+ }()
+
+ wg.Wait()
+ logger.Println("Done consuming topic", *topic)
+ close(messages)
+
+ if err := c.Close(); err != nil {
+ logger.Println("Failed to close consumer: ", err)
+ }
+}
+
+func getPartitions(c sarama.Consumer) ([]int32, error) {
+ if *partitions == "all" {
+ return c.Partitions(*topic)
+ }
+
+ tmp := strings.Split(*partitions, ",")
+ var pList []int32
+ for i := range tmp {
+ val, err := strconv.ParseInt(tmp[i], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ pList = append(pList, int32(val))
+ }
+
+ return pList, nil
+}
+
+func printErrorAndExit(code int, format string, values ...interface{}) {
+ fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
+ fmt.Fprintln(os.Stderr)
+ os.Exit(code)
+}
+
+func printUsageErrorAndExit(format string, values ...interface{}) {
+ fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
+ fmt.Fprintln(os.Stderr)
+ fmt.Fprintln(os.Stderr, "Available command line options:")
+ flag.PrintDefaults()
+ os.Exit(64)
+}
diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore b/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore
new file mode 100644
index 000000000..5837fe8ca
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore
@@ -0,0 +1,2 @@
+kafka-console-partitionconsumer
+kafka-console-partitionconsumer.test
diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md b/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md
new file mode 100644
index 000000000..646dd5f5c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md
@@ -0,0 +1,28 @@
+# kafka-console-partitionconsumer
+
+NOTE: this tool is deprecated in favour of the more general and more powerful
+`kafka-console-consumer`.
+
+A simple command line tool to consume a partition of a topic and print the messages
+on the standard output.
+
+### Installation
+
+ go get github.com/Shopify/sarama/tools/kafka-console-partitionconsumer
+
+### Usage
+
+ # Minimum invocation
+ kafka-console-partitionconsumer -topic=test -partition=4 -brokers=kafka1:9092
+
+ # It will pick up a KAFKA_PEERS environment variable
+ export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092
+ kafka-console-partitionconsumer -topic=test -partition=4
+
+ # You can specify the offset you want to start at. It can be either
+ # `oldest`, `newest`, or a specific offset number
+ kafka-console-partitionconsumer -topic=test -partition=3 -offset=oldest
+ kafka-console-partitionconsumer -topic=test -partition=2 -offset=1337
+
+ # Display all command line options
+ kafka-console-partitionconsumer -help
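As a hedged illustration (not part of the vendored tool), the offset names above map onto sarama's sentinel constants roughly as follows; resolveOffset is a hypothetical helper name and the sample values are arbitrary:

package main

import (
	"fmt"
	"strconv"

	"github.com/Shopify/sarama"
)

func resolveOffset(s string) (int64, error) {
	switch s {
	case "oldest":
		return sarama.OffsetOldest, nil // start from the earliest retained message
	case "newest":
		return sarama.OffsetNewest, nil // only messages produced after startup
	default:
		return strconv.ParseInt(s, 10, 64) // an explicit absolute offset such as 1337
	}
}

func main() {
	for _, arg := range []string{"oldest", "newest", "1337"} {
		off, err := resolveOffset(arg)
		if err != nil {
			fmt.Println("invalid offset:", err)
			continue
		}
		fmt.Printf("%s -> %d\n", arg, off)
	}
}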
diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go b/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go
new file mode 100644
index 000000000..d5e4464de
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go
@@ -0,0 +1,102 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "os/signal"
+ "strconv"
+ "strings"
+
+ "github.com/Shopify/sarama"
+)
+
+var (
+ brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster")
+ topic = flag.String("topic", "", "REQUIRED: the topic to consume")
+ partition = flag.Int("partition", -1, "REQUIRED: the partition to consume")
+ offset = flag.String("offset", "newest", "The offset to start with. Can be `oldest`, `newest`, or an actual offset")
+ verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging")
+
+ logger = log.New(os.Stderr, "", log.LstdFlags)
+)
+
+func main() {
+ flag.Parse()
+
+ if *brokerList == "" {
+ printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.")
+ }
+
+ if *topic == "" {
+ printUsageErrorAndExit("-topic is required")
+ }
+
+ if *partition == -1 {
+ printUsageErrorAndExit("-partition is required")
+ }
+
+ if *verbose {
+ sarama.Logger = logger
+ }
+
+ var (
+ initialOffset int64
+ offsetError error
+ )
+ switch *offset {
+ case "oldest":
+ initialOffset = sarama.OffsetOldest
+ case "newest":
+ initialOffset = sarama.OffsetNewest
+ default:
+ initialOffset, offsetError = strconv.ParseInt(*offset, 10, 64)
+ }
+
+ if offsetError != nil {
+ printUsageErrorAndExit("Invalid initial offset: %s", *offset)
+ }
+
+ c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil)
+ if err != nil {
+ printErrorAndExit(69, "Failed to start consumer: %s", err)
+ }
+
+ pc, err := c.ConsumePartition(*topic, int32(*partition), initialOffset)
+ if err != nil {
+ printErrorAndExit(69, "Failed to start partition consumer: %s", err)
+ }
+
+ go func() {
+ signals := make(chan os.Signal, 1)
+ signal.Notify(signals, os.Kill, os.Interrupt)
+ <-signals
+ pc.AsyncClose()
+ }()
+
+ for msg := range pc.Messages() {
+ fmt.Printf("Offset:\t%d\n", msg.Offset)
+ fmt.Printf("Key:\t%s\n", string(msg.Key))
+ fmt.Printf("Value:\t%s\n", string(msg.Value))
+ fmt.Println()
+ }
+
+ if err := c.Close(); err != nil {
+ logger.Println("Failed to close consumer: ", err)
+ }
+}
+
+func printErrorAndExit(code int, format string, values ...interface{}) {
+ fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
+ fmt.Fprintln(os.Stderr)
+ os.Exit(code)
+}
+
+func printUsageErrorAndExit(format string, values ...interface{}) {
+ fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
+ fmt.Fprintln(os.Stderr)
+ fmt.Fprintln(os.Stderr, "Available command line options:")
+ flag.PrintDefaults()
+ os.Exit(64)
+}
diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore b/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore
new file mode 100644
index 000000000..2b9e563a1
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore
@@ -0,0 +1,2 @@
+kafka-console-producer
+kafka-console-producer.test
diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/README.md b/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/README.md
new file mode 100644
index 000000000..6b3a65f21
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/README.md
@@ -0,0 +1,34 @@
+# kafka-console-producer
+
+A simple command line tool to produce a single message to Kafka.
+
+### Installation
+
+ go get github.com/Shopify/sarama/tools/kafka-console-producer
+
+
+### Usage
+
+ # Minimum invocation
+ kafka-console-producer -topic=test -value=value -brokers=kafka1:9092
+
+ # It will pick up a KAFKA_PEERS environment variable
+ export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092
+ kafka-console-producer -topic=test -value=value
+
+ # It will read the value from stdin by using pipes
+ echo "hello world" | kafka-console-producer -topic=test
+
+ # Specify a key:
+ echo "hello world" | kafka-console-producer -topic=test -key=key
+
+ # Partitioning: by default, kafka-console-producer will partition as follows:
+ # - manual partitioning if a -partition is provided
+ # - hash partitioning by key if a -key is provided
+ # - random partitioning otherwise.
+ #
+ # You can override this using the -partitioner argument:
+ echo "hello world" | kafka-console-producer -topic=test -key=key -partitioner=random
+
+ # Display all command line options
+ kafka-console-producer -help
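The partitioning rules above correspond to sarama's partitioner constructors. Below is a minimal sketch of the hash-by-key case; the broker address localhost:9092 and the topic name test are illustrative assumptions, not defaults of the tool:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Return.Successes = true                 // required by the SyncProducer
	config.Producer.Partitioner = sarama.NewHashPartitioner // partition derived from the message key
	// sarama.NewManualPartitioner and sarama.NewRandomPartitioner cover the
	// -partitioner=manual and -partitioner=random cases respectively.

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatalln("failed to open producer:", err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "test",
		Key:   sarama.StringEncoder("key"),
		Value: sarama.StringEncoder("hello world"),
	})
	if err != nil {
		log.Fatalln("failed to produce:", err)
	}
	log.Printf("stored at partition=%d offset=%d", partition, offset)
}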
diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go b/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go
new file mode 100644
index 000000000..83054ed78
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go
@@ -0,0 +1,124 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "strings"
+
+ "github.com/Shopify/sarama"
+ "github.com/rcrowley/go-metrics"
+)
+
+var (
+ brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster. You can also set the KAFKA_PEERS environment variable")
+ topic = flag.String("topic", "", "REQUIRED: the topic to produce to")
+ key = flag.String("key", "", "The key of the message to produce. Can be empty.")
+ value = flag.String("value", "", "REQUIRED: the value of the message to produce. You can also provide the value on stdin.")
+ partitioner = flag.String("partitioner", "", "The partitioning scheme to use. Can be `hash`, `manual`, or `random`")
+ partition = flag.Int("partition", -1, "The partition to produce to.")
+ verbose = flag.Bool("verbose", false, "Turn on sarama logging to stderr")
+ showMetrics = flag.Bool("metrics", false, "Output metrics on successful publish to stderr")
+ silent = flag.Bool("silent", false, "Turn off printing the message's topic, partition, and offset to stdout")
+
+ logger = log.New(os.Stderr, "", log.LstdFlags)
+)
+
+func main() {
+ flag.Parse()
+
+ if *brokerList == "" {
+ printUsageErrorAndExit("no -brokers specified. Alternatively, set the KAFKA_PEERS environment variable")
+ }
+
+ if *topic == "" {
+ printUsageErrorAndExit("no -topic specified")
+ }
+
+ if *verbose {
+ sarama.Logger = logger
+ }
+
+ config := sarama.NewConfig()
+ config.Producer.RequiredAcks = sarama.WaitForAll
+ config.Producer.Return.Successes = true
+
+ switch *partitioner {
+ case "":
+ if *partition >= 0 {
+ config.Producer.Partitioner = sarama.NewManualPartitioner
+ } else {
+ config.Producer.Partitioner = sarama.NewHashPartitioner
+ }
+ case "hash":
+ config.Producer.Partitioner = sarama.NewHashPartitioner
+ case "random":
+ config.Producer.Partitioner = sarama.NewRandomPartitioner
+ case "manual":
+ config.Producer.Partitioner = sarama.NewManualPartitioner
+ if *partition == -1 {
+ printUsageErrorAndExit("-partition is required when partitioning manually")
+ }
+ default:
+ printUsageErrorAndExit(fmt.Sprintf("Partitioner %s not supported.", *partitioner))
+ }
+
+ message := &sarama.ProducerMessage{Topic: *topic, Partition: int32(*partition)}
+
+ if *key != "" {
+ message.Key = sarama.StringEncoder(*key)
+ }
+
+ if *value != "" {
+ message.Value = sarama.StringEncoder(*value)
+ } else if stdinAvailable() {
+ bytes, err := ioutil.ReadAll(os.Stdin)
+ if err != nil {
+ printErrorAndExit(66, "Failed to read data from the standard input: %s", err)
+ }
+ message.Value = sarama.ByteEncoder(bytes)
+ } else {
+ printUsageErrorAndExit("-value is required, or you have to provide the value on stdin")
+ }
+
+ producer, err := sarama.NewSyncProducer(strings.Split(*brokerList, ","), config)
+ if err != nil {
+ printErrorAndExit(69, "Failed to open Kafka producer: %s", err)
+ }
+ defer func() {
+ if err := producer.Close(); err != nil {
+ logger.Println("Failed to close Kafka producer cleanly:", err)
+ }
+ }()
+
+ partition, offset, err := producer.SendMessage(message)
+ if err != nil {
+ printErrorAndExit(69, "Failed to produce message: %s", err)
+ } else if !*silent {
+ fmt.Printf("topic=%s\tpartition=%d\toffset=%d\n", *topic, partition, offset)
+ }
+ if *showMetrics {
+ metrics.WriteOnce(config.MetricRegistry, os.Stderr)
+ }
+}
+
+func printErrorAndExit(code int, format string, values ...interface{}) {
+ fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
+ fmt.Fprintln(os.Stderr)
+ os.Exit(code)
+}
+
+func printUsageErrorAndExit(message string) {
+ fmt.Fprintln(os.Stderr, "ERROR:", message)
+ fmt.Fprintln(os.Stderr)
+ fmt.Fprintln(os.Stderr, "Available command line options:")
+ flag.PrintDefaults()
+ os.Exit(64)
+}
+
+func stdinAvailable() bool {
+ stat, _ := os.Stdin.Stat()
+ return (stat.Mode() & os.ModeCharDevice) == 0
+}
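The stdinAvailable check above works because an interactive terminal sets os.ModeCharDevice on stdin, while piped or redirected input clears that bit. A standalone sketch of the same check, for illustration only:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

func main() {
	stat, err := os.Stdin.Stat()
	if err != nil {
		fmt.Fprintln(os.Stderr, "cannot stat stdin:", err)
		os.Exit(1)
	}
	if stat.Mode()&os.ModeCharDevice == 0 {
		// stdin is a pipe or a redirected file, so the message value can be read from it
		data, err := ioutil.ReadAll(os.Stdin)
		if err != nil {
			fmt.Fprintln(os.Stderr, "failed to read stdin:", err)
			os.Exit(1)
		}
		fmt.Printf("read %d bytes from the pipe\n", len(data))
		return
	}
	fmt.Println("stdin is an interactive terminal; nothing was piped in")
}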
diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go
new file mode 100644
index 000000000..d36db9210
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/utils.go
@@ -0,0 +1,153 @@
+package sarama
+
+import (
+ "bufio"
+ "net"
+ "sort"
+)
+
+type none struct{}
+
+// make []int32 sortable so we can sort partition numbers
+type int32Slice []int32
+
+func (slice int32Slice) Len() int {
+ return len(slice)
+}
+
+func (slice int32Slice) Less(i, j int) bool {
+ return slice[i] < slice[j]
+}
+
+func (slice int32Slice) Swap(i, j int) {
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+func dupeAndSort(input []int32) []int32 {
+ ret := make([]int32, 0, len(input))
+ for _, val := range input {
+ ret = append(ret, val)
+ }
+
+ sort.Sort(int32Slice(ret))
+ return ret
+}
+
+func withRecover(fn func()) {
+ defer func() {
+ handler := PanicHandler
+ if handler != nil {
+ if err := recover(); err != nil {
+ handler(err)
+ }
+ }
+ }()
+
+ fn()
+}
+
+func safeAsyncClose(b *Broker) {
+ tmp := b // local var prevents clobbering in goroutine
+ go withRecover(func() {
+ if connected, _ := tmp.Connected(); connected {
+ if err := tmp.Close(); err != nil {
+ Logger.Println("Error closing broker", tmp.ID(), ":", err)
+ }
+ }
+ })
+}
+
+// Encoder is a simple interface for any type that can be encoded as an array of bytes
+// in order to be sent as the key or value of a Kafka message. Length() is provided as an
+// optimization, and must return the same as len() on the result of Encode().
+type Encoder interface {
+ Encode() ([]byte, error)
+ Length() int
+}
+
+// make strings and byte slices encodable for convenience so they can be used as keys
+// and/or values in kafka messages
+
+// StringEncoder implements the Encoder interface for Go strings so that they can be used
+// as the Key or Value in a ProducerMessage.
+type StringEncoder string
+
+func (s StringEncoder) Encode() ([]byte, error) {
+ return []byte(s), nil
+}
+
+func (s StringEncoder) Length() int {
+ return len(s)
+}
+
+// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used
+// as the Key or Value in a ProducerMessage.
+type ByteEncoder []byte
+
+func (b ByteEncoder) Encode() ([]byte, error) {
+ return b, nil
+}
+
+func (b ByteEncoder) Length() int {
+ return len(b)
+}
+
+// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
+// reads that trigger syscalls.
+type bufConn struct {
+ net.Conn
+ buf *bufio.Reader
+}
+
+func newBufConn(conn net.Conn) *bufConn {
+ return &bufConn{
+ Conn: conn,
+ buf: bufio.NewReader(conn),
+ }
+}
+
+func (bc *bufConn) Read(b []byte) (n int, err error) {
+ return bc.buf.Read(b)
+}
+
+// KafkaVersion instances represent versions of the upstream Kafka broker.
+type KafkaVersion struct {
+ // it's a struct rather than just typing the array directly to make it opaque and stop people
+ // generating their own arbitrary versions
+ version [4]uint
+}
+
+func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
+ return KafkaVersion{
+ version: [4]uint{major, minor, veryMinor, patch},
+ }
+}
+
+// IsAtLeast returns true if and only if the version it is called on is
+// greater than or equal to the version passed in:
+// V1.IsAtLeast(V2) // false
+// V2.IsAtLeast(V1) // true
+func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
+ for i := range v.version {
+ if v.version[i] > other.version[i] {
+ return true
+ } else if v.version[i] < other.version[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// Effective constants defining the supported kafka versions.
+var (
+ V0_8_2_0 = newKafkaVersion(0, 8, 2, 0)
+ V0_8_2_1 = newKafkaVersion(0, 8, 2, 1)
+ V0_8_2_2 = newKafkaVersion(0, 8, 2, 2)
+ V0_9_0_0 = newKafkaVersion(0, 9, 0, 0)
+ V0_9_0_1 = newKafkaVersion(0, 9, 0, 1)
+ V0_10_0_0 = newKafkaVersion(0, 10, 0, 0)
+ V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
+ V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
+ V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
+ minVersion = V0_8_2_0
+)
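As a hedged sketch (not part of the library), the Encoder interface above can be satisfied by any user type, and KafkaVersion.IsAtLeast can be used to gate version-dependent behaviour; jsonEncoder, the broker address, and the topic name below are illustrative assumptions:

package main

import (
	"encoding/json"
	"log"

	"github.com/Shopify/sarama"
)

// jsonEncoder marshals a value once up front so that Length() agrees with
// len() of the bytes later returned by Encode(), as the interface requires.
type jsonEncoder struct{ data []byte }

func newJSONEncoder(v interface{}) (*jsonEncoder, error) {
	b, err := json.Marshal(v)
	if err != nil {
		return nil, err
	}
	return &jsonEncoder{data: b}, nil
}

func (e *jsonEncoder) Encode() ([]byte, error) { return e.data, nil }
func (e *jsonEncoder) Length() int             { return len(e.data) }

func main() {
	config := sarama.NewConfig()
	config.Version = sarama.V0_10_0_0
	if config.Version.IsAtLeast(sarama.V0_10_0_0) {
		log.Println("broker version supports message timestamps")
	}
	config.Producer.Return.Successes = true // required by the SyncProducer

	value, err := newJSONEncoder(map[string]string{"event": "signup"})
	if err != nil {
		log.Fatalln(err)
	}
	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatalln(err)
	}
	defer producer.Close()
	if _, _, err := producer.SendMessage(&sarama.ProducerMessage{Topic: "events", Value: value}); err != nil {
		log.Fatalln(err)
	}
}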
diff --git a/vendor/github.com/Shopify/sarama/utils_test.go b/vendor/github.com/Shopify/sarama/utils_test.go
new file mode 100644
index 000000000..a9e09502c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/utils_test.go
@@ -0,0 +1,21 @@
+package sarama
+
+import "testing"
+
+func TestVersionCompare(t *testing.T) {
+ if V0_8_2_0.IsAtLeast(V0_8_2_1) {
+ t.Error("0.8.2.0 >= 0.8.2.1")
+ }
+ if !V0_8_2_1.IsAtLeast(V0_8_2_0) {
+ t.Error("! 0.8.2.1 >= 0.8.2.0")
+ }
+ if !V0_8_2_0.IsAtLeast(V0_8_2_0) {
+ t.Error("! 0.8.2.0 >= 0.8.2.0")
+ }
+ if !V0_9_0_0.IsAtLeast(V0_8_2_1) {
+ t.Error("! 0.9.0.0 >= 0.8.2.1")
+ }
+ if V0_8_2_1.IsAtLeast(V0_10_0_0) {
+ t.Error("0.8.2.1 >= 0.10.0.0")
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/vagrant/boot_cluster.sh b/vendor/github.com/Shopify/sarama/vagrant/boot_cluster.sh
new file mode 100755
index 000000000..95e47dde4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/vagrant/boot_cluster.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+set -ex
+
+# Launch and wait for toxiproxy
+${REPOSITORY_ROOT}/vagrant/run_toxiproxy.sh &
+while ! nc -q 1 localhost 2181 ${KAFKA_INSTALL_ROOT}/zookeeper-${ZK_PORT}/myid
+done
diff --git a/vendor/github.com/Shopify/sarama/vagrant/kafka.conf b/vendor/github.com/Shopify/sarama/vagrant/kafka.conf
new file mode 100644
index 000000000..25101df5a
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/vagrant/kafka.conf
@@ -0,0 +1,9 @@
+start on started zookeeper-ZK_PORT
+stop on stopping zookeeper-ZK_PORT
+
+# Use a script instead of exec (using env stanza leaks KAFKA_HEAP_OPTS from zookeeper)
+script
+ sleep 2
+ export KAFKA_HEAP_OPTS="-Xmx320m"
+ exec /opt/kafka-KAFKAID/bin/kafka-server-start.sh /opt/kafka-KAFKAID/config/server.properties
+end script
diff --git a/vendor/github.com/Shopify/sarama/vagrant/provision.sh b/vendor/github.com/Shopify/sarama/vagrant/provision.sh
new file mode 100755
index 000000000..ace768f40
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/vagrant/provision.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+set -ex
+
+apt-get update
+yes | apt-get install default-jre
+
+export KAFKA_INSTALL_ROOT=/opt
+export KAFKA_HOSTNAME=192.168.100.67
+export KAFKA_VERSION=0.9.0.1
+export REPOSITORY_ROOT=/vagrant
+
+sh /vagrant/vagrant/install_cluster.sh
+sh /vagrant/vagrant/setup_services.sh
+sh /vagrant/vagrant/create_topics.sh
diff --git a/vendor/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh b/vendor/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh
new file mode 100755
index 000000000..e52c00e7b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+set -ex
+
+${KAFKA_INSTALL_ROOT}/toxiproxy -port 8474 -host 0.0.0.0 &
+PID=$!
+
+while ! nc -q 1 localhost 8474
+
+# The number of threads handling network requests
+num.network.threads=2
+
+# The number of threads doing disk I/O
+num.io.threads=8
+
+# The send buffer (SO_SNDBUF) used by the socket server
+socket.send.buffer.bytes=1048576
+
+# The receive buffer (SO_RCVBUF) used by the socket server
+socket.receive.buffer.bytes=1048576
+
+# The maximum size of a request that the socket server will accept (protection against OOM)
+socket.request.max.bytes=104857600
+
+
+############################# Log Basics #############################
+
+# A comma separated list of directories under which to store log files
+log.dirs=KAFKA_DATADIR
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions=2
+
+# Create new topics with a replication factor of 2 so failover can be tested
+# more easily.
+default.replication.factor=2
+
+auto.create.topics.enable=false
+delete.topic.enable=true
+
+############################# Log Flush Policy #############################
+
+# Messages are immediately written to the filesystem but by default we only fsync() to sync
+# the OS cache lazily. The following configurations control the flush of data to disk.
+# There are a few important trade-offs here:
+# 1. Durability: Unflushed data may be lost if you are not using replication.
+# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
+# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
+# The settings below allow one to configure the flush policy to flush data after a period of time or
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+#log.flush.interval.messages=10000
+
+# The maximum amount of time a message can sit in a log before we force a flush
+#log.flush.interval.ms=1000
+
+############################# Log Retention Policy #############################
+
+# The following configurations control the disposal of log segments. The policy can
+# be set to delete segments after a period of time, or after a given size has accumulated.
+# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion
+log.retention.hours=168
+
+# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
+# segments don't drop below log.retention.bytes.
+log.retention.bytes=268435456
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+log.segment.bytes=268435456
+
+# The interval at which log segments are checked to see if they can be deleted according
+# to the retention policies
+log.retention.check.interval.ms=60000
+
+# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
+# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
+log.cleaner.enable=false
+
+############################# Zookeeper #############################
+
+# Zookeeper connection string (see zookeeper docs for details).
+# This is a comma separated list of host:port pairs, each corresponding to a zk
+# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
+# You can also append an optional chroot string to the urls to specify the
+# root directory for all kafka znodes.
+zookeeper.connect=localhost:ZK_PORT
+
+# Timeout in ms for connecting to zookeeper
+zookeeper.session.timeout.ms=3000
+zookeeper.connection.timeout.ms=3000
diff --git a/vendor/github.com/Shopify/sarama/vagrant/setup_services.sh b/vendor/github.com/Shopify/sarama/vagrant/setup_services.sh
new file mode 100755
index 000000000..81d8ea05d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/vagrant/setup_services.sh
@@ -0,0 +1,29 @@
+#!/bin/sh
+
+set -ex
+
+stop toxiproxy || true
+cp ${REPOSITORY_ROOT}/vagrant/toxiproxy.conf /etc/init/toxiproxy.conf
+cp ${REPOSITORY_ROOT}/vagrant/run_toxiproxy.sh ${KAFKA_INSTALL_ROOT}/
+start toxiproxy
+
+for i in 1 2 3 4 5; do
+ ZK_PORT=`expr $i + 2180`
+ KAFKA_PORT=`expr $i + 9090`
+
+ stop zookeeper-${ZK_PORT} || true
+
+ # set up zk service
+ cp ${REPOSITORY_ROOT}/vagrant/zookeeper.conf /etc/init/zookeeper-${ZK_PORT}.conf
+ sed -i s/KAFKAID/${KAFKA_PORT}/g /etc/init/zookeeper-${ZK_PORT}.conf
+
+ # set up kafka service
+ cp ${REPOSITORY_ROOT}/vagrant/kafka.conf /etc/init/kafka-${KAFKA_PORT}.conf
+ sed -i s/KAFKAID/${KAFKA_PORT}/g /etc/init/kafka-${KAFKA_PORT}.conf
+ sed -i s/ZK_PORT/${ZK_PORT}/g /etc/init/kafka-${KAFKA_PORT}.conf
+
+ start zookeeper-${ZK_PORT}
+done
+
+# Wait for the last kafka node to finish booting
+while ! nc -q 1 localhost 29095
+ * close calls shutdown breaking forked parent process
+ * [THRIFT-732] - server exits abnormally when client calls send_xxx function without calling recv_xxx function
+ * [THRIFT-3942] - TSSLSocket does not honor send and receive timeouts
+ * [THRIFT-3941] - WinXP version of thrift_poll() relies on undefined behavior by passing a destructed variable to select()
+ * [THRIFT-3940] - Visual Studio project file for compiler is broken
+ * [THRIFT-3943] - Coverity Scan identified some high severity defects
+ * [THRIFT-3929] - PHP "nsglobal" Option Results in Syntax Error in Generated Code (Trailing Backslash)
+ * [THRIFT-3936] - Cannot compile 0.10.0 development tip with VS2013 and earlier (snprintf, uint32_t)
+ * [THRIFT-3935] - Incorrect skipping of map and set
+ * [THRIFT-3920] - Ruby: Ensuring that HTTP failures will clear the http transport outbuf var
+ * [THRIFT-3919] - C# TTLSServerSocket does not use clientTimeout
+ * [THRIFT-3917] - Check backports.ssl_match_hostname module version
+ * [THRIFT-3909] - Fix c_glib static lib CMake build
+ * [THRIFT-3904] - Typo in node tutorial leads to wrong transport being used
+ * [THRIFT-3848] - As an implementer of a perl socket server, I do not want to have to remember to ignore SIGCHLD for it to work properly
+ * [THRIFT-3844] - thrift_protocol cannot compile in 7.0.7
+ * [THRIFT-3843] - integer issues with Haxe PHP targets cause ZigZag encoding to fail
+ * [THRIFT-3842] - Dart generates incorrect code for a const struct
+ * [THRIFT-3841] - dart compact protocol incorrectly serializes/deserialized doubles
+ * [THRIFT-3708] - NameError: global name 'TProtocol' is not defined
+ * [THRIFT-3704] - "TConnectedClient died: Could not refill buffer" message shown when using HTTP Server
+ * [THRIFT-3678] - Fix javadoc errors on JDK 8
+ * [THRIFT-3014] - AppVeyor support
+ * [THRIFT-2994] - Node.js TJSONProtocol cannot be used for object serialization.
+ * [THRIFT-2974] - writeToParcel throws NPE for optional enum fields
+ * [THRIFT-2948] - Python TJSONProtocol doesn't handle structs with binary fields containing invalid unicode.
+ * [THRIFT-2845] - ChildService.Plo: No such file or directory
+ * [THRIFT-3276] - Binary data does not decode correctly using the TJSONProtocol when the base64 encoded data is padded.
+ * [THRIFT-3253] - Using latest version of D gives deprecation notices
+ * [THRIFT-2883] - TTwisted.py, during ConnectionLost processing: exceptions.RuntimeError: dictionary changed size during iteration
+ * [THRIFT-2019] - Writing on a disconnected socket on Mac causes SIG PIPE
+ * [THRIFT-2020] - Thrift library has some empty files that haven't really been deleted
+ * [THRIFT-2049] - Go compiler doesn't build on native Windows
+ * [THRIFT-2024] - TServer.cpp warns on 64-bit platforms about truncating an rlim_t into an int
+ * [THRIFT-2023] - gettimeofday implementation on Windows errors when no time zone is passed in.
+ * [THRIFT-2022] - CoB and dense code generation still uses TR1 bind, even though that doesn't work with clang
+ * [THRIFT-2027] - Minor 64-bit and NOMINMAX issues in C++ library
+ * [THRIFT-2156] - TServerSocket::listen() is throwing exceptions with misleading information
+ * [THRIFT-2154] - Missing #deepCopy should return T
+ * [THRIFT-3157] - TBase signature should be TBase, F extends TFieldIdEnum>
+ * [THRIFT-3156] - Node TLS: server executes processing logic two full times
+ * [THRIFT-3154] - tutorial/py.tornado throw EOF exception
+ * [THRIFT-3063] - C++ build -Wunused-parameter warnings on processor_test, TransportTest
+ * [THRIFT-3056] - Add string/collection length limits for Python protocol readers
+ * [THRIFT-3237] - Fix TNamedPipeServer::createNamedPipe memory leak
+ * [THRIFT-3233] - Fix C++ ThreadManager::Impl::removeWorker worker join
+ * [THRIFT-3232] - Cannot deserialize json messages created with fieldNamesAsString
+ * [THRIFT-3206] - Fix Visual Studio build failure due 'pthread_self': identifier not found
+ * [THRIFT-3200] - JS and nodejs do not encode JSON protocol binary fields as base64
+ * [THRIFT-3199] - Exception field has basic metadata
+ * [THRIFT-3182] - TFramedTransport is in an invalid state after frame size exception
+ * [THRIFT-2536] - new TSocket, uninitialised value reported by valgrind
+ * [THRIFT-2527] - Apache Thrift IDL Compiler code generated for Node.js should be jshint clean
+ * [THRIFT-2519] - "processor" class is not being generated
+ * [THRIFT-2431] - TFileTransportTest fails with "check delta < XXX failed"
+ * [THRIFT-2708] - Erlang library does not support "oneway" message type
+ * [THRIFT-3377] - Deep copy is actually shallow when using typedef members
+ * [THRIFT-3376] - C# and Python JSON protocol double values lose precision
+ * [THRIFT-3373] - Various fixes for cross test servers and clients
+ * [THRIFT-3370] - errno extern variable redefined. Not compiling for Android
+ * [THRIFT-3379] - Potential out of range panic in Go JSON protocols
+ * [THRIFT-3371] - Abstract namespace Unix domain sockets broken in C++
+ * [THRIFT-3380] - nodejs: 0.9.2 -> 0.9.3 upgrade breaks Protocol and Transport requires
+ * [THRIFT-3367] - Fix bad links to coding_standards.md #634
+ * [THRIFT-3401] - Nested collections emit Objective-C code that cannot compile
+ * [THRIFT-3403] - JSON String reader doesn't recognize UTF-16 surrogate pairs
+ * [THRIFT-3362] - make check fails for C++ at the SecurityTest
+ * [THRIFT-3395] - Cocoa compiler produces corrupt code when boxing enums inside map.
+ * [THRIFT-3394] - compiler generates uncompilable code
+ * [THRIFT-3388] - hash doesn't work on set/list
+ * [THRIFT-3391] - Wrong bool formatting in test server
+ * [THRIFT-3390] - TTornado server doesn't handle closed connections properly
+ * [THRIFT-3382] - TBase class for C++ Library
+ * [THRIFT-3392] - Java TZlibTransport does not close its wrapper streams upon close()
+ * [THRIFT-3383] - i64 related warnings
+ * [THRIFT-3386] - misc. warnings with make check
+ * [THRIFT-3385] - warning: format ‘%lu’ expects ‘long unsigned int’, but has type ‘std::basic_string::size_type {aka unsigned int}
+ * [THRIFT-3355] - npm WARN package.json thrift@1.0.0-dev No license field.
+ * [THRIFT-3360] - Improve cross test servers and clients further
+ * [THRIFT-3359] - Binary field incompatibilities
+ * [THRIFT-3354] - Fix word-extraction substr bug in initialism code
+ * [THRIFT-3350] - Python JSON protocol does not encode binary as Base64
+ * [THRIFT-3577] - assertion failed at line 512 of testcontainertest.c
+ * [THRIFT-3576] - Boost test --log_format arg does not accept lowercase
+ * [THRIFT-3575] - Go compiler tries to use unexported library methods when using read_write_private
+ * [THRIFT-3574] - Cocoa generator makes uncompilable imports
+ * [THRIFT-3570] - Remove duplicate instances that are added by upstream
+ * [THRIFT-3571] - Make feature test result browsable
+ * [THRIFT-3569] - c_glib protocols do not check number of bytes read by transport
+ * [THRIFT-3568] - THeader server crashes on readSlow
+ * [THRIFT-3567] - GLib-GObject-CRITICAL **: g_object_unref: assertion 'G_IS_OBJECT (object)' failed
+ * [THRIFT-3566] - C++/Qt: TQTcpServerTest::test_communicate() is never executed
+ * [THRIFT-3564] - C++/Qt: potential core dump in TQTcpServer in case an exception occurs in TAsyncProcessor::process()
+ * [THRIFT-3558] - typos in c_glib tests
+ * [THRIFT-3559] - Fix awkward extra semi-colons with Cocoa container literals
+ * [THRIFT-3555] - 'configure' script does not honor --with-openssl= for libcrypto for BN_init
+ * [THRIFT-3554] - Constant decls may lead to "Error: internal error: prepare_member_name_mapping() already active for different struct"
+ * [THRIFT-3552] - glib_c Memory Leak
+ * [THRIFT-3551] - Thrift perl library missing package declaration
+ * [THRIFT-3549] - Exceptions are not properly stringified in Perl library
+ * [THRIFT-3546] - NodeJS code should not be namespaced (and is currently not strict-mode compliant)
+ * [THRIFT-3545] - Container type literals do not compile
+ * [THRIFT-3538] - Remove UnboundMethodType in TProtocolDecorator
+ * [THRIFT-3536] - Error 'char' does not contain a definition for 'IsLowSurrogate' for WP7 target
+ * [THRIFT-3534] - Link error when building with Qt5
+ * [THRIFT-3533] - Can not send nil pointer as service method argument
+ * [THRIFT-3507] - THttpClient does not use proxy from http_proxy, https_proxy environment variables
+ * [THRIFT-3502] - C++ TServerSocket passes small buffer to getsockname
+ * [THRIFT-3501] - Forward slash in comment causes compiler error
+ * [THRIFT-3498] - C++ library assumes optional function pthread_attr_setschedpolicy is available
+ * [THRIFT-3497] - Build fails with "invalid use of incomplete type"
+ * [THRIFT-3496] - C++: Cob style client fails when sending a consecutive request
+ * [THRIFT-3493] - libthrift does not compile on windows using visual studio
+ * [THRIFT-3488] - warning: unused variable 'program'
+ * [THRIFT-3489] - warning: deprecated conversion from string constant to 'char*' [-Wwrite-strings]
+ * [THRIFT-3487] - Full support for newer Delphi versions
+ * [THRIFT-3528] - Fix warnings in thrift.ll
+ * [THRIFT-3527] - -gen py:dynamic,utf8strings ignores utf8strings option
+ * [THRIFT-3526] - Code generated by py:utf8strings does not work for Python3
+ * [THRIFT-3524] - dcc32 warning "W1000 Symbol 'IsLowSurrogate' is deprecated: 'Use TCharHelper'" in Thrift.Protocol.JSON.pas
+ * [THRIFT-3525] - py:dynamic fails to handle binary list/set/map element
+ * [THRIFT-3521] - TSimpleJSONProtocolTest is not deterministic (fails when run on JDK 8)
+ * [THRIFT-3520] - Dart TSocket onError stream should be typed as Object
+ * [THRIFT-3519] - fastbinary does not work with -gen py:utf8strings
+ * [THRIFT-3518] - TConcurrentClientSyncInfo files were missing for Visual Studio
+ * [THRIFT-3512] - c_glib: Build fails due to missing features.h
+ * [THRIFT-3483] - Incorrect empty binary handling introduced by THRIFT-3359
+ * [THRIFT-3479] - Oneway calls should not return exceptions to clients
+ * [THRIFT-3478] - Restore dropped method to THsHaServer.java
+ * [THRIFT-3477] - Parser fails on enum item that starts with 'E' letter and continues with number
+ * [THRIFT-3476] - Missing include in ./src/thrift/protocol/TJSONProtocol.cpp
+ * [THRIFT-3474] - Docker: thrift-compiler
+ * [THRIFT-3473] - When "optional' is used with a struct member, C++ server seems to not return it correctly
+ * [THRIFT-3468] - Dart TSocketTransport onError handler is too restrictive
+ * [THRIFT-3451] - thrift_protocol PHP extension missing config.m4 file
+ * [THRIFT-3456] - rounding issue in static assert
+ * [THRIFT-3455] - struct write method's return value is incorrect
+ * [THRIFT-3454] - Python Tornado tutorial is broken
+ * [THRIFT-3463] - Java can't be disabled in CMake build
+ * [THRIFT-3450] - NPE when using SSL
+ * [THRIFT-3449] - TBaseAsyncProcessor fb.responseReady() never called for oneway functions
+ * [THRIFT-3471] - Dart generator does not handle uppercase argument names
+ * [THRIFT-3470] - Sporadic timeouts with pipes
+ * [THRIFT-3465] - Go Code With Complex Const Initializer Compilation Depends On Struct Order
+ * [THRIFT-3464] - Fix several defects in c_glib code generator
+ * [THRIFT-3462] - Cocoa generates Incorrect #import header names
+ * [THRIFT-3453] - remove rat_exclude
+ * [THRIFT-3418] - Use of ciphers in ssl.wrap_socket() breaks python 2.6 compatibility
+ * [THRIFT-3417] - "namespace xsd" is not really working
+ * [THRIFT-3413] - Thrift code generation bug in Go when extending service
+ * [THRIFT-3420] - C++: TSSLSockets are not interruptable
+ * [THRIFT-3415] - include unistd.h conditionally
+ * [THRIFT-3414] - #include in THeaderTransport.h breaks windows build
+ * [THRIFT-3411] - Go generates remotes with wrong package qualifiers when including
+ * [THRIFT-3430] - Go THttpClient does not read HTTP response body to completion when closing
+ * [THRIFT-3423] - First call to thrift_transport:read_exact fails to dispatch correct function
+ * [THRIFT-3422] - Go TServerSocket doesn't close on Interrupt
+ * [THRIFT-3421] - rebar as dependency instead of bundling (was: rebar fails if PWD contains Unicode)
+ * [THRIFT-3428] - Go test fails when running make check
+ * [THRIFT-3445] - Throwable messages are hidden from JVM stack trace output
+ * [THRIFT-3443] - Thrift include can generate uncompilable code
+ * [THRIFT-3444] - Large 64 bit Integer does not preserve value through Node.js JSONProtocol
+ * [THRIFT-3436] - misc. cross test issues with UTF-8 path names
+ * [THRIFT-3435] - Put generated Java code for fullcamel tests in a separate package/namespace
+ * [THRIFT-3433] - Doubles aren't interpreted correctly
+ * [THRIFT-3437] - Mingw-w64 build fail
+ * [THRIFT-3434] - Dart generator produces empty name in pubspec.yaml for includes without namespaces
+ * [THRIFT-3408] - JSON generator emits incorrect types
+ * [THRIFT-3406] - Cocoa client should not schedule streams on main runloop
+ * [THRIFT-3404] - JSON String reader doesn't recognize UTF-16 surrogate pair
+ * [THRIFT-3636] - Double precision is not fully preserved in C++ TJSONProtocol
+ * [THRIFT-3632] - c_glib testserialization fails with glib assertion
+ * [THRIFT-3619] - Using Thrift 0.9.3 with googletest on Linux gcc 4.9 / C++11
+ * [THRIFT-3617] - CMake does not build gv/xml generators
+ * [THRIFT-3615] - Fix Python SSL client resource leak on connection failure
+ * [THRIFT-3616] - lib/py/test/test_sslsocket.py is flaky
+ * [THRIFT-3643] - Perl SSL server crashes if a client disconnects without handshake
+ * [THRIFT-3639] - C# Thrift library forces TLS 1.0, thwarting TLS 1.2 usage
+ * [THRIFT-3633] - Travis "C C++ - GCC" build was using clang
+ * [THRIFT-3634] - Fix Python TSocket resource leak on connection failure
+ * [THRIFT-3630] - Debian/Ubuntu install docs need an update
+ * [THRIFT-3629] - Parser sets exitcode on errors, but generator does not
+ * [THRIFT-3608] - lib/cpp/test/SecurityTest is flaky in jenkins Thrift-precommit build.
+ * [THRIFT-3601] - Better conformance to PEP8 for generated code
+ * [THRIFT-3599] - Validate client IP address against cert's SubjectAltName
+ * [THRIFT-3598] - TBufferedTransport doesn't instantiate client connection
+ * [THRIFT-3597] - `make check` hangs in go tests
+ * [THRIFT-3589] - Dart generator uses wrong name in constructor for uppercase arguments with defaults
+ * [THRIFT-3588] - Using TypeScript with --noImplicitAny fails
+ * [THRIFT-3584] - boolean false value cannot be transferred
+ * [THRIFT-3578] - Make THeaderTransport detect TCompact framed and unframed
+ * [THRIFT-3323] - Python library does not handle escaped forward slash ("/") in JSON
+ * [THRIFT-3322] - CMake generated "make check" failes on python_test
+ * [THRIFT-3321] - Thrift can't be added as a subdirectory of another CMake-based project
+ * [THRIFT-3314] - Dots in file names of includes causes dots in javascript variable names
+ * [THRIFT-3307] - Segfault in Ruby serializer
+ * [THRIFT-3309] - Missing TConstant.php in /lib/php/Makefile.am
+ * [THRIFT-3810] - unresolved external symbol public: virtual void __cdecl apache::thrift::server::TServerFramework::serve(void)
+ * [THRIFT-3736] - C++ library build fails if OpenSSL does not support SSLv3
+ * [THRIFT-3878] - Compile error in TSSLSocket.cpp with new OpenSSL [CRYPTO_num_locks]
+ * [THRIFT-3949] - missing make dist entry for compiler/cpp/test
+ * [THRIFT-449] - The wire format of the JSON Protocol may not always be valid JSON if it contains non-UTF8 encoded strings
+ * [THRIFT-162] - Thrift structures are unhashable, preventing them from being used as set elements
+ * [THRIFT-3961] - TConnectedClient does not terminate the connection to the client if an exception occurs while processing the received message.
+ * [THRIFT-3881] - Travis CI builds are failing due to docker failures (three retries, and gives up)
+ * [THRIFT-3937] - Cannot compile 0.10.0 development tip with gcc-4.6.x
+ * [THRIFT-3964] - Unsupported mechanism type ????? due to dependency on default OS-dependent charset
+ * [THRIFT-3038] - Use of volatile in cpp library
+ * [THRIFT-3301] - Java generated code uses imports that can lead to class name collisions with IDL defined types
+ * [THRIFT-3348] - PHP TCompactProtocol bool&int64 readvalue bug
+ * [THRIFT-3955] - TThreadedServer Memory Leak
+ * [THRIFT-3829] - Thrift does not install Python Libraries if Twisted is not installed
+ * [THRIFT-3932] - C++ ThreadManager has a rare termination race
+ * [THRIFT-3828] - cmake fails when Boost_INCLUDE_DIRS (and other variables passed to include_directories()) is empty
+ * [THRIFT-3958] - CMake WITH_MT option for windows static runtime linking does not support the cmake build type RelWithDebInfo
+ * [THRIFT-3957] - TConnectedClient does not disconnect from clients when their timeout is reached.
+ * [THRIFT-3953] - TSSLSocket::close should handle exceptions from waitForEvent because it is called by the destructor.
+ * [THRIFT-3977] - PHP extension creates undefined values when deserializing sets
+ * [THRIFT-3947] - sockaddr type isn't always large enough for the return of getsockname
+ * [THRIFT-2755] - ThreadSanitizer reports data race in ThreadManager::Impl::addWorker
+ * [THRIFT-3948] - errno is not the correct method of getting the error in windows
+ * [THRIFT-4008] - broken ci due to upstream dependency versioning break
+ * [THRIFT-3999] - Fix Debian & Ubuntu package dependencies
+ * [THRIFT-3886] - PHP cross test client returns 0 even when failing
+ * [THRIFT-3997] - building thrift libs does not support new openssl
+
+## Documentation
+ * [THRIFT-3867] - Specify BinaryProtocol and CompactProtocol
+
+## Epic
+ * [THRIFT-3049] - As an iOS developer, I want a generator and library that produces Swift code
+ * [THRIFT-2336] - UTF-8 sent by PHP as JSON is not understood by TJsonProtocol
+
+## Improvement
+ * [THRIFT-1867] - Python client/server should support client-side certificates.
+ * [THRIFT-1313] - c_glib compact support
+ * [THRIFT-1385] - make install doesn't install java library in the setted folder
+ * [THRIFT-1437] - Update RPM spec
+ * [THRIFT-847] - Test Framework harmonization across all languages
+ * [THRIFT-819] - add Enumeration for protocol, transport and server types
+ * [THRIFT-3927] - Emit an error instead of throw an error in the async callback
+ * [THRIFT-3931] - TSimpleServer: If process request encounter UNKNOWN_METHOD, don't close transport.
+ * [THRIFT-3934] - Automatically resolve OpenSSL binary version on Windows CI
+ * [THRIFT-3918] - Run subset of make cross
+ * [THRIFT-3908] - Remove redundant dependencies from Dockerfile
+ * [THRIFT-3907] - Skip Docker image build on CI when unchanged
+ * [THRIFT-3868] - Java struct equals should do identity check before field comparison
+ * [THRIFT-3849] - Port Go serializer and deserializer to dart
+ * [THRIFT-2989] - Complete CMake build for Apache Thrift
+ * [THRIFT-2980] - ThriftMemoryBuffer doesn't have a constructor option to take an existing buffer
+ * [THRIFT-2856] - refactor erlang basic transports and unify interfaces
+ * [THRIFT-2877] - Optimize generated hashCode
+ * [THRIFT-2869] - JSON: run schema validation from tests
+ * [THRIFT-3112] - [Java] AsyncMethodCallback should be typed in generated AsyncIface
+ * [THRIFT-3263] - PHP jsonSerialize() should cast scalar types
+ * [THRIFT-2905] - Cocoa compiler should have option to produce "modern" Objective-C
+ * [THRIFT-2821] - Enable the use of custom HTTP-Header in the Transport
+ * [THRIFT-2093] - added the ability to set compression level in C++ zlib transport
+ * [THRIFT-2089] - Compiler ignores duplicate typenames
+ * [THRIFT-2056] - Moved all #include config.h statements to #include
+ * [THRIFT-2031] - Make SO_KEEPALIVE configurable for C++ lib
+ * [THRIFT-2021] - Improve large binary protocol string performance
+ * [THRIFT-2028] - Cleanup threading headers / libraries
+ * [THRIFT-2014] - Change C++ lib includes to use style throughout
+ * [THRIFT-2312] - travis.yml: build everything
+ * [THRIFT-1915] - Multiplexing Services
+ * [THRIFT-1736] - Visual Studio top level project files within msvc
+ * [THRIFT-1735] - integrate tutorial into regular build
+ * [THRIFT-1533] - Make TTransport should be Closeable
+ * [THRIFT-35] - Move language tests into their appropriate library directory
+ * [THRIFT-1079] - Support i64 in AS3
+ * [THRIFT-1108] - SSL support for the Ruby library
+ * [THRIFT-3856] - update debian package dependencies
+ * [THRIFT-3833] - haxe http server implementation (by embeding into php web server)
+ * [THRIFT-3839] - Performance issue with big message deserialization using php extension
+ * [THRIFT-3820] - Erlang: Detect OTP >= 18 to use new time correction
+ * [THRIFT-3816] - Reduce docker build duration on Travis-CI
+ * [THRIFT-3815] - Put appveyor dependency versions to one place
+ * [THRIFT-3788] - Compatibility improvements and Win64 support
+ * [THRIFT-3792] - Timeouts for anonymous pipes should be configurable
+ * [THRIFT-3794] - Split Delphi application, protocol and transport exception subtypes into separate exceptions
+ * [THRIFT-3774] - The generated code should have exception_names meta info
+ * [THRIFT-3762] - Fix build warnings for deprecated Thrift "byte" fields
+ * [THRIFT-3756] - Improve requiredness documentation
+ * [THRIFT-3761] - Add debian package for Python3
+ * [THRIFT-3742] - haxe php cli support
+ * [THRIFT-3733] - Socket timeout improvements
+ * [THRIFT-3728] - http transport for thrift-lua
+ * [THRIFT-3905] - Dart compiler does not initialize bool, int, and double properties
+ * [THRIFT-3911] - Loosen Ruby dev dependency version requirements
+ * [THRIFT-3906] - Run C# tests with make check
+ * [THRIFT-3900] - Add Python SSL flags
+ * [THRIFT-3897] - Provide meaningful exception type based on WebExceptionStatus in case of timeout
+ * [THRIFT-3808] - Missing `DOUBLE` in thrift type enumeration
+ * [THRIFT-3803] - Remove "file" attribute from XML generator
+ * [THRIFT-3660] - Add V4 mapped address to test client cert's altname
+ * [THRIFT-3661] - Use https to download meck in erlang test build
+ * [THRIFT-3659] - Check configure result of CMake on CI
+ * [THRIFT-3667] - Add TLS SNI support to clients
+ * [THRIFT-3651] - Make backports.match_hostname and ipaddress optional
+ * [THRIFT-3666] - Build D tutorial as part of Autotools build
+ * [THRIFT-3665] - Add D libevent and OpenSSL to docker images
+ * [THRIFT-3664] - Remove md5.c
+ * [THRIFT-3662] - Add Haskell to debian docker image
+ * [THRIFT-3711] - Add D to cross language test
+ * [THRIFT-3691] - Run flake8 Python style check on Travis-CI
+ * [THRIFT-3692] - (Re)enable Appveyor C++ and Python build
+ * [THRIFT-3677] - Improve CMake Java build
+ * [THRIFT-3679] - Add stdout log to testBinary in Java test server
+ * [THRIFT-3718] - Reduce size of docker image for build environment
+ * [THRIFT-3698] - [Travis-CI] Introduce retry to apt commands
+ * [THRIFT-3127] - switch -recurse to --recurse and reserve -r
+ * [THRIFT-3087] - Pass on errors like "connection closed"
+ * [THRIFT-3240] - Thrift Python client should support subjectAltName and wildcard certs in TSSLSocket
+ * [THRIFT-3213] - make cross should indicate when it skips a known failing test
+ * [THRIFT-3208] - Fix Visual Studio solution build failure due to missing source
+ * [THRIFT-3186] - Add TServerHTTP to Go library
+ * [THRIFT-2342] - Add __FILE__ and __LINE__ to Thrift C++ exceptions
+ * [THRIFT-3372] - Add dart generator to Visual Studio project
+ * [THRIFT-3366] - ThriftTest to implement standard return values
+ * [THRIFT-3402] - Provide a perl Unix Socket implementation
+ * [THRIFT-3361] - Improve C# library
+ * [THRIFT-3393] - Introduce i8 to provide consistent set of Thrift IDL integer types
+ * [THRIFT-3339] - Support for database/sql
+ * [THRIFT-3565] - C++: T[Async]Processor::getEventHandler() should be declared as const member functions
+ * [THRIFT-3563] - C++/Qt: removed usage of macro QT_PREPEND_NAMESPACE as it isn't consequently used for all references to Qt types.
+ * [THRIFT-3562] - Removed unused TAsyncProcessor::getAsyncServer()
+ * [THRIFT-3561] - C++/Qt: make use of Q_DISABLE_COPY() to get rid of copy ctor and assignment operator
+ * [THRIFT-3556] - c_glib file descriptor transport
+ * [THRIFT-3544] - Make cross test fail when server process died unexpectedly
+ * [THRIFT-3540] - Make python tutorial more in line with PEP8
+ * [THRIFT-3535] - Dart generator argument to produce a file structure usable in parent library
+ * [THRIFT-3505] - Enhance Python TSSLSocket
+ * [THRIFT-3506] - Eliminate old style classes from library code
+ * [THRIFT-3503] - Enable py:utf8string by default
+ * [THRIFT-3499] - Add package_prefix to python generator
+ * [THRIFT-3495] - Minor enhancements and fixes for cross test
+ * [THRIFT-3486] - Java generated `getFieldValue` is incompatible with `setFieldValue` for binary values.
+ * [THRIFT-3484] - Consolidate temporary buffers in Java's TCompactProtocol
+ * [THRIFT-3516] - Add feature test for THeader TBinaryProtocol interop
+ * [THRIFT-3515] - Python 2.6 compatibility and test on CI
+ * [THRIFT-3514] - PHP 7 compatible version of binary protocol
+ * [THRIFT-3469] - Docker: Debian support
+ * [THRIFT-3416] - Retire old "xxx_namespace" declarations from the IDL
+ * [THRIFT-3426] - Align autogen comment in XSD
+ * [THRIFT-3424] - Add CMake android build option
+ * [THRIFT-3439] - Run make cross using Python3 when available
+ * [THRIFT-3440] - Python make check takes too much time
+ * [THRIFT-3441] - Stabilize Travis-CI builds
+ * [THRIFT-3431] - Avoid "schemes" HashMap lookups during struct reads/writes
+ * [THRIFT-3432] - Add a TByteBuffer transport to the Java library
+ * [THRIFT-3438] - Enable py:new_style by default
+ * [THRIFT-3405] - Go THttpClient misuses http.Client objects
+ * [THRIFT-3614] - Improve logging of test_sslsocket.py
+ * [THRIFT-3647] - Fix php extension build warnings
+ * [THRIFT-3642] - Speed up cross test runner
+ * [THRIFT-3637] - Implement compact protocol for dart
+ * [THRIFT-3613] - Port Python C extension to Python 3
+ * [THRIFT-3612] - Add Python C extension for compact protocol
+ * [THRIFT-3611] - Add --regex filter to cross test runner
+ * [THRIFT-3631] - JSON protocol implementation for Lua
+ * [THRIFT-3609] - Remove or replace TestPortFixture.h
+ * [THRIFT-3605] - Have the compiler complain about invalid arguments and options
+ * [THRIFT-3596] - Better conformance to PEP8
+ * [THRIFT-3585] - Compact protocol implementation for Lua
+ * [THRIFT-3582] - Erlang libraries should have service metadata
+ * [THRIFT-3579] - Introduce retry to make cross
+ * [THRIFT-3306] - Java: TBinaryProtocol: Use 1 temp buffer instead of allocating 8
+ * [THRIFT-3910] - Do not invoke pip as part of build process
+ * [THRIFT-1857] - Python 3.X Support
+ * [THRIFT-1944] - Binding to zero port
+ * [THRIFT-3954] - Enable the usage of structs called "Object" in Java
+ * [THRIFT-3981] - Enable analyzer strong mode in Dart library
+ * [THRIFT-3998] - Document ability to add custom tags to thrift structs
+ * [THRIFT-4006] - Add a removeEventListener method on TSocket
+
+## New Feature
+ * [THRIFT-640] - Support deprecation
+ * [THRIFT-948] - SSL socket support for PHP
+ * [THRIFT-764] - add Support for Vala language
+ * [THRIFT-3046] - Allow PSR4 class loading for generated classes (PHP)
+ * [THRIFT-2113] - Erlang SSL Socket Support
+ * [THRIFT-1482] - Unix domain socket support under PHP
+ * [THRIFT-519] - Support collections of types without having to explicitly define it
+ * [THRIFT-468] - Rack Middleware Application for Rails
+ * [THRIFT-1708] - Add event handlers for processor events
+ * [THRIFT-3834] - Erlang namespacing and exception metadata
+ * [THRIFT-2510] - Implement TNonblockingServer's ability to listen on unix domain sockets
+ * [THRIFT-3397] - Implement TProcessorFactory in C# to enable per-client processors
+ * [THRIFT-3523] - XML Generator
+ * [THRIFT-3510] - Add HttpTaskAsyncHandler implementation
+ * [THRIFT-3318] - PHP: SimpleJSONProtocol Implementation
+ * [THRIFT-3299] - Dart language bindings in Thrift
+ * [THRIFT-2835] - Add possibility to distribute generators separately from thrift core, and load them dynamically
+ * [THRIFT-184] - Add OSGi Manifest headers to the libthrift java library to be able to use Thrift in the OSGi runtime
+ * [THRIFT-141] - If a required field is not present on serialization, throw an exception
+ * [THRIFT-1891] - Add Windows ALPC transport which is right counterpart of Unix domain sockets
+
+## Question
+ * [THRIFT-1808] - The Thrift struct should be considered self-contained?
+ * [THRIFT-2895] - Tutorial cpp
+ * [THRIFT-3860] - Elephant-bird application Test fails for Thrift
+ * [THRIFT-3811] - HTTPS Support for C++ applications
+ * [THRIFT-3509] - "make check" error
+
+## Story
+ * [THRIFT-3452] - .travis.yml: Migrating from legacy to container-based infrastructure
+
+## Sub-task
+ * [THRIFT-1811] - ruby tutorial as part of the regular build
+ * [THRIFT-2779] - PHP TJSONProtocol encode unicode into UCS-4LE which can't be parsed by other language bindings
+ * [THRIFT-2110] - Erlang: Support for Multiplexing Services on any Transport, Protocol and Server
+ * [THRIFT-3852] - A Travis-CI job fails with "write error"
+ * [THRIFT-3740] - Fix haxelib.json classpath
+ * [THRIFT-3653] - incorrect union serialization
+ * [THRIFT-3652] - incorrect serialization of optionals
+ * [THRIFT-3655] - incorrect union serialization
+ * [THRIFT-3654] - incorrect serialization of optionals
+ * [THRIFT-3656] - incorrect serialization of optionals
+ * [THRIFT-3699] - Fix integer limit symbol includes in Python C extension
+ * [THRIFT-3693] - Fix include issue in C++ TSSLSocketInterruptTest on Windows
+ * [THRIFT-3694] - [Windows] Disable tests of a few servers that are not supported
+ * [THRIFT-3696] - Install pip to CentOS Docker images to fix Python builds
+ * [THRIFT-3638] - Fix haxelib.json
+ * [THRIFT-3251] - Add http transport for server to Go lib
+ * [THRIFT-2424] - Recursive Types
+ * [THRIFT-2423] - THeader
+ * [THRIFT-2413] - Python: UTF-8 sent by PHP as JSON is not understood by TJsonProtocol
+ * [THRIFT-2409] - Java: UTF-8 sent by PHP as JSON is not understood by TJsonProtocol
+ * [THRIFT-2412] - D: UTF-8 sent by PHP as JSON is not understood by TJsonProtocol
+ * [THRIFT-2411] - C++: UTF-8 sent by PHP as JSON is not understood by TJsonProtocol
+ * [THRIFT-2410] - JavaMe: UTF-8 sent by PHP as JSON is not understood by TJsonProtocol
+ * [THRIFT-2668] - TestSuite: detailed result on passed tests by feature
+ * [THRIFT-2659] - python Test Server fails when throwing TException
+ * [THRIFT-3398] - Add CMake build for Haskell library and tests
+ * [THRIFT-3396] - DART: UTF-8 sent by PHP as JSON is not understood by TJsonProtocol
+ * [THRIFT-3364] - Fix ruby binary field encoding in TJSONProtocol
+ * [THRIFT-3381] - Fix for misc. codegen issues with THRIFT-2905
+ * [THRIFT-3573] - No rule to make target `../../../test/c_glib/src/.deps/testthrifttest-thrift_test_handler.Po'.
+ * [THRIFT-3572] - "Unable to determine the behavior of a signed right shift"
+ * [THRIFT-3542] - Add length limit support to Java test server
+ * [THRIFT-3537] - Remove the (now obsolete) csharp:asyncctp flag
+ * [THRIFT-3532] - Add configurable string and container read size limit to Python protocols
+ * [THRIFT-3531] - Create cross lang feature test for string and container read length limit
+ * [THRIFT-3482] - Haskell JSON protocol does not encode binary field as Base64
+ * [THRIFT-3425] - Minor fixes + simplification for CentOS Dockerfile
+ * [THRIFT-3442] - Run CMake tests on Appveyor
+ * [THRIFT-3409] - NodeJS binary field issues
+ * [THRIFT-3621] - Fix lib/cpp/test/SecurityTest.cpp to use ephemeral ports
+ * [THRIFT-3628] - Fix lib/cpp/test/TServerIntegrationTest.cpp to use ephemeral ports
+ * [THRIFT-3625] - Kill unused #include "TestPortFixture.h" in lib/cpp/test/TServerTransportTest.cpp.
+ * [THRIFT-3646] - Fix Python extension build warnings
+ * [THRIFT-3626] - Fix lib/cpp/test/TSocketInterruptTest.cpp to use ephemeral ports.
+ * [THRIFT-3624] - Fix lib/cpp/test/TServerSocketTest.cpp to use ephemeral ports
+ * [THRIFT-3623] - Fix cpp/lib/test/TSSLSocketInterruptTest.cpp to use ephemeral ports
+ * [THRIFT-3592] - Add basic test client
+ * [THRIFT-3980] - add TExtendedBinaryProtocol.java
+
+## Task
+ * [THRIFT-1801] - Sync up TApplicationException codes across languages and thrift implementations
+ * [THRIFT-1259] - Automate versioning
+
+## Test
+ * [THRIFT-3400] - Add Erlang to cross test
+ * [THRIFT-3504] - Fix FastbinaryTest.py
+
+## Wish
+ * [THRIFT-3923] - Maybe remove Aereo from the "Powered by" list
+ * [THRIFT-2149] - Add an option to disable the generation of default operators
+
+
+
+Thrift 0.9.3
+--------------------------------------------------------------------------------
+## Bug
+ * [THRIFT-2441] - Cannot shutdown TThreadedServer when clients are still connected
+ * [THRIFT-2465] - TBinaryProtocolT breaks if copied/moved
+ * [THRIFT-2474] - thrift.h causes a compile failure
+ * [THRIFT-2540] - Running configure from outside the source directory fails
+ * [THRIFT-2598] - Add check for minimum Go version to configure.ac
+ * [THRIFT-2647] - compiler-hs: don't decapitalize field names, do decapitalize argument bindings
+ * [THRIFT-2773] - Generated Java code for 'oneway' methods is incorrect.
+ * [THRIFT-2789] - TNonblockingServer leaks socket FD's under load
+ * [THRIFT-2682] - TThreadedServer leaks per-thread memory
+ * [THRIFT-2674] - JavaScript: declare Accept: and Content-Type: in request
+ * [THRIFT-3078] - TNonblockingServerSocket's logger is not named after TNonblockingServerSocket
+ * [THRIFT-3077] - C++ TFileTransport ignores return code from ftruncate
+ * [THRIFT-3067] - C++ cppcheck performance related warnings
+ * [THRIFT-3066] - C++ TDenseProtocol assert modifies instead of checks
+ * [THRIFT-3071] - bootstrap.sh on Ubuntu 12.04 (Precise) automake error
+ * [THRIFT-3069] - C++ TServerSocket leaks socket on fcntl get or set flags error
+ * [THRIFT-3079] - TNonblockingServerSocket's logger is not named after TNonblockingServerSocket
+ * [THRIFT-3080] - C++ TNonblockingServer connection leak while accept huge number connections.
+ * [THRIFT-3086] - C++ Valgrind Error Cleanup
+ * [THRIFT-3085] - thrift_reconnecting_client never try to reconnect
+ * [THRIFT-3123] - Missing include in compiler/cpp/src/main.h breaks build in some environments
+ * [THRIFT-3125] - Fix the list of exported headers in automake input
+ * [THRIFT-3126] - PHP JSON serializer converts empty or int-indexed maps to lists
+ * [THRIFT-3132] - Properly format date in Java @Generated annotations
+ * [THRIFT-3137] - Travis build hangs after failure
+ * [THRIFT-3138] - "make check" parallel execution is nondeterministic
+ * [THRIFT-3139] - JS library test is flaky
+ * [THRIFT-3140] - ConcurrentModificationException is thrown by JavaScript test server
+ * [THRIFT-3124] - Some signed/unsigned warnings while building compiler
+ * [THRIFT-3128] - Go generated code produces name collisions between services
+ * [THRIFT-3146] - Graphviz generates function name collisions between services
+ * [THRIFT-3147] - Segfault while receiving data
+ * [THRIFT-3148] - Markdown links to coding_standards are dead
+ * [THRIFT-3090] - cmake build is broken on MacOSX
+ * [THRIFT-3097] - cmake targets unconditionally depend on optional libraries
+ * [THRIFT-3094] - master as of 2015-APR-13 fails -DBOOST_THREADS cmake build
+ * [THRIFT-3099] - cmake build is broken on FreeBSD
+ * [THRIFT-3089] - Assigning default ENUM values results in non-compilable java code if java namespace is not defined
+ * [THRIFT-3093] - mingw compile fixes for c++ library 0.9.2
+ * [THRIFT-3098] - Thrift does not pretty print binary typedefs the way it does binary fields
+ * [THRIFT-3091] - c_glib service method should return result from handler method
+ * [THRIFT-3088] - TThreadPoolServer with Sasl auth may leak CLOSE_WAIT socket
+ * [THRIFT-3109] - Cross test log file cannot be browsed when served in HTTP server
+ * [THRIFT-3113] - m4 C++11 macro issue
+ * [THRIFT-3105] - C++ libthriftnb library on Windows build failure
+ * [THRIFT-3115] - Uncompileable code due to name collision with predefined used types
+ * [THRIFT-3117] - Java TSSLTransportFactory can't load certificates within JAR archive
+ * [THRIFT-3102] - could not make check for Go Library
+ * [THRIFT-3120] - Minor spelling errors and an outdated URL
+ * [THRIFT-3121] - Librt does not exist on OS X
+ * [THRIFT-3152] - Compiler error on Mac OSX (missing #include )
+ * [THRIFT-3162] - make fails for dmd 2.067
+ * [THRIFT-3164] - Thrift C++ library SSL socket by default allows for insecure SSLv3 negotiation
+ * [THRIFT-3168] - Fix Maven POM
+ * [THRIFT-3170] - Initialism code in the Go compiler causes chaos
+ * [THRIFT-3169] - Do not export thrift.TestStruct and thrift.TestEnum in thrift Go library
+ * [THRIFT-3191] - Perl compiler does not add support for unexpected exception handling
+ * [THRIFT-3178] - glib C does not compile
+ * [THRIFT-3189] - Perl ServerSocket should allow a specific interface to be listened to
+ * [THRIFT-3252] - Missing TConcurrentClientSyncInfo.h in cpp Makefile, so doesn't install
+ * [THRIFT-3255] - Thrift generator doesn't exclude 'package' keyword for thrift property names breaking java builds
+ * [THRIFT-3260] - multiple warnings in c_glib tutorial
+ * [THRIFT-3256] - Some D test timings are too aggressive for slow machines
+ * [THRIFT-3257] - warning: extra tokens at end of #endif directive
+ * [THRIFT-3184] - Thrift Go leaves file descriptors open
+ * [THRIFT-3203] - DOAP - please fix "Ocaml" => "OCaml"
+ * [THRIFT-3210] - (uncompileable) code generated for server events while events are not enabled
+ * [THRIFT-3215] - TJSONProtocol (c++) uses "throw new" to throw exceptions instead of "throw"
+ * [THRIFT-3202] - Allow HSHAServer to configure min and max worker threads separately.
+ * [THRIFT-3205] - TCompactProtocol return a wrong error when the io.EOF happens
+ * [THRIFT-3209] - LGPL mentioned in license file
+ * [THRIFT-3197] - keepAliveTime is hard coded as 60 sec in TThreadPoolServer
+ * [THRIFT-3196] - Misspelling in lua TBinaryProtocol (stirctWrite => strictWrite)
+ * [THRIFT-3198] - Allow construction of TTransportFactory with a specified maxLength
+ * [THRIFT-3192] - Go import paths changed in 1.4, and expired June 1
+ * [THRIFT-3271] - Could not find or load main class configtest_ax_javac_and_java on some non-english systems
+ * [THRIFT-3273] - c_glib: Generated code tries to convert between function and void pointers
+ * [THRIFT-3264] - Fix Erlang 16 namespaced types
+ * [THRIFT-3270] - reusing TNonblockingServer::TConnection causes dirty TSocket
+ * [THRIFT-3267] - c_glib: "Critical" failure during unit tests
+ * [THRIFT-3277] - THttpClient leaks connections if it's used for multiple requests
+ * [THRIFT-3278] - NodeJS: Fix exception stack traces and names
+ * [THRIFT-3279] - Fix a bug in retry_max_delay (NodeJS)
+ * [THRIFT-3280] - Initialize retry variables on construction
+ * [THRIFT-3283] - c_glib: Tutorial server always exits with warning
+ * [THRIFT-3284] - c_glib: Empty service produces unused-variable warning
+ * [THRIFT-1925] - c_glib generated code does not compile
+ * [THRIFT-1849] - after transport->open() opens, isOpen returns true and the next open() goes through when it should not
+ * [THRIFT-1866] - java compiler generates non-compiling code with const's defined in a thrift when name includes non-identifier chars
+ * [THRIFT-1938] - FunctionRunner.h -- uses wrong path for Thread.h when installed
+ * [THRIFT-1844] - Password string not cleared
+ * [THRIFT-2004] - Thrift::Union violates :== method contract and crashes
+ * [THRIFT-2073] - Thrift C++ THttpClient error: cannot refill buffer
+ * [THRIFT-2127] - Autoconf scripting does not properly account for cross-compile
+ * [THRIFT-2180] - Integer types issues in Cocoa lib on ARM64
+ * [THRIFT-2189] - Go needs "isset" to fully support "union" type (and optionals)
+ * [THRIFT-2192] - autotools on Redhat based systems
+ * [THRIFT-2546] - cross language tests fail at 'TestMultiException' when using nodejs server
+ * [THRIFT-2547] - nodejs servers and clients fail to connect with cpp using compact protocol
+ * [THRIFT-2548] - Nodejs servers and clients do not work properly with -ssl
+ * [THRIFT-1471] - toString() does not print ByteBuffer values when nested in a List
+ * [THRIFT-1201] - getaddrinfo resource leak
+ * [THRIFT-615] - TThreadPoolServer doesn't call task_done after pulling tasks from its client's queue
+ * [THRIFT-162] - Thrift structures are unhashable, preventing them from being used as set elements
+ * [THRIFT-810] - Crashed client on TSocket::close under load
+ * [THRIFT-557] - charset problem with file Autogenerated by Thrift
+ * [THRIFT-233] - IDL doesn't support negative hex literals
+ * [THRIFT-1649] - contrib/zeromq does not build in 0.8.0
+ * [THRIFT-1642] - Miscalculation leads to throwing an unexpected "TTransportException::TIMED_OUT" (also called "EAGAIN (timed out)") exception
+ * [THRIFT-1587] - TSocket::setRecvTimeout error
+ * [THRIFT-1248] - pointer subtraction in TMemoryBuffer relies on undefined behavior
+ * [THRIFT-1774] - Sasl Transport client would hang when trying to connect non-sasl transport server
+ * [THRIFT-1754] - RangeError in buffer handling
+ * [THRIFT-1618] - static structMap in FieldMetaData is not thread safe and can lead to deadlocks
+ * [THRIFT-2335] - thrift incompatibility with py:tornado as server, java as client
+ * [THRIFT-2803] - TCP_DEFER_ACCEPT not supported with domain sockets
+ * [THRIFT-2799] - Build Problem(s): ld: library not found for -l:libboost_unit_test_framework.a
+ * [THRIFT-2801] - C++ test suite compilation warnings
+ * [THRIFT-2802] - C++ tutorial compilation warnings
+ * [THRIFT-2795] - thrift_binary_protocol.c: 'dereferencing type-punned pointer will break strict-aliasing rules'
+ * [THRIFT-2817] - TSimpleJSONProtocol reads beyond end of message
+ * [THRIFT-2826] - html:standalone sometimes ignored
+ * [THRIFT-2829] - Support haxelib installation via github
+ * [THRIFT-2828] - slightly wrong help screen indent
+ * [THRIFT-2831] - Removes dead code in web_server.js introduced in THRIFT-2819
+ * [THRIFT-2823] - All JS-tests are failing when run with grunt test
+ * [THRIFT-2827] - Thrift 0.9.2 fails to compile on Yosemite due to tr1/functional include in ProcessorTest.cpp
+ * [THRIFT-2843] - Automake configure.ac has possible typo related to Java
+ * [THRIFT-2813] - multiple haxe library fixes/improvements
+ * [THRIFT-2825] - Supplying unicode to python Thrift client can cause next request arguments to get overwritten
+ * [THRIFT-2840] - Cabal file points to LICENSE file outside the path of the Haskell project.
+ * [THRIFT-2818] - Trailing commas in array
+ * [THRIFT-2830] - Clean up ant warnings in tutorial dir
+ * [THRIFT-2842] - Erlang thrift client has infinite timeout
+ * [THRIFT-2810] - Do not leave the underlying ServerSocket open if construction of TServerSocket fails
+ * [THRIFT-2812] - Go server adding redundant buffering layer
+ * [THRIFT-2839] - TFramedTransport read bug
+ * [THRIFT-2844] - Nodejs support broken when running under Browserify
+ * [THRIFT-2814] - args/result classes not found when no namespace is set
+ * [THRIFT-2847] - function IfValue() is a duplicate of System.StrUtils.IfThen
+ * [THRIFT-2848] - certain Delphi tests do not build if TypeRegistry is used
+ * [THRIFT-2854] - Go Struct writer and reader loses important error information
+ * [THRIFT-2858] - Enable header field case insensitive match in THttpServer
+ * [THRIFT-2857] - C# generator creates uncompilable code for struct constants
+ * [THRIFT-2860] - Delphi server closes connection on unexpected exceptions
+ * [THRIFT-2868] - Enhance error handling in the Go client
+ * [THRIFT-2879] - TMemoryBuffer: using lua string in wrong way
+ * [THRIFT-2851] - Remove strange public Peek() from Go transports
+ * [THRIFT-2852] - Better Open/IsOpen/Close behavior for StreamTransport.
+ * [THRIFT-2871] - Missing semicolon in thrift.js
+ * [THRIFT-2872] - ThreadManager deadlock for task expiration
+ * [THRIFT-2881] - Handle errors from Accept() correctly
+ * [THRIFT-2849] - Spell errors reported by codespell tool
+ * [THRIFT-2870] - C++ TJSONProtocol using locale dependent formatting
+ * [THRIFT-2882] - Lua Generator: using string.len function to get struct(map,list,set) size
+ * [THRIFT-2864] - JSON generator missing from Visual Studio build project
+ * [THRIFT-2878] - Go validation support of required fields
+ * [THRIFT-2873] - TPipe and TPipeServer don't compile on Windows with UNICODE enabled
+ * [THRIFT-2888] - import of is missing in JSON generator
+ * [THRIFT-2900] - Python THttpClient does not reset socket timeout on exception
+ * [THRIFT-2907] - 'ntohll' macro redefined
+ * [THRIFT-2884] - Map does not serialize correctly for JSON protocol in Go library
+ * [THRIFT-2887] - --with-openssl configure flag is ignored
+ * [THRIFT-2894] - PHP json serializer skips maps with int/bool keys
+ * [THRIFT-2904] - json_protocol_test.go fails
+ * [THRIFT-2906] - library not found for -l:libboost_unit_test_framework.a
+ * [THRIFT-2890] - binary data may lose bytes with JSON transport under specific circumstances
+ * [THRIFT-2891] - binary data may cause a failure with JSON transport under specific circumstances
+ * [THRIFT-2901] - Fix for generated TypeScript functions + indentation of JavaScript maps
+ * [THRIFT-2916] - make check fails for D language
+ * [THRIFT-2918] - Race condition in Python TProcessPoolServer test
+ * [THRIFT-2920] - Erlang Thrift test uses wrong IDL file
+ * [THRIFT-2922] - $TRIAL is used with Python tests but not tested accordingly
+ * [THRIFT-2912] - Autotool build for C++ Qt library is invalid
+ * [THRIFT-2914] - explicit dependency to Lua5.2 fails on some systems
+ * [THRIFT-2910] - libevent is not really optional
+ * [THRIFT-2911] - fix the c++ zeromq transport; the old version does not work
+ * [THRIFT-2915] - Lua generator missing from Visual Studio build project
+ * [THRIFT-2917] - "make clean" breaks test/c_glib
+ * [THRIFT-2919] - Haxe test server timeout too large
+ * [THRIFT-2923] - JavaScript client assumes a message being written
+ * [THRIFT-2924] - TNonblockingServer crashes when user-provided event_base is used
+ * [THRIFT-2925] - CMake build does not work with OpenSSL nor anything installed in non-system location
+ * [THRIFT-2931] - Access to undeclared static property: Thrift\Protocol\TProtocol::$TBINARYPROTOCOLACCELERATED
+ * [THRIFT-2893] - CMake build fails with boost thread or std thread
+ * [THRIFT-2902] - Generated c_glib code does not compile with clang
+ * [THRIFT-2903] - Qt4 library built with CMake does not work
+ * [THRIFT-2942] - CSharp generates invalid code for properties named read or write
+ * [THRIFT-2932] - Node.js Thrift connection libraries throw Exceptions into event emitter
+ * [THRIFT-2933] - v0.9.2: doubles encoded in node with compact protocol cannot be decoded by python
+ * [THRIFT-2934] - createServer signature mismatch
+ * [THRIFT-2981] - IDL with no namespace produces unparsable PHP
+ * [THRIFT-2999] - Addition of .gitattributes text auto in THRIFT-2724 causes modified files on checkout
+ * [THRIFT-2949] - typo in compiler/cpp/README.md
+ * [THRIFT-2957] - warning: source file %s is in a subdirectory, but option 'subdir-objects' is disabled
+ * [THRIFT-2953] - TNamedPipeServerTransport is not Stop()able
+ * [THRIFT-2962] - Docker Thrift env for development and testing
+ * [THRIFT-2971] - C++ test and tutorial parallel build is unstable
+ * [THRIFT-2972] - Missing backslash in lib/cpp/test/Makefile.am
+ * [THRIFT-2951] - Fix Erlang name conflict test
+ * [THRIFT-2955] - Using list of typedefs does not compile on Go
+ * [THRIFT-2960] - namespace regression for Ruby
+ * [THRIFT-2959] - nodejs: fix binary unit tests
+ * [THRIFT-2966] - nodejs: Fix bad references to TProtocolException and TProtocolExceptionType
+ * [THRIFT-2970] - grunt-jsdoc fails due to dependency issues
+ * [THRIFT-3001] - C# Equals fails for binary fields (byte[])
+ * [THRIFT-3003] - Missing LICENSE file prevents package from being installed
+ * [THRIFT-3008] - Node.js server does not fully support exception
+ * [THRIFT-3007] - Travis build is broken because of directory conflict
+ * [THRIFT-3009] - TSSLSocket does not use the correct hostname (breaks certificate checks)
+ * [THRIFT-3011] - C# test server testException() not implemented according to specs
+ * [THRIFT-3012] - Timing problems in NamedPipe implementation due to unnecessary open/close
+ * [THRIFT-3019] - Golang generator missing docstring for structs
+ * [THRIFT-3021] - Service remote tool does not import stub package with package prefix
+ * [THRIFT-3026] - TMultiplexedProcessor does not have a constructor
+ * [THRIFT-3028] - Regression caused by THRIFT-2180
+ * [THRIFT-3017] - order of map key/value types incorrect for one CTOR
+ * [THRIFT-3020] - Cannot compile thrift as C++03
+ * [THRIFT-3024] - User-Agent "BattleNet" used in some Thrift library files
+ * [THRIFT-3047] - Uneven calls to indent_up and indent_down in Cocoa generator
+ * [THRIFT-3048] - NodeJS decoding of I64 is inconsistent across protocols
+ * [THRIFT-3043] - go compiler generator uses non C++98 code
+ * [THRIFT-3044] - Docker README.md paths to Dockerfiles are incorrect
+ * [THRIFT-3040] - bower.json wrong "main" path
+ * [THRIFT-3051] - Go Thrift generator creates bad go code
+ * [THRIFT-3057] - Java compiler build is broken
+ * [THRIFT-3061] - C++ TSSLSocket shutdown delay/vulnerability
+ * [THRIFT-3062] - C++ TServerSocket invalid port number (over 999999) causes stack corruption
+ * [THRIFT-3065] - Update libthrift dependencies (slf4j, httpcore, httpclient)
+ * [THRIFT-3244] - TypeScript: fix namespace of included types
+ * [THRIFT-3246] - Reduce the number of trivial warnings in Windows C++ CMake builds
+ * [THRIFT-3224] - Fix TNamedPipeServer unpredictable behavior on accept
+ * [THRIFT-3230] - Python compiler generates wrong code if there is a function throwing a typedef of an exception from another namespace
+ * [THRIFT-3236] - MaxSkipDepth never checked
+ * [THRIFT-3239] - Limit recursion depth
+ * [THRIFT-3241] - fatal error: runtime: cannot map pages in arena address space
+ * [THRIFT-3242] - OSGi Import-Package directive is missing the Apache HTTP packages
+ * [THRIFT-3234] - Limit recursion depth
+ * [THRIFT-3222] - TypeScript: Generated Enums are quoted
+ * [THRIFT-3229] - unexpected Timeout exception when desired bytes are only partially available
+ * [THRIFT-3231] - CPP: Limit recursion depth to 64
+ * [THRIFT-3235] - Limit recursion depth
+ * [THRIFT-3175] - fastbinary.c python deserialize can cause huge allocations from garbage
+ * [THRIFT-3176] - Union incorrectly implements ==
+ * [THRIFT-3177] - Fails to run rake test
+ * [THRIFT-3180] - lua plugin: framed transport does not work
+ * [THRIFT-3179] - lua plugin can't connect to remote server because function l_socket_create_and_connect always binds the socket to localhost
+ * [THRIFT-3248] - TypeScript: additional comma in method signature without parameters
+ * [THRIFT-3302] - Go JSON protocol should encode Thrift byte type as signed integer string
+ * [THRIFT-3297] - c_glib: an abstract base class is not generated
+ * [THRIFT-3294] - TZlibTransport for Java does not write data correctly
+ * [THRIFT-3296] - Go cross test does not conform to spec
+ * [THRIFT-3295] - C# library does not build on Mono 4.0.2.5 or later
+ * [THRIFT-3293] - JavaScript: null values turn into empty structs in constructor
+ * [THRIFT-3310] - lib/erl/README.md has incorrect formatting
+ * [THRIFT-3319] - CSharp tutorial will not build using the *.sln
+ * [THRIFT-3335] - Ruby server does not handle processor exception
+ * [THRIFT-3338] - Stray underscore in generated go when service name starts with "New"
+ * [THRIFT-3324] - Update Go Docs for pulling all packages
+ * [THRIFT-3345] - Clients blocked indefinitely when a java.lang.Error is thrown
+ * [THRIFT-3332] - make dist fails on clean build
+ * [THRIFT-3326] - Tests do not compile under *BSD
+ * [THRIFT-3334] - Markdown notation of protocol spec is malformed
+ * [THRIFT-3331] - warning: ‘etype’ may be used uninitialized in this function
+ * [THRIFT-3349] - Python server does not handle processor exception
+ * [THRIFT-3343] - Fix haskell README
+ * [THRIFT-3340] - Python: enable json tests again
+ * [THRIFT-3311] - Top level README.md has incorrect formatting
+ * [THRIFT-2936] - Minor memory leak in SSL
+ * [THRIFT-3290] - Using 'from' in variable names causes the generated Python code to have errors
+ * [THRIFT-3225] - Fix TPipeServer unpredictable behavior on interrupt()
+ * [THRIFT-3354] - Fix word-extraction substr bug in initialism code
+ * [THRIFT-2006] - TBinaryProtocol message header call name length is not validated and can be used to core the server
+ * [THRIFT-3329] - C++ library unit tests don't compile against the new boost-1.59 unit test framework
+ * [THRIFT-2630] - windows7 64bit pc. ipv4 and ipv6 pc.can't use
+ * [THRIFT-3336] - Thrift generated streaming operators added in 0.9.2 cannot be overridden
+ * [THRIFT-2681] - Core of unwind_cleanup
+ * [THRIFT-3317] - cpp namespace org.apache issue appears in 0.9
+
+## Documentation
+ * [THRIFT-3286] - Apache Ant is a necessary dependency
+
+## Improvement
+ * [THRIFT-227] - Byte[] in collections aren't pretty printed like regular binary fields
+ * [THRIFT-2744] - Vagrantfile for Centos 6.5
+ * [THRIFT-2644] - Haxe support
+ * [THRIFT-2756] - register Media Type @ IANA
+ * [THRIFT-3076] - Compatibility with Haxe 3.2.0
+ * [THRIFT-3081] - C++ Consolidate client processing loops in TServers
+ * [THRIFT-3083] - C++ Consolidate server processing loops in TSimpleServer, TThreadedServer, TThreadPoolServer
+ * [THRIFT-3084] - C++ add concurrent client limit to threaded servers
+ * [THRIFT-3074] - Add compiler/cpp/lex.yythriftl.cc to gitignore.
+ * [THRIFT-3134] - Remove use of deprecated "phantom.args"
+ * [THRIFT-3133] - Allow "make cross" and "make precross" to run without building all languages
+ * [THRIFT-3142] - Make JavaScript use downloaded libraries
+ * [THRIFT-3141] - Improve logging of JavaScript test
+ * [THRIFT-3144] - Proposal: make String representation of enums in generated go code less verbose
+ * [THRIFT-3130] - Remove the last vestiges of THRIFT_OVERLOAD_IF from THRIFT-1316
+ * [THRIFT-3131] - Consolidate suggested import path for go thrift library to git.apache.org in docs and code
+ * [THRIFT-3092] - Generated Haskell types should derive Generic
+ * [THRIFT-3110] - Print error log after cross test failures on Travis
+ * [THRIFT-3114] - Using local temp variables to not pollute the global table
+ * [THRIFT-3106] - CMake summary should give more information why a library is set to off
+ * [THRIFT-3119] - Java's TThreadedSelectorServer has indistinguishable log messages in run()
+ * [THRIFT-3122] - Javascript struct constructor should properly initialize struct and container members from plain js arguments
+ * [THRIFT-3151] - Fix links to git-wip* - should be git.apache.org
+ * [THRIFT-3167] - Windows build from source instructions need to be revised
+ * [THRIFT-3155] - move contrib/mingw32-toolchain.cmake to build/cmake/
+ * [THRIFT-3160] - Make generated go enums implement TextMarshaller and TextUnmarshaller interfaces
+ * [THRIFT-3150] - Add an option to thrift go generator to make Read and Write methods private
+ * [THRIFT-3149] - Make ReadFieldN methods in generated Go code private
+ * [THRIFT-3172] - Add tutorial to Thrift web site
+ * [THRIFT-3214] - Add Erlang option for using maps instead of dicts
+ * [THRIFT-3201] - Capture github test artifacts for failed builds
+ * [THRIFT-3266] - c_glib: Multiple compiler warnings building unit tests
+ * [THRIFT-3285] - c_glib: Build library with all warnings enabled, no warnings generated
+ * [THRIFT-1954] - Allow for a separate connection timeout value
+ * [THRIFT-2098] - Add support for Qt5+
+ * [THRIFT-2199] - Remove Dense protocol (was: move to Contrib)
+ * [THRIFT-406] - C++ Test suite cleanup
+ * [THRIFT-902] - socket and connect timeout in TSocket should be distinguished
+ * [THRIFT-388] - Use a separate wire format for async calls
+ * [THRIFT-727] - support native C++ language specific exception message
+ * [THRIFT-1784] - pep-3110 compliance for exception handling
+ * [THRIFT-1025] - C++ ServerSocket should inherit from Socket with the necessary Ctor to listen on connections from a specific host
+ * [THRIFT-2269] - Can deploy libthrift-source.jar to maven central repository
+ * [THRIFT-2804] - Pull an interface out of TBaseAsyncProcessor
+ * [THRIFT-2806] - more whitespace fixups
+ * [THRIFT-2811] - Make remote socket address accessible
+ * [THRIFT-2809] - .gitignore update for compiler's visual project
+ * [THRIFT-2846] - Expose ciphers parameter from ssl.wrap_socket()
+ * [THRIFT-2859] - JSON generator: output complete descriptors
+ * [THRIFT-2861] - add buffered transport
+ * [THRIFT-2865] - Test case for Go: SeqId out of sequence
+ * [THRIFT-2866] - Go generator source code is hard to read and maintain
+ * [THRIFT-2880] - Read the network address from the listener if available.
+ * [THRIFT-2875] - Typo in TDenseProtocol.h comment
+ * [THRIFT-2874] - TBinaryProtocol member variable "string_buf_" is never used.
+ * [THRIFT-2855] - Move contributing.md to the root of the repository
+ * [THRIFT-2862] - Enable RTTI and/or build macros for generated code
+ * [THRIFT-2876] - Add test for THRIFT-2526 Assignment operators and copy constructors in c++ don't copy the __isset struct
+ * [THRIFT-2897] - Generate -isEqual: and -hash methods
+ * [THRIFT-2909] - Improve travis build
+ * [THRIFT-2921] - Make Erlang impl ready for OTP 18 release (dict/0 and set/0 are deprecated)
+ * [THRIFT-2928] - Rename the erlang test_server module
+ * [THRIFT-2940] - Allow installing Thrift from git as NPM module by providing package.json in top level directory
+ * [THRIFT-2937] - Allow setting a maximum frame size in TFramedTransport
+ * [THRIFT-2976] - nodejs: xhr and websocket support for browserify
+ * [THRIFT-2996] - Test for Haxe 3.1.3 or better
+ * [THRIFT-2969] - nodejs: DRY up library tests
+ * [THRIFT-2973] - Update Haxe lib readme regarding Haxe 3.1.3
+ * [THRIFT-2952] - Improve handling of Server.Stop()
+ * [THRIFT-2964] - nodejs: move protocols and transports into separate files
+ * [THRIFT-2963] - nodejs - add test coverage
+ * [THRIFT-3006] - Attach 'omitempty' json tag for optional fields in Go
+ * [THRIFT-3027] - Go compiler does not ensure common initialisms have consistent case
+ * [THRIFT-3030] - TThreadedServer: Property for number of clientThreads
+ * [THRIFT-3023] - Go compiler is a little overly conservative with names of attributes
+ * [THRIFT-3018] - Compact protocol for Delphi
+ * [THRIFT-3025] - Change pure Int constants into @enums (where possible)
+ * [THRIFT-3031] - migrate "shouldStop" flag to TServer
+ * [THRIFT-3022] - Compact protocol for Haxe
+ * [THRIFT-3041] - Generate asynchronous clients for Cocoa
+ * [THRIFT-3053] - Perl SSL Socket Support (Encryption)
+ * [THRIFT-3247] - Generate a C++ thread-safe client
+ * [THRIFT-3217] - Provide a little endian variant of the binary protocol in C++
+ * [THRIFT-3223] - TypeScript: Add initial support for Enum Maps
+ * [THRIFT-3220] - Option to suppress @Generated Annotation entirely
+ * [THRIFT-3300] - Reimplement TZlibTransport in Java using streams
+ * [THRIFT-3288] - c_glib: Build unit tests with all warnings enabled, no warnings generated
+ * [THRIFT-3347] - Improve cross test servers and clients
+ * [THRIFT-3342] - Improve ruby cross test client and server compatibility
+ * [THRIFT-2296] - Add C++ Base class for service
+ * [THRIFT-3337] - Add testBool method to cross tests
+ * [THRIFT-3303] - Disable concurrent cabal jobs on Travis to avoid GHC crash
+ * [THRIFT-2623] - Docker container for Thrift
+ * [THRIFT-3298] - thrift endian converters may conflict with other libraries
+ * [THRIFT-1559] - Provide memory pool for TBinaryProtocol to eliminate memory fragmentation
+ * [THRIFT-424] - Steal ProtocolBuffers' VarInt implementation for C++
+
+## New Feature
+ * [THRIFT-3070] - Add ability to set the LocalCertificateSelectionCallback
+ * [THRIFT-1909] - Java: Add compiler flag to use the "option pattern" for optional fields
+ * [THRIFT-2099] - Stop TThreadPoolServer with alive connections.
+ * [THRIFT-123] - implement TZlibTransport in Java
+ * [THRIFT-2368] - New option: reuse-objects for Java generator
+ * [THRIFT-2836] - Optionally generate C++11 MoveConstructible types
+ * [THRIFT-2824] - Flag to disable html escaping doctext
+ * [THRIFT-2819] - Add WebSocket client to node.js
+ * [THRIFT-3050] - Client certificate authentication for non-http TLS in C#
+ * [THRIFT-3292] - Implement TZlibTransport in Go
+
+## Question
+ * [THRIFT-2583] - Thrift on xPC target (SpeedGoat)
+ * [THRIFT-2592] - thrift server using c_glib
+ * [THRIFT-2832] - c_glib: Handle string lists correctly
+ * [THRIFT-3136] - thrift installation problem on mac
+ * [THRIFT-3346] - c_glib: Tutorial example crashes saying Calculator.ping implementation returned FALSE but did not set an error
+
+## Sub-task
+ * [THRIFT-2578] - Moving 'make cross' from test.sh to test.py
+ * [THRIFT-2734] - Go coding standards
+ * [THRIFT-2748] - Add Vagrantfile for Centos 6.5
+ * [THRIFT-2753] - Misc. Haxe improvements
+ * [THRIFT-2640] - Compact Protocol in Cocoa
+ * [THRIFT-3262] - warning: overflow in implicit constant conversion in DenseProtoTest.cpp
+ * [THRIFT-3194] - Can't build with go enabled. gomock SCC path incorrect.
+ * [THRIFT-3275] - c_glib tutorial warnings in generated code
+ * [THRIFT-1125] - Multiplexing support for the Ruby Library
+ * [THRIFT-2807] - PHP Code Style
+ * [THRIFT-2841] - Add comprehensive integration tests for the whole Go stack
+ * [THRIFT-2815] - Haxe: Support for Multiplexing Services on any Transport, Protocol and Server
+ * [THRIFT-2886] - Integrate binary type in standard Thrift cross test
+ * [THRIFT-2946] - Enhance usability of cross test framework
+ * [THRIFT-2967] - Add .editorconfig to root
+ * [THRIFT-3033] - Perl: Support for Multiplexing Services on any Transport, Protocol and Server
+ * [THRIFT-3174] - Initialism code in the Go compiler doesn't check first word
+ * [THRIFT-3193] - Option to suppress date value in @Generated annotation
+ * [THRIFT-3305] - Missing dist files for 0.9.3 release candidate
+ * [THRIFT-3341] - Add testBool methods
+ * [THRIFT-3308] - Fix broken test cases for 0.9.3 release candidate
+
+## Task
+ * [THRIFT-2834] - Remove semi-colons from python code generator
+ * [THRIFT-2853] - Adjust comments not applying anymore after THRIFT-2852
+
+## Test
+ * [THRIFT-3211] - Add make cross support for php TCompactProtocol
+
+## Wish
+ * [THRIFT-2838] - TNonblockingServer can bind to port 0 (i.e., get an OS-assigned port) but there is no way to get the port number
+
+
+
+Thrift 0.9.2
+--------------------------------------------------------------------------------
+## Bug
+ * [THRIFT-2793] - Go compiler produces uncompilable code
+ * [THRIFT-1481] - Unix domain sockets in C++ do not support the abstract namespace
+ * [THRIFT-1455] - TBinaryProtocolT::writeString casts from size_t to uint32_t, which is not safe on 64-bit platforms
+ * [THRIFT-1579] - PHP Extension - function thrift_protocol_read_binary not working from TBinarySerializer::deserialize
+ * [THRIFT-1584] - Error: could not SetMinThreads in ThreadPool on single-core machines
+ * [THRIFT-1614] - Thrift build from svn repo sources fails with automake-1.12
+ * [THRIFT-1047] - rb_thrift_memory_buffer_write treats arg as string without check, segfaults if you pass non-string
+ * [THRIFT-1639] - Java/Python: Serialization/Deserialization of double type using CompactProtocol
+ * [THRIFT-1647] - NodeJS BufferedTransport does not work beyond the hello-world example
+ * [THRIFT-2130] - Thrift's D library/test: parts of "make check" code do not compile with recent dmd-2.062 through dmd-2.064alpha
+ * [THRIFT-2140] - Error compiling cpp tutorials
+ * [THRIFT-2139] - MSVC 2012 Error - Cannot compile due to BoostThreadFactory
+ * [THRIFT-2138] - pkgconfig file created with wrong include path
+ * [THRIFT-2160] - Warning in thrift.h when compiling with -Wunused and NDEBUG
+ * [THRIFT-2158] - Compact, JSON, and SimpleJSON protocols are not working correctly
+ * [THRIFT-2167] - nodejs lib throws error if options argument isn't passed
+ * [THRIFT-2288] - Go impl of Thrift JSON protocol wrongly writes/expects true/false for bools
+ * [THRIFT-2147] - Thrift IDL grammar allows for dotted identifier names
+ * [THRIFT-2145] - Rack and Thin are not just development dependencies
+ * [THRIFT-2267] - Should be able to choose socket family in Python TSocket
+ * [THRIFT-2276] - java path in spec file needs updating
+ * [THRIFT-2281] - Generated send/recv code ignores errors returned by the underlying protocol
+ * [THRIFT-2280] - TJSONProtocol.Flush() does not really flush the transport
+ * [THRIFT-2274] - TNonblockingServer and TThreadedSelectorServer do not close their channel selectors on exit and leak file descriptors
+ * [THRIFT-2265] - php library doesn't build
+ * [THRIFT-2232] - IsSet* broken in Go
+ * [THRIFT-2246] - Unset enum value is printed by ToString()
+ * [THRIFT-2240] - thrift.vim (contrib) does not correctly handle 'union'
+ * [THRIFT-2243] - TNonblockingServer in thrift crashes when TFramedTransport opens
+ * [THRIFT-2230] - Cannot Build on RHEL/Centos/Amazon Linux 6.x
+ * [THRIFT-2247] - Go generator doesn't deal well with map keys of type binary
+ * [THRIFT-2253] - Python Tornado TTornadoServer base class change
+ * [THRIFT-2261] - java: error: unmappable character for encoding ASCII
+ * [THRIFT-2259] - C#: unexpected null logDelegate() pointer causes AV in TServer.serve()
+ * [THRIFT-2225] - SSLContext destroy before cleanupOpenSSL
+ * [THRIFT-2224] - TSSLSocket.h and TSSLServerSocket.h should use the platform socket too
+ * [THRIFT-2229] - thrift failed to build on OSX 10.9 GM
+ * [THRIFT-2227] - Thrift compiler generates spurious warnings with Xlint
+ * [THRIFT-2219] - Thrift gem fails to build on OS X Mavericks with 1.9.3 rubies
+ * [THRIFT-2226] - TServerSocket - keepAlive wrong initialization order
+ * [THRIFT-2285] - TJsonProtocol implementation for Java doesn't allow a slash (/) to be escaped (\/)
+ * [THRIFT-2216] - Extraneous semicolon in TProtocolUtil.h makes clang mad
+ * [THRIFT-2215] - Generated HTML/Graphviz lists referenced enum identifiers as UNKNOWN.
+ * [THRIFT-2211] - Exception constructor does not contain namespace prefix.
+ * [THRIFT-2210] - lib/java TSimpleJSONProtocol can emit invalid JSON
+ * [THRIFT-2209] - Ruby generator -- please namespace classes
+ * [THRIFT-2202] - Delphi TServerImpl.DefaultLogDelegate may stop the server with I/O-Error 105
+ * [THRIFT-2201] - Ternary operator returns different types (build error for some compilers)
+ * [THRIFT-2200] - nested structs cause generate_fingerprint() to slow down at excessive CPU load
+ * [THRIFT-2197] - fix jar output directory in rpm spec file
+ * [THRIFT-2196] - Fix invalid dependency in Makefile.am
+ * [THRIFT-2194] - Node: Not actually prepending residual data in TFramedTransport.receiver
+ * [THRIFT-2193] - Java code generator emits spurious semicolon when deep copying binary data
+ * [THRIFT-2191] - Fix csharp JSONProtocol.ReadJSONDouble (specify InvariantCulture)
+ * [THRIFT-2214] - System header sys/param.h is included inside the Thrift namespace
+ * [THRIFT-2178] - Thrift generator returns error exit code on --version
+ * [THRIFT-2171] - NodeJS implementation has extremely low test coverage
+ * [THRIFT-2183] - gem install fails on zsh
+ * [THRIFT-2182] - segfault in regression tests (GC bug in rb_thrift_memory_buffer_write)
+ * [THRIFT-2181] - oneway calls don't work in NodeJS
+ * [THRIFT-2169] - JavaME Thrift Library causes "java.io.IOException: No Response Entries Available" after using the Thrift client for some time
+ * [THRIFT-2168] - Node.js appears broken (at least, examples don't work as intended)
+ * [THRIFT-2293] - TSSLTransportFactory.createSSLContext() leaves files open
+ * [THRIFT-2279] - TSerializer only returns the first 1024 bytes serialized
+ * [THRIFT-2278] - Buffered transport doesn't support writes > buffer size
+ * [THRIFT-2275] - Fix memory leak in golang compact_protocol.
+ * [THRIFT-2282] - Incorrect code generated for some typedefs
+ * [THRIFT-2009] - Go redeclaration error
+ * [THRIFT-1964] - 'Isset' causes problems with C#/.NET serializers
+ * [THRIFT-2026] - Fix TCompactProtocol 64 bit builds
+ * [THRIFT-2108] - Fix TAsyncClientManager timeout race
+ * [THRIFT-2068] - Multiple calls from same connection are not processed in node
+ * [THRIFT-1750] - Make compiler build cleanly under visual studio 10
+ * [THRIFT-1755] - Comment parsing bug
+ * [THRIFT-1771] - "make check" fails on x64 for libboost_unit_test_framework.a
+ * [THRIFT-1841] - NodeJS Thrift incorrectly parses non-UTF8-string types
+ * [THRIFT-1908] - Using php thrift_protocol accelerated transfer causes core dump
+ * [THRIFT-1892] - Socket timeouts are declared in milli-seconds, but are actually set in micro-seconds
+ * [THRIFT-2303] - TBufferredTransport not properly closing underlying transport
+ * [THRIFT-2313] - nodejs server crash after processing the first request when using MultiplexedProcessor/FramedBuffer/BinaryProtocol
+ * [THRIFT-2311] - Go: invalid code generated when exception name is a go keyword
+ * [THRIFT-2308] - node: TJSONProtocol parse error when reading from buffered message
+ * [THRIFT-2316] - cpp: TFileTransportTest
+ * [THRIFT-2352] - msvc failed to compile thrift tests
+ * [THRIFT-2337] - Golang does not report TIMED_OUT exceptions
+ * [THRIFT-2340] - Generated server implementation does not send response type EXCEPTION on the Thrift.TApplicationExceptionType.UNKNOWN_METHOD exception
+ * [THRIFT-2354] - Connection errors can lead to case_clause exceptions
+ * [THRIFT-2339] - Uncaught exception in thrift c# driver
+ * [THRIFT-2356] - c++ thrift client not working with ssl (SSL_connect hangs)
+ * [THRIFT-2331] - Missing call to ReadStructBegin() in TApplicationException.Read()
+ * [THRIFT-2323] - Uncompileable Delphi code generated for typedef'd structs
+ * [THRIFT-2322] - Correctly show the number of times ExecutorService (java) has rejected the client.
+ * [THRIFT-2389] - namespaces handled wrongly in actionscript 3.0 implementation
+ * [THRIFT-2388] - GoLang - Fix data races in simple_server and server_socket
+ * [THRIFT-2386] - Thrift refuses to link yylex
+ * [THRIFT-2375] - Excessive 's in generated HTML
+ * [THRIFT-2373] - warning CS0414 in THttpClient.cs: private field 'Thrift.Transport.THttpClient.connection' assigned but never used
+ * [THRIFT-2372] - thrift/json_protocol.go:160: function ends without a return statement
+ * [THRIFT-2371] - ruby bundler version fails on ~1.3.1, remove and take latest avail
+ * [THRIFT-2370] - Compiler SEGFAULTs generating HTML documentation for complex structure
+ * [THRIFT-2384] - Binary map keys produce uncompilable code in go
+ * [THRIFT-2380] - unreachable code (CID 1174546, CID 1174679)
+ * [THRIFT-2378] - service method arguments of binary type lead to uncompileable Go code
+ * [THRIFT-2363] - Issue with character encoding of Success returned from Login using Thrift Proxy and NodeJS
+ * [THRIFT-2359] - TBufferedTransport doesn't clear its buffer on a failed flush call
+ * [THRIFT-2428] - Python 3 setup.py support
+ * [THRIFT-2367] - Build failure: stdlib and boost both define uint64_t
+ * [THRIFT-2365] - C# decodes too many binary bytes from JSON
+ * [THRIFT-2402] - byte count of FrameBuffer in AWAITING_CLOSE state is not subtracted from readBufferBytesAllocated
+ * [THRIFT-2396] - Build Error on MacOSX
+ * [THRIFT-2395] - thrift Ruby gem requires development dependency 'thin' regardless of environment
+ * [THRIFT-2414] - c_glib: fix several bugs.
+ * [THRIFT-2420] - Go argument parser for methods without arguments does not skip fields
+ * [THRIFT-2439] - Bug in TProtocolDecorator Class causes parsing errors
+ * [THRIFT-2419] - golang - Fix fmt.Errorf in generated code
+ * [THRIFT-2418] - Go handler function panics on internal error
+ * [THRIFT-2405] - Node.js Multiplexer tests fail (silently)
+ * [THRIFT-2581] - TFDTransport destructor should not throw
+ * [THRIFT-2575] - Thrift includes siginfo_t within apache::thrift::protocol namespace
+ * [THRIFT-2577] - TFileTransport misuse of closesocket on windows platform
+ * [THRIFT-2576] - Implement Thrift.Protocol.prototype.skip method in JavaScript library
+ * [THRIFT-2588] - Thrift compiler is not buildable in Visual Studio 2010
+ * [THRIFT-2594] - JS Compiler: Single quotes are not being escaped in constants.
+ * [THRIFT-2591] - TFramedTransport does not handle payloads split across packets correctly
+ * [THRIFT-2599] - Uncompileable Delphi code due to naming conflicts with IDL
+ * [THRIFT-2590] - C++ Visual Studio solution doesn't include Multiplexing support
+ * [THRIFT-2595] - Node.js: Fix global leaks and copy-paste errors
+ * [THRIFT-2565] - autoconf fails to find mingw-g++ cross compiler on travis CI
+ * [THRIFT-2555] - excessive "unused field" comments
+ * [THRIFT-2554] - double initialization in generated Read() method
+ * [THRIFT-2551] - OutOfMemoryError "unable to create new native thread" kills serve thread
+ * [THRIFT-2543] - Generated enum type in haskell should be qualified
+ * [THRIFT-2560] - Thrift compiler generator tries to concat ints with strings using +
+ * [THRIFT-2559] - Centos 6.5 unable to "make" with Thrift 0.9.1
+ * [THRIFT-2526] - Assignment operators and copy constructors in c++ don't copy the __isset struct
+ * [THRIFT-2454] - c_glib: There is no gethostbyname_r() in some OS.
+ * [THRIFT-2451] - Do not use pointers for optional fields with defaults. Do not write such fields if their value is set to the default. Also, do not use pointers for any optional fields mapped to a go map or slice. Generate Get accessors
+ * [THRIFT-2450] - include HowToContribute in the src repo
+ * [THRIFT-2448] - thrift/test/test.sh has incorrect Node.js test path
+ * [THRIFT-2460] - unopened socket fd must be less than zero.
+ * [THRIFT-2459] - --version should not exit 1
+ * [THRIFT-2468] - Timestamp handling
+ * [THRIFT-2467] - Unable to build contrib/fb303 on OSX 10.9.2
+ * [THRIFT-2466] - Improper error handling for SSL/TLS connections that don't complete a handshake
+ * [THRIFT-2463] - test/py/RunClientServer.py fails sometimes
+ * [THRIFT-2458] - Generated golang server code for "oneway" methods is incorrect
+ * [THRIFT-2456] - THttpClient fails when using async support outside Silverlight
+ * [THRIFT-2524] - Visual Studio project is missing TThreadedServer files
+ * [THRIFT-2523] - Visual Studio project is missing OverlappedSubmissionThread files
+ * [THRIFT-2520] - cpp:cob_style generates incorrect .tcc file
+ * [THRIFT-2508] - Uncompileable C# code due to language keywords in IDL
+ * [THRIFT-2506] - Update TProtocolException error codes to be used consistently throughout the library
+ * [THRIFT-2505] - go: struct should always be a pointer to avoid copying of potentially size-unbounded structs
+ * [THRIFT-2515] - TLS Method error during make
+ * [THRIFT-2503] - C++: Fix name collision when a struct has a member named "val"
+ * [THRIFT-2477] - thrift --help text with misplaced comma
+ * [THRIFT-2492] - test/cpp does not compile on mac
+ * [THRIFT-2500] - sending random data crashes thrift(golang) service
+ * [THRIFT-2475] - c_glib: buffered_transport_write function always returns TRUE.
+ * [THRIFT-2495] - JavaScript/Node string constants lack proper escaping
+ * [THRIFT-2491] - unable to import generated ThriftTest service
+ * [THRIFT-2490] - c_glib: if reading an exception from the server fails, the client may perform a double free
+ * [THRIFT-2470] - THttpHandler swallows exceptions from processor
+ * [THRIFT-2533] - Boost version in requirements should be updated
+ * [THRIFT-2532] - Java version in installation requirements should be updated
+ * [THRIFT-2529] - TBufferedTransport split Tcp data bug in nodeJs
+ * [THRIFT-2537] - Path for "go get" does not work (pull request 115)
+ * [THRIFT-2443] - Node fails cross lang tests
+ * [THRIFT-2437] - Author fields in Python setup.py must be strings not lists.
+ * [THRIFT-2435] - Java compiler doesn't like struct member names that are identical to an existing enum or struct type
+ * [THRIFT-2434] - Missing namespace import for php TMultiplexedProcessor implementation
+ * [THRIFT-2432] - Flaky parallel build
+ * [THRIFT-2430] - Crash during TThreadPoolServer shutdown
+ * [THRIFT-667] - Period should not be allowed in identifier names
+ * [THRIFT-1212] - Members capital case conflict
+ * [THRIFT-2584] - Error handler not listened on javascript client
+ * [THRIFT-2294] - Incorrect Makefile generation
+ * [THRIFT-2601] - Fix vagrant to work for builds again
+ * [THRIFT-2092] - TNonblocking server should release handler as soon as connection closes
+ * [THRIFT-2557] - CS0542 member names cannot be the same as their enclosing type
+ * [THRIFT-2605] - TSocket warning on gcc 4.8.3
+ * [THRIFT-2607] - ThreadManager.cpp warning on clang++ 3.4
+ * [THRIFT-1998] - TCompactProtocol.tcc - one more warning on Visual 2010
+ * [THRIFT-2610] - MSVC warning in TSocket.cpp
+ * [THRIFT-2614] - TNonblockingServer.cpp warnings on MSVC
+ * [THRIFT-2608] - TNonblockingServer.cpp warnings on clang 3.4
+ * [THRIFT-2606] - ThreadManager.h warning in clang++ 3.4
+ * [THRIFT-2609] - TFileTransport.h unused field warning (clang 3.4)
+ * [THRIFT-2416] - Cannot use TCompactProtocol with MSVC
+ * [THRIFT-1803] - Ruby Thrift 0.9.0 tries to encode UUID to UTF8 and crashes
+ * [THRIFT-2385] - Problem with gethostbyname2 during make check
+ * [THRIFT-2262] - thrift server 'MutateRow' operation gives no indication of success / failure
+ * [THRIFT-2048] - Prefer boolean context to nullptr_t conversion
+ * [THRIFT-2528] - Thrift Erlang Library: Multiple thrift applications in one bundle
+ * [THRIFT-1999] - warning on gcc 4.7 while compiling BoostMutex.cpp
+ * [THRIFT-2104] - Structs lose binary data when transferred from server to client in Java
+ * [THRIFT-2184] - undefined method rspec_verify for Thrift::MemoryBufferTransport
+ * [THRIFT-2351] - PHP TCompactProtocol fails to decode messages
+ * [THRIFT-2016] - Resource Leak in thrift struct under compiler/cpp/src/parse/t_function.h
+ * [THRIFT-2273] - Please delete old releases from mirroring system
+ * [THRIFT-2270] - Faulty library version numbering at build or documentation
+ * [THRIFT-2203] - Tests keep failing on Jenkins and Travis CI
+ * [THRIFT-2399] - thrift.el: recognize "//"-style comments in emacs thrift-mode
+ * [THRIFT-2582] - "FileTransport error" exception is raised when trying to use Java's TFileTransport
+ * [THRIFT-1682] - Multiple threads calling a Service function unsafely cause message corruption and termination with Broken Pipe
+ * [THRIFT-2357] - recurse option has no effect when generating php
+ * [THRIFT-2248] - Go generator doesn't deal well with map keys of type binary
+ * [THRIFT-2426] - clarify IP rights and contributions from fbthrift
+ * [THRIFT-2041] - TNonblocking server compilation on windows (ARITHMETIC_RIGHT_SHIFT)
+ * [THRIFT-2400] - thrift.el: recognize "//"-style comments in emacs thrift-mode
+ * [THRIFT-1717] - Fix deb build in jenkins
+ * [THRIFT-2266] - ThreadManager.h:24:10: fatal error: 'tr1/functional' file not found on Mac 10.9 (Mavericks)
+ * [THRIFT-1300] - Test failures with parallel builds (make -j)
+ * [THRIFT-2487] - Tutorial requires two IDL files but only one is linked from the Thrift web site
+ * [THRIFT-2329] - missing release tags within git
+ * [THRIFT-2306] - concurrent client calls with nodejs
+ * [THRIFT-2222] - ruby gem cannot be compiled on OS X mavericks
+ * [THRIFT-2381] - code generated by thrift2/hbase.thrift fails to compile
+ * [THRIFT-2390] - no close event when connection lost
+ * [THRIFT-2146] - Unable to pass multiple "--gen" options to the thrift compiler
+ * [THRIFT-2438] - Unexpected readFieldEnd call causes JSON Parsing errors
+ * [THRIFT-2498] - Error message "Invalid method name" while trying to call HBase Thrift API
+ * [THRIFT-841] - Build cruft
+ * [THRIFT-2570] - Wrong URL given in http://thrift.apache.org/developers
+ * [THRIFT-2604] - Fix debian packaging
+ * [THRIFT-2618] - Unignore /aclocal files required for build
+ * [THRIFT-2562] - ./configure creates Makefile in lib/d with errors
+ * [THRIFT-2593] - Unable to build thrift on ubuntu-12.04 (Precise)
+ * [THRIFT-2461] - Can't install thrift-0.8.0 on OS X 10.9.2
+ * [THRIFT-2602] - Fix missing dist files
+ * [THRIFT-2620] - Fix python packaging
+ * [THRIFT-2545] - Test CPP fails to build (possibly typo)
+
+## Documentation
+ * [THRIFT-2155] - Adding one liner guide to rename the version.h.in and rename thrifty.cc.h
+ * [THRIFT-1991] - Add exceptions to examples
+ * [THRIFT-2334] - add a tutorial for node JS
+ * [THRIFT-2392] - Actionscript tutorial
+ * [THRIFT-2383] - contrib: sample for connecting Thrift with Rebus
+ * [THRIFT-2382] - contrib: sample for connecting Thrift with STOMP
+
+## Improvement
+ * [THRIFT-1457] - Capacity of TFramedTransport write buffer is never reset
+ * [THRIFT-1135] - Node.js tutorial
+ * [THRIFT-1371] - Socket timeouts (SO_RCVTIMEO and SO_SNDTIMEO) not supported on Solaris
+ * [THRIFT-2142] - Minor tweaks to thrift.el for better emacs package compatibility
+ * [THRIFT-2268] - Modify TSaslTransport to ignore TCP health checks from loadbalancers
+ * [THRIFT-2264] - GitHub page incorrectly states that Thrift is still incubating
+ * [THRIFT-2263] - Always generate good hashCode for Java
+ * [THRIFT-2233] - Java compiler should defensively copy its binary inputs
+ * [THRIFT-2239] - Address FindBugs errors
+ * [THRIFT-2249] - Add SMP Build option to thrift.spec (and three config defines)
+ * [THRIFT-2254] - Exceptions generated by Go compiler should implement error interface
+ * [THRIFT-2260] - Thrift imposes unneeded dependency on commons-lang3
+ * [THRIFT-2258] - Add TLS v1.1/1.2 support to TSSLSocket.cpp
+ * [THRIFT-2205] - Node.js Test Server to support test.js JavaScript Browser test and sundry fixes
+ * [THRIFT-2204] - SSL client for the cocoa client
+ * [THRIFT-2172] - Java compiler allocates optionals array for every struct with an optional field
+ * [THRIFT-2185] - use cabal instead of runhaskell in haskell library
+ * [THRIFT-1926] - PHP Constant Generation Refactoring
+ * [THRIFT-2029] - Port C++ tests to Windows
+ * [THRIFT-2054] - TSimpleFileTransport - Java Lib has no straight forward TTransport based file transport
+ * [THRIFT-2040] - "uninitialized variable" warnings on MSVC/windows
+ * [THRIFT-2034] - Give developers' C++ code direct access to socket FDs on server side
+ * [THRIFT-2095] - Use print function for Python 3 compatibility
+ * [THRIFT-1868] - Make the TPC backlog configurable in the Java servers
+ * [THRIFT-1813] - Add @Generated annotation to generated classes
+ * [THRIFT-1815] - Code generators line buffer output
+ * [THRIFT-2305] - TFramedTransport empty constructor should probably be private
+ * [THRIFT-2304] - Move client assignments from constructor into method
+ * [THRIFT-2309] - Ruby (gem) & PHP RPM subpackages
+ * [THRIFT-2318] - perl: dependency Class::Accessor not checked
+ * [THRIFT-2317] - exclude tutorial from build
+ * [THRIFT-2320] - Program level doctext does not get attached by parser
+ * [THRIFT-2349] - Golang - improve tutorial
+ * [THRIFT-2348] - PHP Generator: add array typehint to functions
+ * [THRIFT-2344] - configure.ac: compiler-only option
+ * [THRIFT-2343] - Golang - Return a single error for all exceptions instead of multiple return values
+ * [THRIFT-2341] - Enable generation of Delphi XMLDoc comments (a.k.a. "Help Insight")
+ * [THRIFT-2355] - Add SSL and Web Socket Support to Node and JavaScript
+ * [THRIFT-2350] - Add async calls to normal JavaScript
+ * [THRIFT-2330] - Generate PHPDoc comments
+ * [THRIFT-2332] - RPMBUILD: run bootstrap (if needed)
+ * [THRIFT-2391] - simple socket transport for actionscript 3.0
+ * [THRIFT-2376] - nodejs: allow Promise style calls for client and server
+ * [THRIFT-2369] - Add ssl support for nodejs implementation
+ * [THRIFT-2401] - Haskell tutorial compiles
+ * [THRIFT-2417] - C# Union classes are not partial
+ * [THRIFT-2415] - Named pipes server performance & message mode
+ * [THRIFT-2404] - emit warning on (typically inefficient) list
+ * [THRIFT-2398] - Improve Node Server Library
+ * [THRIFT-2397] - Add CORS and CSP support for JavaScript and Node.js libraries
+ * [THRIFT-2407] - use markdown (rename README => README.md)
+ * [THRIFT-2300] - D configure info output should follow same format as other languages
+ * [THRIFT-2579] - Windows CE support
+ * [THRIFT-2574] - Compiler option to generate namespace directories for Ruby
+ * [THRIFT-2571] - Simplify cross compilation using CMake
+ * [THRIFT-2569] - Introduce file to specify third party library locations on Windows
+ * [THRIFT-2568] - Implement own certificate handler
+ * [THRIFT-2552] - eliminate warning from configure.ac
+ * [THRIFT-2549] - Generate json tag for struct members. Use go.tag annotation to override the default generated tag.
+ * [THRIFT-2544] - Add support for socket transport for c# library when using Windows Phone projects
+ * [THRIFT-2453] - haskell tutorial: fix up division by 0 example
+ * [THRIFT-2449] - Enhance typedef structure to distinguish between forwards and real typedefs
+ * [THRIFT-2446] - There is no way to handle server stream errors
+ * [THRIFT-2455] - Allow client certificates to be used with THttpClient
+ * [THRIFT-2511] - Node.js needs the compact protocol
+ * [THRIFT-2493] - Node.js lib needs HTTP client
+ * [THRIFT-2502] - Optimize go implementations of binary and compact protocols for speed
+ * [THRIFT-2494] - Add enum toString helper function in c_glib
+ * [THRIFT-2471] - Make cpp.ref annotation language agnostic
+ * [THRIFT-2497] - server and client for test/go, also several fixes and improvements
+ * [THRIFT-2535] - TJSONProtocol when serialized yields TField ids rather than names
+ * [THRIFT-2220] - Add a new struct structv?
+ * [THRIFT-1352] - Thrift server
+ * [THRIFT-989] - Push boost m4 macros upstream
+ * [THRIFT-1349] - Remove unnecessary print outs
+ * [THRIFT-2496] - server and client for test/go, also several fixes and improvements
+ * [THRIFT-1114] - Maven publish shouldn't require passwords hardcoded in settings.xml
+ * [THRIFT-2043] - visual 2010 warnings - unreachable code
+ * [THRIFT-1683] - Implement alternatives to Javascript Client side Transport protocol, such as NPAPI and WebSocket.
+ * [THRIFT-1746] - provide a SPDX file
+ * [THRIFT-1772] - Serialization does not check types of embedded structures.
+ * [THRIFT-2387] - nodejs: external imports should be centralized in index.js
+ * [THRIFT-2037] - More general macro THRIFT_UNUSED_VARIABLE
+
+## New Feature
+ * [THRIFT-1012] - Transport for DataInput DataOutput interface
+ * [THRIFT-2256] - Using c++11/c++0x std library replace boost library
+ * [THRIFT-2250] - JSON and MemoryBuffer for JavaME
+ * [THRIFT-2114] - Python Service Remote SSL Option
+ * [THRIFT-1719] - SASL client support for Python
+ * [THRIFT-1894] - Thrift multi-threaded async Java Server using Java 7 AsynchronousChannelGroup
+ * [THRIFT-1893] - HTTP/JSON server/client for node js
+ * [THRIFT-2347] - C# TLS Transport based on THRIFT-181
+ * [THRIFT-2377] - Allow addition of custom HTTP Headers to an HTTP Transport
+ * [THRIFT-2408] - Named Pipe Transport Option for C#
+ * [THRIFT-2572] - Add string/collection length limit checks (from C++) to java protocol readers
+ * [THRIFT-2469] - "java:fullcamel" option to automatically camel-case underscored attribute names
+ * [THRIFT-795] - Importing service functions (simulation multiple inheritance)
+ * [THRIFT-2164] - Add a Get/Post Http Server to Node along with examples
+ * [THRIFT-2255] - add Parent Class for generated Struct class
+
+## Question
+ * [THRIFT-2539] - Tsocket.cpp addrinfo ai_flags = AI_ADDRCONFIG
+ * [THRIFT-2440] - how to connect as3 to java by thrift
+ * [THRIFT-2379] - Memory leak while using multithreading in C++ server.
+ * [THRIFT-2277] - Thrift: installing fb303 error
+ * [THRIFT-2567] - Csharp slow?
+ * [THRIFT-2573] - thrift 0.9.2 release
+
+## Sub-task
+ * [THRIFT-981] - cocoa: add version Info to the library
+ * [THRIFT-2132] - Go: Support for Multiplexing Services on any Transport, Protocol and Server
+ * [THRIFT-2299] - TJsonProtocol implementation for Ruby does not allow for both possible slash (solidus) encodings
+ * [THRIFT-2298] - TJsonProtocol implementation for C# does not allow for both possible slash (solidus) encodings
+ * [THRIFT-2297] - TJsonProtocol implementation for Delphi does not allow for both possible slash (solidus) encodings
+ * [THRIFT-2271] - JavaScript: Support for Multiplexing Services
+ * [THRIFT-2251] - go test for compact protocol is not running
+ * [THRIFT-2195] - Delphi: Add event handlers for server and processing events
+ * [THRIFT-2176] - TSimpleJSONProtocol.ReadFieldBegin() does not return field type and ID
+ * [THRIFT-2175] - Wrong field type set for binary
+ * [THRIFT-2174] - Deserializing JSON fails in specific cases
+ * [THRIFT-2053] - NodeJS: Support for Multiplexing Services
+ * [THRIFT-1914] - Python: Support for Multiplexing Services on any Transport, Protocol and Server
+ * [THRIFT-1810] - add ruby to test/test.sh
+ * [THRIFT-2310] - PHP: Client-side support for Multiplexing Services
+ * [THRIFT-2346] - C#: UTF-8 sent by PHP as JSON is not understood by TJsonProtocol
+ * [THRIFT-2345] - Delphi: UTF-8 sent by PHP as JSON is not understood by TJsonProtocol
+ * [THRIFT-2338] - First doctext wrongly interpreted as program doctext in some cases
+ * [THRIFT-2325] - SSL test certificates
+ * [THRIFT-2358] - C++: add compact protocol to cross language test suite
+ * [THRIFT-2425] - PHP: Server-side support for Multiplexing Services
+ * [THRIFT-2421] - Tree/Recursive struct support in thrift
+ * [THRIFT-2290] - Update Go tutorial to align with THRIFT-2232
+ * [THRIFT-2558] - CSharp compiler generator tries to concat ints with strings using +
+ * [THRIFT-2507] - Additional LUA TProtocolException error code needed?
+ * [THRIFT-2499] - Compiler: allow annotations without "= value"
+ * [THRIFT-2534] - Cross language test results should recorded to a status.md or status.html file automatically
+ * [THRIFT-66] - Java: Allow multiplexing multiple services over a single TCP connection
+ * [THRIFT-1681] - Add Lua Support
+ * [THRIFT-1727] - Ruby-1.9: data loss: "binary" fields are re-encoded
+ * [THRIFT-1726] - Ruby-1.9: "binary" fields are represented by string whose encoding is "UTF-8"
+ * [THRIFT-988] - perl: add version Info to the library via configure
+ * [THRIFT-334] - Compact Protocol for PHP
+ * [THRIFT-2444] - pull request 88: thrift: clean up enum value assignment
+
+## Task
+ * [THRIFT-2223] - Spam links on wiki
+ * [THRIFT-2566] - Please create a DOAP file for your TLP
+ * [THRIFT-2237] - Update archive to contain all versions
+ * [THRIFT-962] - Tutorial page on our website is really unhelpful
+
+## Test
+ * [THRIFT-2327] - nodejs: nodejs test suite should be bundled with the library
+ * [THRIFT-2445] - THRIFT-2384 (code generation for go maps with binary keys) should be tested
+ * [THRIFT-2501] - C#: The test parameters from the TestServer and TestClient are different from http://thrift.apache.org/test/
+
+## Wish
+ * [THRIFT-2190] - Add the JavaScript thrift.js lib to the Bower registry
+ * [THRIFT-2076] - boost::optional instead of __isset
+
+
+
+Thrift 0.9.1
+--------------------------------------------------------------------------------
+## Bug
+ * [THRIFT-1440] - debian packaging: minor-ish policy problems
+ * [THRIFT-1402] - Generated Y_types.js does not require() X_types.js when an include in the IDL file was used
+ * [THRIFT-1551] - 2 thrift files define only structs (no service), one includes another; the generated nodejs file didn't have "requires" at the top
+ * [THRIFT-1264] - TSocketClient is queried by run loop after deallocation in Cocoa
+ * [THRIFT-1600] - Thrift Go Compiler and Library out of date with Go 1 Release.
+ * [THRIFT-1603] - Thrift IDL allows for multiple exceptions, args or struct member names to be the same
+ * [THRIFT-1062] - Problems with python tutorials
+ * [THRIFT-864] - default value fails if identifier is a struct
+ * [THRIFT-930] - Ruby and Haskell bindings don't properly support DESTDIR (makes packaging painful)
+ * [THRIFT-820] - The readLength attribute of TBinaryProtocol is used as an instance variable and is decremented on each call of checkReadLength
+ * [THRIFT-1640] - None of the tutorials linked on the website contain content
+ * [THRIFT-1637] - NPM registry does not include version 0.8
+ * [THRIFT-1648] - NodeJS clients always receive 0 for 'double' values.
+ * [THRIFT-1660] - Python Thrift library can be installed with pip but not easy_install
+ * [THRIFT-1657] - Chrome browser sending OPTIONS method before POST in xmlHttpRequest
+ * [THRIFT-2118] - Certificate error handling still incorrect
+ * [THRIFT-2137] - Ruby test lib fails jenkins build #864
+ * [THRIFT-2136] - Vagrant build not compiling java, ruby, php, go libs due to missing dependencies
+ * [THRIFT-2135] - GO lib leaves behind test files that are auto generated
+ * [THRIFT-2134] - mingw-cross-compile script failing with strip errors
+ * [THRIFT-2133] - java TestTBinaryProtocol.java test failing
+ * [THRIFT-2126] - lib/cpp/src/thrift/concurrency/STD* files missing from DIST
+ * [THRIFT-2125] - debian missing from DIST
+ * [THRIFT-2124] - .o, .so, .la, .deps, .libs, gen-* files left tutorials, test and lib/cpp when making DIST
+ * [THRIFT-2123] - GO lib missing files in DIST build
+ * [THRIFT-2121] - Compilation bug for Node.js
+ * [THRIFT-2129] - php ext missing from dist
+ * [THRIFT-2128] - lib GO tests fail with "function ends without a return statement"
+ * [THRIFT-2286] - Failed to compile Thrift 0.9.1 with boost 1.55 using VS2010 when selecting Debug-mt&x64 mode.
+ * [THRIFT-1973] - TCompactProtocol in C# lib does not serialize and deserialize negative int32 and int64 number correctly
+ * [THRIFT-1992] - casts in TCompactProtocol.tcc causing "dereferencing type-punned pointer will break strict-aliasing rules" warnings from gcc
+ * [THRIFT-1930] - C# generates unsigned byte for Thrift "byte" type
+ * [THRIFT-1929] - Update website to use Mirrors for downloads
+ * [THRIFT-1928] - Race may still exist in TFileTransport::flush()
+ * [THRIFT-1934] - Tabs in Example section on main page are not working
+ * [THRIFT-1933] - Delphi generator crashes when a typedef references another typedef from an included file
+ * [THRIFT-1942] - Binary accelerated cpp extension does not use Thrift namespaces for Exceptions
+ * [THRIFT-1959] - C#: Add Union TMemoryBuffer support
+ * [THRIFT-1958] - C#: Use static Object.Equals instead of .Equals() calls in equals
+ * [THRIFT-1957] - NodeJS TFramedTransport and TBufferedTransport read bytes as unsigned
+ * [THRIFT-1955] - Union Type writer generated in C# does not WriteStructBegin
+ * [THRIFT-1952] - Travis CI
+ * [THRIFT-1949] - WP7 build broken
+ * [THRIFT-1943] - docstrings for enum values are ignored
+ * [THRIFT-2070] - Improper 'HexChar' and 'HexVal' implementation in TJSONProtocol.cs
+ * [THRIFT-2017] - Resource Leak in thrift struct under compiler/cpp/src/parse/t_program.h
+ * [THRIFT-2032] - C# client leaks sockets/handles
+ * [THRIFT-1996] - JavaME Constants generation is broken / inconsistent with regular Java generation
+ * [THRIFT-2002] - Haskell: Test use Data.Maybe instead of Maybe
+ * [THRIFT-2051] - Vagrant fails to build erlang
+ * [THRIFT-2050] - Vagrant C# lib compile fails with TException missing
+ * [THRIFT-1978] - Ruby: Thrift should allow for the SSL verify mode to be set
+ * [THRIFT-1984] - namespace collision in python bindings
+ * [THRIFT-1988] - When trying to build a debian package it fails as the file NEWS doesn't exist
+ * [THRIFT-1975] - TBinaryProtocol CheckLength can't be used for a client
+ * [THRIFT-1995] - '.' allowed at end of identifier generates non-compilable code
+ * [THRIFT-2112] - Error in Go generator when using typedefs in map keys
+ * [THRIFT-2088] - Typos in Thrift compiler help text
+ * [THRIFT-2080] - C# multiplex processor does not catch IOException
+ * [THRIFT-2082] - Executing "gmake clean" is broken
+ * [THRIFT-2102] - constants are not referencing to correct type when included from another thrift file
+ * [THRIFT-2100] - typedefs are not correctly referenced when including from other thrift files
+ * [THRIFT-2066] - 'make install' does not install two headers required for C++ bindings
+ * [THRIFT-2065] - Not valid constants filename in Java
+ * [THRIFT-2047] - Thrift.Protocol.TCompactProtocol, intToZigZag data lost (TCompactProtocol.cs)
+ * [THRIFT-2036] - Thrift gem warns about class variable access from top level
+ * [THRIFT-2057] - Vagrant fails on php tests
+ * [THRIFT-2105] - Generated code for default values of collections ignores t_field::T_REQUIRED
+ * [THRIFT-2091] - Unnecessary 'friend' declaration causes warning in TWinsockSingleton
+ * [THRIFT-2090] - Go generator, fix including of other thrift files
+ * [THRIFT-2106] - Fix support for namespaces in GO generator
+ * [THRIFT-1783] - C# doesn't handle required fields correctly
+ * [THRIFT-1782] - async only defined in silverlight
+ * [THRIFT-1779] - Missing process_XXXX method in generated TProcessor implementation for all 'oneway' service functions
+ * [THRIFT-1692] - SO_REUSEADDR allows for socket hijacking on Windows
+ * [THRIFT-1720] - JRuby times out on successful connection
+ * [THRIFT-1713] - Named and Anonymous Pipe transport (Delphi)
+ * [THRIFT-1699] - Native Union#read has extra read_field_end call
+ * [THRIFT-1749] - Python TSSLSocket error handling obscures actual error
+ * [THRIFT-1748] - Guard and RWGuard macros defined in global namespace
+ * [THRIFT-1734] - Front webpage is still advertising v0.8 as current release
+ * [THRIFT-1729] - C glib refactor left empty folders in svn
+ * [THRIFT-1767] - unions can't have required fields (Delphi)
+ * [THRIFT-1765] - Incorrect error message printed for null or negative keys
+ * [THRIFT-1778] - Configure requires manual intervention due to tar failure
+ * [THRIFT-1777] - TPipeServer is UNSTOPPABLE
+ * [THRIFT-1753] - Multiple C++ Windows, OSX, and iOS portability issues
+ * [THRIFT-1756] - 'make -j 8' fails with "unterminated #ifdef" error
+ * [THRIFT-1773] - Python library should run on python 2.4
+ * [THRIFT-1769] - unions can't have required fields (C++)
+ * [THRIFT-1768] - unions can't have required fields (Compiler)
+ * [THRIFT-1666] - htonll usage in TBinaryProtocol.tcc generates warning with MSVC2010
+ * [THRIFT-1919] - libthrift depends on httpcore-4.1.3 (directly) and httpcore-4.1.4 (transitively)
+ * [THRIFT-1864] - implement event handler for non-blocking server
+ * [THRIFT-1859] - Generated error c++ code with -out and include_prefix param
+ * [THRIFT-1869] - TThreadPoolServer (java) dies when threadpool is consumed
+ * [THRIFT-1842] - Memory leak with Pipes
+ * [THRIFT-1838] - Can't build compiler on OS X because of missing thrifty.h
+ * [THRIFT-1846] - Restore socket.h header to support builds with Android NDK
+ * [THRIFT-1850] - make check hangs on TSocket tests in TransportTest.cpp
+ * [THRIFT-1873] - Binary protocol factory ignores struct read/write flags
+ * [THRIFT-1872] - issues with TBufferedTransport buffer
+ * [THRIFT-1904] - Incorrect code is generated for typedefs which use included types
+ * [THRIFT-1903] - PHP namespaces cause binary protocols to not be used
+ * [THRIFT-1895] - Delphi: reserved variable name "result" not detected properly
+ * [THRIFT-1881] - TNonblockingServer does not release open connections or threads on shutdown
+ * [THRIFT-1888] - Java Thrift client can't connect to Python Thrift server on same host
+ * [THRIFT-1831] - Bug in list deserializer
+ * [THRIFT-1824] - many compile warnings, because Thread.h includes config.h
+ * [THRIFT-1823] - Missing parenthesis breaks "IS_..." macro in generated code
+ * [THRIFT-1806] - Python generation always truncates __init__.py files
+ * [THRIFT-1795] - Race condition in TThreadedServerPool java implementation
+ * [THRIFT-1794] - C# asyncctp broken
+ * [THRIFT-1804] - Binary+compact protocol single byte error in Ruby library (ARM architecture): caused by different char signedness
+ * [THRIFT-1800] - Documentation text not always escaped correctly when rendered to HTML
+ * [THRIFT-1788] - C#: Constants static constructor does not compile
+ * [THRIFT-1816] - Need "require" included thrift files in "xxx_types.js"
+ * [THRIFT-1907] - Compiling namespace and sub-namespace directives for unrecognized generators should only be a warning
+ * [THRIFT-1913] - skipping unknown fields in java unions
+ * [THRIFT-2553] - C++ linker error - transport/TSocket
+ * [THRIFT-274] - Towards a working release/versioning process
+
+## Documentation
+ * [THRIFT-1971] - [Graphviz] Adds tutorial/general description documentation
+ * [THRIFT-2001] - http://thrift.apache.org/ Example "C++ Server" tab is broken
+
+## Improvement
+ * [THRIFT-1574] - Apache project branding requirements: DOAP file [PATCH]
+ * [THRIFT-1347] - Unify the exceptions returned in generated Go code
+ * [THRIFT-1353] - Switch to performance branch, get rid of BinaryParser
+ * [THRIFT-1629] - Ruby 1.9 Compatibility during Thrift configure, make, install
+ * [THRIFT-991] - Refactor Haskell code and generator
+ * [THRIFT-990] - Sanify gettimeofday usage codebase-wide
+ * [THRIFT-791] - Let C++ TSimpleServer be driven by an external main loop
+ * [THRIFT-2117] - Cocoa TBinaryProtocol strictWrite should be set to true by default
+ * [THRIFT-2014] - Change C++ lib includes to use style throughout
+ * [THRIFT-1972] - Add support for async processors
+ * [THRIFT-1970] - [Graphviz] Adds option to render exceptions relationships
+ * [THRIFT-1966] - Support different files for SSL certificates and keys
+ * [THRIFT-1965] - Adds Graphviz (graph description language) generator
+ * [THRIFT-1956] - Switch to Apache Commons Lang 3
+ * [THRIFT-1962] - Multiplex processor should send any TApplicationException back to client
+ * [THRIFT-1960] - main() declares 22 unused gen bools
+ * [THRIFT-1951] - libthrift.jar has source files in it
+ * [THRIFT-1997] - Add accept backlog configuration method to TServerSocket
+ * [THRIFT-2003] - Deprecate senum
+ * [THRIFT-2052] - Vagrant machine image defaults to only 384MB of RAM
+ * [THRIFT-1980] - Modernize Go tooling, fix go client library.
+ * [THRIFT-1977] - C# compiler should generate constant files prefixed with thrift file name
+ * [THRIFT-1985] - add a Vagrantfile to build and test Apache Thrift fully reproducibly
+ * [THRIFT-1994] - Deprecate slist
+ * [THRIFT-1993] - Factory to create instances from known (generated) interface types with Delphi
+ * [THRIFT-2081] - Specified timeout should be used in TSocket.Open()
+ * [THRIFT-2084] - Delphi: Ability to create entity Thrift-generated instances based on TypeInfo
+ * [THRIFT-2083] - Improve the go lib: buffered Transport, save memory allocation, handle concurrent request
+ * [THRIFT-2109] - Secure connections should be supported in Go
+ * [THRIFT-2107] - minor Go generator fixes
+ * [THRIFT-1695] - allow warning-free compilation in VS 2012 and GNU 4.6
+ * [THRIFT-1735] - integrate tutorial into regular build
+ * [THRIFT-1716] - max allowed connections should be PIPE_UNLIMITED_INSTANCES
+ * [THRIFT-1715] - Allow excluding python parts when building contrib/fb303
+ * [THRIFT-1733] - Fix RPM build issues on RHEL6/OL6 systems
+ * [THRIFT-1728] - Upgrade of httpcomponents
+ * [THRIFT-1876] - Use enum names instead of casted integers in assignments
+ * [THRIFT-1874] - timeout for the server-side end of a named pipe
+ * [THRIFT-1897] - Support validation of required fields
+ * [THRIFT-1896] - Add TBase protocol for Cocoa
+ * [THRIFT-1880] - Make named pipes server work asynchronously (overlapped) to allow for clean server stops
+ * [THRIFT-1878] - Add the possibility to send custom headers
+ * [THRIFT-1882] - Use single include
+ * [THRIFT-1793] - C#: Use static read instead of instance read
+ * [THRIFT-1799] - Option to generate HTML in "standalone mode"
+ * [THRIFT-1815] - Code generators line buffer output
+ * [THRIFT-1890] - C++: Make named pipes server work asynchronously
+ * [THRIFT-474] - Generating Ruby on Rails friendly code
+
+## New Feature
+ * [THRIFT-801] - Provide an interactive shell (irb) when generating ruby bindings
+ * [THRIFT-2292] - Android Library Project
+ * [THRIFT-2012] - Modernizing Go
+ * [THRIFT-1969] - C#: Tests not properly linked from the solution
+ * [THRIFT-1785] - C#: Add TMemoryBuffer serializer/deserializer
+ * [THRIFT-1780] - Add option to generate nullable values
+ * [THRIFT-1786] - C# Union Typing
+ * [THRIFT-591] - Make the C++ runtime library be compatible with Windows and Visual Studio
+ * [THRIFT-514] - Add option to configure compiler output directory
+
+## Question
+ * [THRIFT-1764] - how to get the client's context during an RPC call on the server side?
+ * [THRIFT-1791] - thrift's namespace directive when generating haskell code
+
+## Sub-task
+ * [THRIFT-1594] - Java test clients should have return codes that reflect whether they succeed or not.
+ * [THRIFT-1595] - Java test server should follow the documented behavior as of THRIFT-1590
+ * [THRIFT-986] - st: add version Info to the library
+ * [THRIFT-985] - php: add version Info to the library
+ * [THRIFT-984] - ocaml: add version Info to the library
+ * [THRIFT-1924] - Delphi: Inconsistency in serialization of optional fields
+ * [THRIFT-1922] - C#: Inconsistency in serialization of optional fields
+ * [THRIFT-1961] - C# tests should be in lib/csharp/test/...
+ * [THRIFT-1822] - PHP unit test does not work
+ * [THRIFT-1902] - C++: Support for Multiplexing Services on any Transport, Protocol and Server
+ * [THRIFT-1901] - C#: Support for Multiplexing Services on any Transport, Protocol and Server
+ * [THRIFT-1899] - Delphi: Support for Multiplexing Services on any Transport, Protocol and Server
+ * [THRIFT-563] - Support for Multiplexing Services on any Transport, Protocol and Server
+
+
+
+Thrift 0.9
+--------------------------------------------------------------------------------
+## Bug
+ * [THRIFT-1438] - lib/cpp/src/windows/config.h should read version from configure.ac rather than a #define
+ * [THRIFT-1446] - Compile error with Delphi 2009 in constant initializer
+ * [THRIFT-1450] - Problems building thrift 0.8.0 for Python and Ruby
+ * [THRIFT-1449] - Ruby client does not work on solaris (?)
+ * [THRIFT-1447] - NullpointerException in ProcessFunction.class :in "oneway" method
+ * [THRIFT-1433] - TServerSocket fix for MSVC
+ * [THRIFT-1429] - The nonblocking servers is supposed to use TransportFactory to read the data
+ * [THRIFT-1427] - PHP library uses non-multibyte safe functions with mbstring function overloading
+ * [THRIFT-1421] - Debian Packages can not be built
+ * [THRIFT-1394] - Treatment of optional fields is not consistent between C++ and Java
+ * [THRIFT-1511] - Server with oneway support ( JAVA )
+ * [THRIFT-1496] - PHP compiler not namespacing enums
+ * [THRIFT-1495] - PHP TestClient fatals on missing class
+ * [THRIFT-1508] - TServerSocket does not allow for the user to specify the IP address to bind to
+ * [THRIFT-1504] - Cocoa Generator should use local file imports for base Thrift headers
+ * [THRIFT-1512] - Thrift socket support for Windows XP
+ * [THRIFT-1502] - TSimpleServer::serve(): Do not print out error message if server was stopped.
+ * [THRIFT-1501] - PHP old namespaces not generated for enums
+ * [THRIFT-1483] - java compiler does not generate type parameters for services in extended clauses
+ * [THRIFT-1479] - Compiled PHP process functions missing writeMessageEnd()
+ * [THRIFT-1492] - enabling c_glib renders thrift unusable (even for C++ code)
+ * [THRIFT-1491] - Uninitialized processorFactory_ member in TServer.h
+ * [THRIFT-1475] - Incomplete records generation for Erlang
+ * [THRIFT-1486] - Javascript manual testserver not returning content types
+ * [THRIFT-1488] - src/concurrency/Thread.h:91:58: error: invalid conversion from 'pthread_t {aka _opaque_pthread_t*}' to 'apache::thrift::concurrency::Thread::id_t {aka long long unsigned int}' [-fpermissive]
+ * [THRIFT-1490] - Windows-specific header files - fixes & tweaks
+ * [THRIFT-1526] - Union TupleSchemeFactory returns StandardSchemes
+ * [THRIFT-1527] - Generated implementation of tupleReadStruct in unions return null when the setfield is unrecognized
+ * [THRIFT-1524] - TNonBlockingServer does not compile in Visual Studio 2010
+ * [THRIFT-1529] - TupleProtocol can unintentionally include an extra byte in bit vectors when number of optional fields is an integral of 8
+ * [THRIFT-1473] - JSON context stack may be left in an incorrect state when an exception is thrown during read or write operations
+ * [THRIFT-1456] - System.Net.HttpWebRequest' does not contain a definition for 'Proxy'
+ * [THRIFT-1468] - Memory leak in TSaslServerTransport
+ * [THRIFT-1461] - Recent TNonblockingServer changes broke --enable-boostthreads=yes, Windows
+ * [THRIFT-1460] - why not add unicode strings support to python directly?
+ * [THRIFT-1464] - AbstractNonblockingServer.FrameBuffer TNonblockingTransport accessor changed from public to private
+ * [THRIFT-1467] - Possible AV with empty strings when using JSON protocol
+ * [THRIFT-1523] - clientTimeout not worked as expected in TServerSocket created by TSSLTransportFactory
+ * [THRIFT-1537] - TFramedTransport issues
+ * [THRIFT-1519] - Thrift Build Failure referencing rb_intern2 symbol
+ * [THRIFT-1518] - Generated C++ code only sends the first optional field in the write() function for a struct.
+ * [THRIFT-1515] - NameError: global name 'TApplicationException' is not defined
+ * [THRIFT-1554] - Inherited service methods are not resolved in derived service implementations
+ * [THRIFT-1553] - thrift nodejs service side can't read map structure, key as enum, value as Object
+ * [THRIFT-1575] - Typo in server/TThreadPoolServer.h
+ * [THRIFT-1327] - Fix Spec Suite under Ruby-1.8.7 (works for MRI Ruby-1.9.2)
+ * [THRIFT-1326] - on some platforms, #include is necessary to be included in Thrift.h
+ * [THRIFT-1159] - THttpClient->Flush() issue (connection thru proxy)
+ * [THRIFT-1277] - Node.js serializes false booleans as null
+ * [THRIFT-1224] - Cannot insert UTF-8 text
+ * [THRIFT-1267] - Node.js can't throw exceptions.
+ * [THRIFT-1338] - Do not use an unpatched autoconf 2.65 to generate release tarball
+ * [THRIFT-1128] - MAC OS X: thrift.h incompatibility with Thrift.h
+ * [THRIFT-1631] - Fix C++ server constructor typos
+ * [THRIFT-1602] - PHP C Extension is not Compatible with PHP 5.4
+ * [THRIFT-1610] - IWebProxy not available on WP7 platform
+ * [THRIFT-1606] - Race condition in BoostThreadFactory.cpp
+ * [THRIFT-1604] - Python exception handling for changes from PEP 3110
+ * [THRIFT-1607] - Incorrect file modes for several source files
+ * [THRIFT-1583] - c_glib leaks memory
+ * [THRIFT-1582] - Bad includes of nested thrift files in c_glib
+ * [THRIFT-1578] - C_GLib generated code does not compile
+ * [THRIFT-1597] - TJSONProtocol.php is missing from Makefile.am
+ * [THRIFT-1591] - Enable TCP_NODELAY for ruby gem
+ * [THRIFT-1624] - Isset Generated differently on different platforms
+ * [THRIFT-1622] - Incorrect size returned on read
+ * [THRIFT-1621] - Memory leaks
+ * [THRIFT-1612] - Base64 encoding is broken
+ * [THRIFT-1627] - compiler built using compilers.vcxproj cannot be used to build some test .thrift files
+ * [THRIFT-1571] - Update Ruby HTTP transport for recent Ruby versions
+ * [THRIFT-1023] - Thrift encoding (UTF-8) issue with Ruby 1.9.2
+ * [THRIFT-1090] - Document the generation of a file called "Constants.java"
+ * [THRIFT-1082] - Thrift::FramedTransport sometimes calls close() on an undefined value
+ * [THRIFT-956] - Python module's version meta-data should be updated
+ * [THRIFT-973] - Cocoa library won't compile using clang
+ * [THRIFT-1632] - ruby: data corruption in thrift_native implementation of MemoryBufferTransport
+ * [THRIFT-1665] - TBinaryProtocol: exceeded message length raises generic TException
+ * [THRIFT-1664] - Reference to non-existing variable in build script
+ * [THRIFT-1663] - Java Thrift server is not throwing exceptions
+ * [THRIFT-1662] - "removeObject:" should be "removeObserver:" in [-TSocketServer dealloc]?
+ * [THRIFT-1643] - Denial of Service attack in TBinaryProtocol.readString
+ * [THRIFT-1674] - Update Thrift D library to be compatible with 2.060
+ * [THRIFT-1673] - Ruby compile flags for extension for multi arch builds (os x)
+ * [THRIFT-1655] - Configure still trying to use thrift_generators in output
+ * [THRIFT-1654] - c_glib thrift_socket_read() returns corrupted data
+ * [THRIFT-1653] - TThreadedSelectorServer leaks CLOSE_WAIT sockets
+ * [THRIFT-1658] - Java thrift server is not throwing TApplicationException
+ * [THRIFT-1656] - Setting proper headers in THttpServer.cpp so that "Cross-Origin Resource Sharing" on js client can work.
+ * [THRIFT-1652] - TSaslTransport does not log the error when kerberos auth fails
+ * [THRIFT-2272] - CLONE - Denial of Service attack in TBinaryProtocol.readString
+ * [THRIFT-2086] - Invalid generated code for Node.JS when using namespaces
+ * [THRIFT-1686] - t_php_generator.cc uses "and" instead of "&&", and causes compiler errors with Visual Studio
+ * [THRIFT-1693] - libthrift has dependency on two different versions of httpcore
+ * [THRIFT-1689] - don't exit(-1) in TNonblockingServer
+ * [THRIFT-1679] - NodeJS: protocol readString() should treat string as utf8, not binary
+ * [THRIFT-1721] - Dist broken due to 0.8.0 to 0.9.0 changes
+ * [THRIFT-1710] - Minor issues in test case code
+ * [THRIFT-1709] - Warning "Bitwise-or operator used on a sign-extended operand; consider casting to a smaller unsigned type first" in TBinaryProtocol.cs at ReadInt64()
+ * [THRIFT-1707] - [ruby] Adjust server_spec.rb for RSpec 2.11.x and Ruby 1.9.3
+ * [THRIFT-1671] - Cocoa code generator does not put keywords into generated method calls
+ * [THRIFT-1670] - Incompatibilities between different versions of a Thrift interface
+ * [THRIFT-1669] - NameError: global name 'TApplicationException' is not defined
+ * [THRIFT-1668] - Compile error in contrib/fb303, thrift/TDispatchProcessor.h: No such file or directory
+ * [THRIFT-1845] - Fix compiler warning caused by implicit string conversion with Xcode 4.6
+ * [THRIFT-304] - Building the Python library requires development headers
+ * [THRIFT-369] - sets and maps break equality
+ * [THRIFT-556] - Ruby compiler does not correctly refer to top-level modules when a submodule masks the top-level name
+ * [THRIFT-481] - indentation of ruby classes is off by a few
+
+## Improvement
+ * [THRIFT-1498] - Allow TThreadedPoolServer.Args to pass a ExecutorService
+ * [THRIFT-1444] - FunctionRunner - add syntactic sugar to create shared_ptrs
+ * [THRIFT-1443] - define a TProcessor helper class to implement process()
+ * [THRIFT-1441] - Generate constructor with parameters for exception class to let it update message property automatically.
+ * [THRIFT-1520] - Embed version number in erlang .app file
+ * [THRIFT-1480] - python: remove tabs, adjust whitespace and address PEP8 warnings
+ * [THRIFT-1485] - Performance: pass large and/or refcounted arguments as "const"
+ * [THRIFT-1484] - Introduce phpunit test suite
+ * [THRIFT-1532] - The type specifications in the generated Erlang code should include "undefined" where it's used as a default value
+ * [THRIFT-1534] - Required fields in the Delphi code generator.
+ * [THRIFT-1469] - Java isset space optimization
+ * [THRIFT-1465] - Visibility of methods in generated java code
+ * [THRIFT-1453] - Don't change types of arguments when serializing with thrift php extension
+ * [THRIFT-1452] - generate a swap() method for all generated structs
+ * [THRIFT-1451] - FramedTransport: Prevent infinite loop when writing
+ * [THRIFT-1521] - Two patches for more Performance
+ * [THRIFT-1555] - Delphi version of the tutorial code
+ * [THRIFT-1535] - Why doesn't thrift use wrapped classes for optional fields?
+ * [THRIFT-1204] - Ruby autogenerated files should require 'thrift' gem
+ * [THRIFT-1344] - Using the httpc module directly rather than the deprecated http layer
+ * [THRIFT-1343] - no_auto_import min/2 to avoid compile warning
+ * [THRIFT-1340] - Add support of ARC to Objective-C
+ * [THRIFT-1611] - Improved code generation for typedefs
+ * [THRIFT-1593] - Pass on errors like "connection closed" to the handler module
+ * [THRIFT-1615] - PHP Namespace
+ * [THRIFT-1567] - Thrift/cpp: Allow alternate classes to be used for
+ * [THRIFT-1072] - Missing - (id) initWithSharedProcessor in TSharedProcessorFactory.h
+ * [THRIFT-1650] - [ruby] Update clean items and svn:ignore entries for OS X artifacts
+ * [THRIFT-1661] - [PATCH] Add --with-qt4 configure option
+ * [THRIFT-1675] - Do we have any plan to support scala?
+ * [THRIFT-1645] - Replace Object#tee with more conventional Object#tap in specs
+ * [THRIFT-1644] - Upgrade RSpec to 2.10.x and refactor specs as needed
+ * [THRIFT-1672] - MonoTouch (and Mono for Android) compatibility
+ * [THRIFT-1702] - a thrift manual
+ * [THRIFT-1694] - Re-Enable serialization for WP7 Silverlight
+ * [THRIFT-1691] - Serializer/deserializer support for Delphi
+ * [THRIFT-1688] - Update IDL page markup
+ * [THRIFT-1725] - Tutorial web pages for Delphi and C#
+ * [THRIFT-1714] - [ruby] Explicitly add CWD to Ruby test_suites.rb
+ * [THRIFT-317] - Issues with Java struct validation
+ * [THRIFT-164] - Build web tutorial on Incubator web site
+ * [THRIFT-541] - Cocoa code generator doesn't put keywords before all arguments.
+ * [THRIFT-681] - The HTML generator does not handle JavaDoc style comments very well
+
+## New Feature
+ * [THRIFT-1500] - D programming language support
+ * [THRIFT-1510] - There should be an implementation of the JsonProtocol for ruby
+ * [THRIFT-1115] - python TBase class for dynamic (de)serialization, and __slots__ option for memory savings
+ * [THRIFT-1953] - support for asp.net mvc 3
+
+## Question
+ * [THRIFT-1235] - How could I use THttpServerTransportFactory withTNonBlockingServer
+ * [THRIFT-1368] - TNonblockingServer usage
+ * [THRIFT-1061] - Read an invalid frame size of 0. Are you using TFramedTransport on the client side?
+ * [THRIFT-491] - Ripping raw pthreads out of TFileTransport and associated test issues
+
+## Sub-task
+ * [THRIFT-1596] - Delphi: Test clients should have return codes that reflect whether they succeeded or not
+ * [THRIFT-982] - javame: add version Info to the library
+ * [THRIFT-1722] - C# WP7 Assembly addition breaks mono build
+ * [THRIFT-336] - Compact Protocol in C#
+
+## Test
+ * [THRIFT-1613] - Add code back into empty source file ToStringTest.java
+ * [THRIFT-1718] - Incorrect check in TFileTransportTest
+
+## Wish
+ * [THRIFT-1463] - Decouple Thrift IDL from generators
+ * [THRIFT-1466] - Proper Documentation for Thrift C Glib
+ * [THRIFT-1539] - Build and distribute the fb303 python libraries along with thrift
+ * [THRIFT-1685] - Please add "aereo.com" to "Powered by Apache Thrift" list in about page
+ * [THRIFT-330] - TProcessor - additional method to called when connection is broken
+
+
+
+Thrift 0.8
+--------------------------------------------------------------------------------
+## Bug
+ * [THRIFT-1436] - pip install thrift fails on Windows with "Unable to find vcvarsall.bat"
+ * [THRIFT-1432] - Javascript struct constants declared in the same file as their struct definition will cause an error
+ * [THRIFT-1428] - shared.thrift does not include namespace for php, so thrift compiler generates incorrect name
+ * [THRIFT-1426] - Dist package missing files for release 0.8
+ * [THRIFT-1425] - The Node package is incompatible with latest node (0.6) & npm (1.0.27)
+ * [THRIFT-1416] - Python Unit test is broken on ci
+ * [THRIFT-1419] - AbstractNonBlockingServer does not catch errors when invoking the processor
+ * [THRIFT-1424] - Ruby specs fail when run with rake
+ * [THRIFT-1420] - Nonblocking and HsHa server should make sure to close all their socket connections when the selector exits
+ * [THRIFT-1413] - Generated code does not read MapEnd / ListEnd / SetEnd
+ * [THRIFT-1409] - Name conflict check does not work properly for exception object (Delphi).
+ * [THRIFT-1408] - Delphi Test Server: Exception test case fails due to naming conflict with e.message
+ * [THRIFT-1407] - Typo in Python socket server causes Thrift to fail when we enable a global socket timeout
+ * [THRIFT-1397] - CI server fails during build due to unused parameters in delphi generator
+ * [THRIFT-1404] - Delphi compiler generates struct reader code with problems.
+ * [THRIFT-1400] - Ruby native extension aborts with __stack_chk_fail in OSX
+ * [THRIFT-1399] - One of the TServerImpl.Create CTORs lacks implementation
+ * [THRIFT-1390] - Debian packages build fix for Squeeze (build from the official 0.7.0 tarball)
+ * [THRIFT-1393] - TTransportException's thrown from THttpClient contain superfluous slashes in the Exception message
+ * [THRIFT-1392] - Enabling both namespaces and autoloading in generated PHP code won't work.
+ * [THRIFT-1406] - Build error after applying THRIFT-1395
+ * [THRIFT-1405] - Delphi compiler does not generate container serializer properly.
+ * [THRIFT-1411] - java generator does not provide type parameter for TBaseProcessor
+ * [THRIFT-1473] - JSON context stack may be left in an incorrect state when an exception is thrown during read or write operations
+ * [THRIFT-1331] - Ruby library deserializes an empty map to nil
+ * [THRIFT-1330] - PHP Namespaces no longer generated
+ * [THRIFT-1328] - TBaseHelper.toString(...) appends ByteBuffer data outside of valid buffer range
+ * [THRIFT-1322] - OCaml lib fail to compile: Thrift.ml line 305, int vs int32 mismatch
+ * [THRIFT-1143] - Build doesn't detect correct architecture type on 64bit osx
+ * [THRIFT-1205] - port server unduly fragile with arbitrary input
+ * [THRIFT-1279] - type set is handled incorrectly when writing object
+ * [THRIFT-1298] - Standard scheme doesn't read or write metadata along with field values
+ * [THRIFT-1265] - C++ container deserialize
+ * [THRIFT-1263] - publish ruby client to rubygems
+ * [THRIFT-1384] - Java help menu missing newline near javame flag
+ * [THRIFT-1382] - Bundle install does not work because thrift crashes
+ * [THRIFT-1381] - Thrift C++ libs have incorrectly versioned names
+ * [THRIFT-1350] - Go library code does not build as of r60 (most recent release)
+ * [THRIFT-1365] - TupleProtocol#writeBitSet unintentionally writes a variable length byte array
+ * [THRIFT-1359] - --gen-cob cpp:cob_style does not compile anymore
+ * [THRIFT-1319] - Mismatch between how a union reads and writes a container
+ * [THRIFT-1309] - libfb303-0.7.0.jar missing in maven repository
+ * [THRIFT-1238] - Thrift JS client cannot read map of structures
+ * [THRIFT-1254] - Code can't be compiled against a regular JRE: Object.clone() override has a different return type
+ * [THRIFT-1367] - Mac OSX build fails with "no such file to load -- spec/rake/spectask"
+ * [THRIFT-1355] - Running make in lib/rb doesn't build the native extensions
+ * [THRIFT-1370] - Debian packaging should Build-Depend on libglib2.0-dev
+ * [THRIFT-1342] - Compilation problem on Windows of fastbinary.c
+ * [THRIFT-1341] - TProtocol.h endian detection wrong with boost
+ * [THRIFT-1583] - c_glib leaks memory
+ * [THRIFT-1582] - Bad includes of nested thrift files in c_glib
+ * [THRIFT-1578] - C_GLib generated code does not compile
+ * [THRIFT-1027] - 'make -j 16' fails with "unterminated #ifdef" error
+ * [THRIFT-1121] - Java server performance regression in 0.6
+ * [THRIFT-857] - tests run by "make install" fail if generators are disabled
+ * [THRIFT-380] - Use setuptools for python build
+
+## Dependency upgrade
+ * [THRIFT-1257] - thrift's dependency scope on javax.servlet:servlet-api should be 'provided'
+
+## Improvement
+ * [THRIFT-1445] - minor C++ generator variable cleanup
+ * [THRIFT-1435] - make TException.Message property conformant to the usual expectations
+ * [THRIFT-1431] - Rename 'sys' module to 'util'
+ * [THRIFT-1396] - Delphi generator has dependency on boost 1.42 or later.
+ * [THRIFT-1395] - Patch to prevent warnings for integer types in some cases
+ * [THRIFT-1275] - thrift: always prefix namespaces with " ::"
+ * [THRIFT-1274] - thrift: fail compilation if an unexpected token is
+ * [THRIFT-1271] - thrift: fix missing namespace in generated local
+ * [THRIFT-1270] - thrift: add --allow-neg-keys argument to allow
+ * [THRIFT-1345] - Allow building without tests
+ * [THRIFT-1286] - Modernize the Thrift Ruby Library Dev Environment
+ * [THRIFT-1284] - thrift: fix processor inheritance
+ * [THRIFT-1283] - thrift: wrap t_cpp_generator::generate_process_function() to 80
+ * [THRIFT-1282] - Upgrade httpclient to 4.1.2 (from 4.0.1)
+ * [THRIFT-1281] - add @generated to the docblock
+ * [THRIFT-1280] - Thrift: Improve Monitor exception-free interfaces
+ * [THRIFT-1278] - javadoc warnings - compilation
+ * [THRIFT-1227] - Erlang implementation of thrift JSON protocol
+ * [THRIFT-1295] - Duplicate include in TSocket.cpp
+ * [THRIFT-1294] - thrift: fix log message typos in TSimpleServer
+ * [THRIFT-1293] - thrift: improve handling of exceptions thrown by
+ * [THRIFT-1292] - thrift: silence log spew from TThreadedServer
+ * [THRIFT-1288] - Allow typedefed exceptions in throws clauses
+ * [THRIFT-1290] - thrift: TNonblockingServer: clean up state in the
+ * [THRIFT-1287] - thrift: start refactoring some of the C++ processor
+ * [THRIFT-1289] - thrift: implement TNonblockingServer::stop()
+ * [THRIFT-1305] - thrift: make TConnection a private inner class of
+ * [THRIFT-1304] - TNonblockingServer: pass in the connection context to
+ * [THRIFT-1302] - thrift: raise an exception if send() times out in
+ * [THRIFT-1301] - thrift: consolidate common code in TNonblockingServer
+ * [THRIFT-1377] - abort PHP deserialization on unknown field type
+ * [THRIFT-1379] - fix uninitialized enum values in thrift C++ objects
+ * [THRIFT-1376] - Make port specification option in thrift remote
+ * [THRIFT-1375] - fixed a hex char conversion bug in TJSONProtocol
+ * [THRIFT-1373] - Fix user-defined exception generation in thrift (python)
+ * [THRIFT-1361] - Optional replacement of pthread by boost::thread
+ * [THRIFT-1320] - Consistency of configure generated config.h
+ * [THRIFT-1317] - Remove copy constructibility from
+ * [THRIFT-1316] - thrift: update server classes to accept
+ * [THRIFT-1315] - thrift: generate server interface factory classes
+ * [THRIFT-1314] - thrift: add TProcessorFactory
+ * [THRIFT-1335] - Add accept timeout to TServerSocket
+ * [THRIFT-1334] - Add more info to IllegalStateException
+ * [THRIFT-1333] - Make RWGuard not copyable
+ * [THRIFT-1332] - TSSLTransportParameters class uses hard coded value keyManagerType: SunX509
+ * [THRIFT-1251] - Generated java code should indicate which fields are required and which are optional
+ * [THRIFT-1387] - Build MSVC libraries with Boost Threads instead of Pthreads
+ * [THRIFT-1339] - Extend Tuple Protocol to TUnions
+ * [THRIFT-1031] - Patch to compile Thrift for vc++ 9.0 and 10.0
+ * [THRIFT-1130] - Add the ability to specify symbolic default value for optional boolean
+ * [THRIFT-1123] - Patch to compile Thrift server and client for vc++ 9.0 and 10.0
+ * [THRIFT-386] - Make it possible to build the Python library without the extension
+
+## New Feature
+ * [THRIFT-1401] - JSON-protocol for Delphi XE Libraries
+ * [THRIFT-1167] - Java nonblocking server with more than one thread for select and handling IO
+ * [THRIFT-1366] - Delphi generator, library and unit test.
+ * [THRIFT-1354] - Add rake task to build just the gem file
+ * [THRIFT-769] - Pluggable Serializers
+
+## Sub-task
+ * [THRIFT-1415] - delphi: add version Info to the library
+ * [THRIFT-1391] - Improved Delphi XE test cases
+
+
+
+Thrift 0.7
+--------------------------------------------------------------------------------
+## Bug
+ * [THRIFT-1140] - Framed Transport Client using C (Glib) Library hangs when connecting to Ruby Server
+ * [THRIFT-1154] - HttpClient does not specify the connection close parameter
+ * [THRIFT-1153] - HttpClient does not specify the connection close parameter
+ * [THRIFT-1149] - Nonblocking server fails when client connection is reset
+ * [THRIFT-1146] - Android Incompatibility : in Android < 2.3 java.io.IOException doesn't support for Throwable parameter in constructor
+ * [THRIFT-1133] - Java and JavaScript tutorial is broken since we have Java maven deployment
+ * [THRIFT-1132] - Deserialization error in TApplicationException C#
+ * [THRIFT-1131] - C# JSON Protocol is unable to decode escaped characters in string
+ * [THRIFT-1208] - python TCompactProtocol.py writeBool and readBool not follow the compact-proto-spec-2.txt spec for CONTAINER_WRITE, CONTAINER_READ
+ * [THRIFT-1200] - JS compiler generates code that clobbers existing namespaces
+ * [THRIFT-1183] - Pure-ruby CompactProtocol raises ArgumentError when deserializing under Ruby 1.9
+ * [THRIFT-1182] - Native deserializer segfaults on incorrect list element type
+ * [THRIFT-1181] - AS3 compiler generates incorrect code for setting default values in constructor
+ * [THRIFT-1234] - thrift --help is missing doc on py:utf8strings
+ * [THRIFT-1180] - AS3 compiler generates uncompilable code for binary types.
+ * [THRIFT-1194] - Java lib does not install artifacts to local dir correctly
+ * [THRIFT-1193] - Potential infinite loop in nonblocking_server
+ * [THRIFT-1192] - Typo: TProtocol.h tests for HAVE_SYS_PARAM_H_
+ * [THRIFT-1190] - readBufferBytesAllocated in TNonblockingServer.java should be AtomicLong to fix FD leakage and general server malfunction
+ * [THRIFT-1187] - nonblocking_server shutdown race under Ruby 1.9
+ * [THRIFT-1178] - Java: TBase signature should be T extends TBase,?>
+ * [THRIFT-1164] - Segmentation fault on NULL pointer in t_js_generator::generate_const
+ * [THRIFT-1171] - Perl write/readDouble assumes little-endian platform
+ * [THRIFT-1222] - Unhandled exception for TEvhttpServer request
+ * [THRIFT-1220] - TProcessor::process never returns false
+ * [THRIFT-1285] - Stable 0.7.0 Windows compiler exe available on the website is not the correct one
+ * [THRIFT-1218] - c_glib uses wrong name in pkg-config
+ * [THRIFT-1215] - Undefined property Thirft in lib/js/thrift.js
+ * [THRIFT-1211] - When using THttpClient, non 200 responses leave the connection open
+ * [THRIFT-1228] - The php accelerator module calls flush incorrectly
+ * [THRIFT-1308] - libfb303-0.7.0.jar missing in maven repository
+ * [THRIFT-1255] - Mismatch of method name between JavaME's lib and generated code (compareTo/compareObjects)
+ * [THRIFT-1253] - Code generated for maps is not compiling
+ * [THRIFT-1252] - Segfault in Ruby deserializer
+ * [THRIFT-1094] - bug in TCompactProto python readMessageEnd method and updated test cases
+ * [THRIFT-1093] - several bugs in python TCompactProtocol
+ * [THRIFT-1092] - generated validate() method has wrong indentation
+ * [THRIFT-1011] - Error generating package imports when using classes from other packages
+ * [THRIFT-1050] - Declaring an argument named "manager" to a service method produces code that fails compile due to name conflicts with protected ivars in TAsyncClient
+ * [THRIFT-1074] - .keystore and .truststore are missing from the 0.6.0 distribution
+ * [THRIFT-1067] - Tons of bugs in php implementation
+ * [THRIFT-1065] - Unexpected exceptions not properly handled in JS
+ * [THRIFT-1076] - Erlang Thrift socket server has a bug that causes java thrift client of framed binary client to throw "out of sequence" exception
+ * [THRIFT-1057] - casts in TBinaryProtocol.tcc causing "dereferencing type-punned pointer will break strict-aliasing rules" warnings from gcc
+ * [THRIFT-1055] - csharp TServerSocket and TSocket do not disable Nagle via Socket.NoDelay = true like cpp and java do
+ * [THRIFT-1054] - explicit call to PKG_PROG_PKG_CONFIG is missing and first use of PKG_CHECK_MODULES may not happen, causes mono detection to fail
+ * [THRIFT-1117] - JavaScript Unit Test does not work anymore because libthrift*.jar were moved by Maven Deployment
+ * [THRIFT-1111] - The HTML generator does not distinguish between string and binary types
+ * [THRIFT-1032] - "make dist" fails due to c_glib problem
+ * [THRIFT-1036] - Auto-generated C++ code fails to compile with "-Werror -Wextra -Wall" g++ compiler flags
+ * [THRIFT-1041] - TDeserializer holds onto a reference of the array it reads after it is done deserializing
+ * [THRIFT-1106] - C++ code TAsyncProtocolProcessor.h & TAsyncBufferProcessor.h have virtual functions but no virtual destructor; causes warnings on -Wall
+ * [THRIFT-1105] - OCaml generator does not prefix methods of included structs with their type
+ * [THRIFT-1104] - INSTALLDIRS should be included in configure script
+ * [THRIFT-1102] - typo in configure.ac: "==" operator in 'test' (instead of "=")
+ * [THRIFT-1101] - bytebuffer length calculation in TBinaryProtocol writeBinary
+ * [THRIFT-1098] - Undefined properties in TBinaryProtocolFactory
+ * [THRIFT-1081] - PHP tests broken and somewhat incomplete
+ * [THRIFT-1080] - erlang test's 'make' fails on Mac OSX
+ * [THRIFT-1078] - ThriftTest.thrift generates invalid PHP library
+ * [THRIFT-1120] - proto.WriteListEnd being called in the wrong place
+ * [THRIFT-1119] - TJSONProtocol fails to UTF8 decode strings
+ * [THRIFT-867] - PHP accelerator module's output transport is incompatible with TFramedTransport
+ * [THRIFT-826] - PHP TSocket Write Timeout
+ * [THRIFT-835] - Bad AS3 syntax in constructors that set default values
+ * [THRIFT-788] - thrift_protocol.so: multiget/multiget_slice does not handle more than 17 keys correctly
+ * [THRIFT-125] - OCaml libraries don't compile with 32-bit ocaml
+ * [THRIFT-342] - PHP: can't have sets of complex types
+ * [THRIFT-731] - configure doesn't check for ant >= 1.7
+ * [THRIFT-690] - Update TApplicationException codes
+ * [THRIFT-638] - BufferedTransport + C extensions block until recv timeout is reached on last fread call
+
+## Dependency upgrade
+ * [THRIFT-1177] - Update thrift to reflect changes in Go's networking libraries
+
+## Improvement
+ * [THRIFT-1155] - Remove log4j dependency from java client
+ * [THRIFT-1151] - Produce more informative runtime error in case of schema and data mismatch during serialization
+ * [THRIFT-1207] - Support DESTDIR on "make install" of ruby libs
+ * [THRIFT-1199] - Union structs should have generated methods to test whether a specific field is currently set
+ * [THRIFT-1233] - Remove unused include in generated C++ code
+ * [THRIFT-1189] - Ruby deserializer speed improvements
+ * [THRIFT-1170] - Thrift Generated Code and Java 5
+ * [THRIFT-1174] - Publish as3 client implementation via Maven for use by flex-mojos users
+ * [THRIFT-1225] - TCompactProtocol for PHP
+ * [THRIFT-1221] - Remove SimpleCallback.h
+ * [THRIFT-1217] - Use evutil_socketpair instead of pipe (Windows port)
+ * [THRIFT-1216] - build Java Library behind a proxy
+ * [THRIFT-1231] - Remove bogus include
+ * [THRIFT-1213] - Membuffer should provide a way to get back the buffer
+ * [THRIFT-1237] - Java fb303 missing some methods
+ * [THRIFT-1063] - Fix Erlang Tutorial Files
+ * [THRIFT-1053] - Make remote client's IP address available for all socket related transports
+ * [THRIFT-1109] - Deploy fb303 along side libthrift to maven repo
+ * [THRIFT-1107] - improvement for compiler-generated python for 'None' object comparisons
+ * [THRIFT-1069] - Add command line option to prevent thrift from inserting gen-* directories
+ * [THRIFT-1049] - Allow for TServerSocket python library to bind to a specific host
+ * [THRIFT-1126] - Extending struct_info for erlang bindings
+ * [THRIFT-1100] - python TSSLSocket improvements, including certificate validation
+ * [THRIFT-994] - Don't try to invoke phpize if we don't have it
+ * [THRIFT-993] - Some improvements in C++ stubs for oneway operations
+ * [THRIFT-997] - Using valueOf for base types in getFieldValue
+ * [THRIFT-418] - Don't do runtime sorting of struct fields
+ * [THRIFT-151] - TSSLServerSocket and TSSLSocket implementation
+ * [THRIFT-27] - Generated erlang types don't contain default values for records
+ * [THRIFT-113] - to-string methods should omit optional null fields from output
+ * [THRIFT-363] - Maven Deploy
+ * [THRIFT-447] - Make an abstract base Client class so we can generate less code
+ * [THRIFT-627] - should c++ have setters for optional fields?
+
+## New Feature
+ * [THRIFT-1236] - Erlang Reconnecting Thrift Client
+ * [THRIFT-1021] - Framed transport support for OCaml
+ * [THRIFT-1068] - Python SSL Socket Support
+ * [THRIFT-1103] - TZlibTransport for python, a zlib compressed transport
+ * [THRIFT-1083] - Preforking python process pool server
+ * [THRIFT-999] - Add TForkingServer
+
+## Sub-task
+ * [THRIFT-1152] - Attributes from private to protected
+ * [THRIFT-1038] - Generated Java code for structures containing binary fields (or collections thereof) are not serializable (in the Java sense) even though they implement java.io.Serializable
+
+## Task
+ * [THRIFT-892] - Refactor erlang build system with rebar
+
+## Wish
+ * [THRIFT-625] - Add support for 'Go'
+
+
+
+Thrift 0.6.1
+--------------------------------------------------------------------------------
+## Bug
+ * [THRIFT-1133] - Java and JavaScript tutorial is broken since we have Java maven deployment
+ * [THRIFT-1131] - C# JSON Protocol is unable to decode escaped characters in string
+ * [THRIFT-1074] - .keystore and .truststore are missing from the 0.6.0 distribution
+
+## Improvement
+ * [THRIFT-1109] - Deploy fb303 along side libthrift to maven repo
+ * [THRIFT-363] - Maven Deploy
+
+## Question
+ * [THRIFT-1206] - did the THRIFT 0.6.1 merge THRIFT-563 ?
+
+## Sub-task
+ * [THRIFT-1163] - How can i use multi service in one program?
+
+## Task
+ * [THRIFT-1112] - Apply THRIFT-363 to 0.6 branch
+ * [THRIFT-1113] - Apply THRIFT-1074 to 0.6 branch
+
+
+
+Thrift 0.6
+--------------------------------------------------------------------------------
+## Bug
+ * [THRIFT-1020] - OCaml compiler generates invalid OCaml
+ * [THRIFT-1015] - TUnion does not handle ByteBuffer in toString
+ * [THRIFT-1013] - generated java code may have name clashes with thrift library
+ * [THRIFT-1009] - TUnion does not correctly deep copy a ByteBuffer
+ * [THRIFT-1032] - "make dist" fails due to c_glib problem
+ * [THRIFT-868] - Referencing constant values doesn't work with typedef types
+ * [THRIFT-971] - java module can't be compiled without ivy and network connection
+ * [THRIFT-970] - Under heavy load, THttpClient may fail with "too many open files"
+ * [THRIFT-969] - Java Tutorial broken, move CalculatorHandler to a separate file
+ * [THRIFT-807] - JavaScript: Initialization of Base Types with 0 instead of null
+ * [THRIFT-955] - Thrift compiler for Windows uses lowercase names and directories which is inconsistent with compiling on other platforms
+ * [THRIFT-992] - Naming convention in C# constructor is not consistent with other fields and causes compile errors
+ * [THRIFT-1008] - byte[] accessors throw NPE on unset field
+ * [THRIFT-1006] - Impossible to correctly qualify an enum constant in an external thrift file
+ * [THRIFT-950] - Haskell bindings treat 'byte' as unsigned 8-bit int (Data.Word.Word8), java/cpp as signed (byte/int8_t).
+ * [THRIFT-975] - lib/c_glib/README is missing => breaks make dist
+ * [THRIFT-944] - Support all version-4s of base
+ * [THRIFT-939] - optional binary fields throw NPE on default byte[] getters
+ * [THRIFT-935] - PHP Extension aborts the build if php-config is not installed
+ * [THRIFT-933] - Haskell's Thrift.cabal has warnings
+ * [THRIFT-932] - Haskell tests need to be run through 'make check' (and probably 'cabal check') too
+ * [THRIFT-904] - C# TSocket should disable nagle and linger
+ * [THRIFT-941] - Make PHP C Extension use the defined Protocol writeMessageBegin function
+ * [THRIFT-940] - 'make check' fails if boost is not in the std include and link paths
+ * [THRIFT-924] - Fix generated php structure constants
+ * [THRIFT-979] - ruby bindings used to work on jruby
+ * [THRIFT-977] - Hex Conversion Bug in C++ TJSONProtocol
+ * [THRIFT-347] - PHP TSocket Timeout Issues
+ * [THRIFT-517] - TExceptions thrown by server result in cryptic error message on client - Tried to read 4 bytes, but only got 0 bytes
+
+## Improvement
+ * [THRIFT-1024] - Add Python Twisted example to the Tutorial
+ * [THRIFT-958] - Change access modifier on trans_ field in the FrameBuffer class to public.
+ * [THRIFT-957] - THsHaServer: Change access modifier of the invoker field.
+ * [THRIFT-1002] - CodeStyle: t_c_glib_generator.cc
+ * [THRIFT-1005] - Give unions byte[] signature methods to go along with their ByteBuffer counterparts
+ * [THRIFT-951] - Add a new isServing() method to TServer
+ * [THRIFT-943] - Silly readme typo fix.
+ * [THRIFT-961] - JavaScript TestSuite using ant/ivy and Java's ServerTestBase Handler
+ * [THRIFT-960] - add TestServer, TestNonblockingServer and TestClient again
+ * [THRIFT-949] - Modify the TEnum interface so it defines a method similar to findByValue
+ * [THRIFT-946] - Augment FieldValueMetaData so it differentiates 'string' and 'binary' fields.
+ * [THRIFT-903] - custom ThreadFactory in THsHaServer
+ * [THRIFT-913] - Test Case for Url encoded strings + simple enhancement to lib/js/test/RunTestServer.sh
+ * [THRIFT-926] - Miscellaneous C++ improvements
+ * [THRIFT-929] - Improvements to the C++ test suite
+ * [THRIFT-893] - add JavaScript to the tutorial examples
+ * [THRIFT-1003] - Polishing c_glib code
+ * [THRIFT-71] - Debian packaging for thrift
+
+## New Feature
+ * [THRIFT-1033] - Node.js language target
+ * [THRIFT-947] - Provide a helper method to determine the TProtocol used to serialize some data.
+ * [THRIFT-928] - Make more statistics available in C++ servers
+ * [THRIFT-922] - Templatized [de]serialization code for C++
+ * [THRIFT-923] - Event-driven client and server support for C++
+ * [THRIFT-925] - Provide name<->value map for enums in C++
+ * [THRIFT-927] - Add option to modify the PHP include path
+ * [THRIFT-377] - TFileTransport port in Java
+ * [THRIFT-106] - TSSLServerSocket
+ * [THRIFT-582] - C implementation of Thrift
+ * [THRIFT-745] - Make it easier to instantiate servers
+
+## Sub-task
+ * [THRIFT-1038] - Generated Java code for structures containing binary fields (or collections thereof) are not serializable (in the Java sense) even though they implement java.io.Serializable
+
+## Task
+ * [THRIFT-862] - Async client issues / improvements
+
+## Test
+ * [THRIFT-581] - Add a testsuite for txThrift (Twisted)
+
+
+
+Thrift 0.5.0 - Incubating
+--------------------------------------------------------------------------------
+THRIFT-505 Build Make configure give a summary of the enabled components (David Reiss)
+THRIFT-506 Build Allow Thrift to be built without the C++ library (David Reiss)
+THRIFT-844 Build Build Requirements state autoconf 2.59+ is required, but 2.60+ is needed (Harlan Lieberman-Berg)
+THRIFT-850 Build Perl runtime requires Bit::Vector which may not be installed by default, but configure does not fail (Michael Lum)
+THRIFT-854 Build Provide configure option and make rules to build/install php extension (Anthony Molinaro)
+THRIFT-858 Build Have bootstrap.sh check for a suitable autoconf version before running (David Reiss)
+THRIFT-871 Build Thrift compiler for Windows (binary distribution) (David Reiss)
+THRIFT-323 C# TJSONProtocol (Roger Meier)
+THRIFT-634 C# C# Compiler Generates Incorrect Code For Fields which begin with an uppercase letter (Jon S Akhtar)
+THRIFT-881 C# add csharp to the tutorial (Roger Meier)
+THRIFT-856 C++ Building cpp library fails on OS X with malloc and free not being declared in scope (James Clarke)
+THRIFT-865 C++ C++ compiler build depends on libfl even when flex/lex not detected (David Reiss)
+THRIFT-900 C++ Unix domain socket (Roger Meier)
+THRIFT-920 C++ C++ Test and Tutorial does not compile anymore due to the change within Enum handling (Roger Meier)
+THRIFT-567 C++ Can't immediately stop a TSimpleServer thread that is idle (Rush Manbert)
+THRIFT-756 C++ Exposing TSocket(int) constructor to public (Rajat Goel)
+THRIFT-798 C++ TNonblockingServer leaks resources when destroyed (David Reiss)
+THRIFT-812 C++, Python Demo of Thrift over ZeroMQ (David Reiss)
+THRIFT-629 Cocoa Unused Field In TSocketServer Appears To Break iPhone Build (Jon S Akhtar)
+THRIFT-838 Cocoa Generated Cocoa classes have useless @dynamic declarations (Kevin Ballard)
+THRIFT-805 Cocoa Don't generate process_XXXX methods for oneway methods (Brad Taylor)
+THRIFT-507 Compiler Remove the compiler's dependency on Boost (David Reiss)
+THRIFT-895 Compiler (General) Thrift compiler does not allow two different enumerations to have the same key name for one of the enum values (David Reiss)
+THRIFT-852 Compiler (General) Missing newline causes many compiler warnings (Anthony Molinaro)
+THRIFT-877 Compiler (General) smalltalk namespace doesn't work (Bruce Lowekamp)
+THRIFT-897 Compiler (General) Don't allow unqualified constant access to enum values (Bryan Duxbury)
+THRIFT-9 Compiler (General) Add a default namespace declaration for all languages (David Reiss)
+THRIFT-599 Erlang Don't use unnecessary processes in the Erlang transports and clients (David Reiss)
+THRIFT-646 Erlang Erlang library is missing install target (David Reiss)
+THRIFT-698 Erlang Generated module list should contain atoms, not strings (Anthony Molinaro)
+THRIFT-866 Erlang term() in spec definitions seems to not work in erlang R12 (Anthony Molinaro)
+THRIFT-886 Erlang Dialyzer warning (Anthony Molinaro)
+THRIFT-785 Erlang Framed transport server problems (Anthony Molinaro)
+THRIFT-884 HTML HTML Generator: add Key attribute to the Data Types Tables (Roger Meier)
+THRIFT-652 Haskell Generated field name for struct is not capitalized correctly (Christian Lavoie)
+THRIFT-743 Haskell compile error with GHC 6.12.1 (Christian Lavoie)
+THRIFT-901 Haskell Allow the bindings to compile without -fglasgow-exts and with -Wall -Werror (Christian Lavoie)
+THRIFT-905 Haskell Make haskell thrift bindings use automake to compile and install (Christian Lavoie)
+THRIFT-906 Haskell Improve type mappings (Christian Lavoie)
+THRIFT-914 Haskell Make haskell bindings 'easily' compilable (Christian Lavoie)
+THRIFT-918 Haskell Make haskell tests run again (Christian Lavoie)
+THRIFT-919 Haskell Update Haskell bindings README (Christian Lavoie)
+THRIFT-787 Haskell Enums are not read correctly (Christian Lavoie)
+THRIFT-250 Java ExecutorService as a constructor parameter for TServer (Ed Ceaser)
+THRIFT-693 Java Thrift compiler generated java code that throws compiler warnings about deprecated methods. (Bryan Duxbury)
+THRIFT-843 Java TNonblockingSocket connects without a timeout (Bryan Duxbury)
+THRIFT-845 Java async client does not respect timeout (Ning Liang)
+THRIFT-870 Java Java constants don't get Javadoc comments (Bryan Duxbury)
+THRIFT-873 Java Java tests fail due to Too many open files (Todd Lipcon)
+THRIFT-876 Java Add SASL support (Aaron T. Myers)
+THRIFT-879 Java Remove @Override from TUnion.clear (Dave Engberg)
+THRIFT-882 Java deep copy of binary fields does not copy ByteBuffer characteristics (arrayOffset, position) (Bryan Duxbury)
+THRIFT-888 Java async client should also have nonblocking connect (Eric Jensen)
+THRIFT-890 Java Java tutorial doesn't work (Todd Lipcon)
+THRIFT-894 Java Make default accessors for binary fields return byte[]; provide new accessors to get ByteBuffer version (Bryan Duxbury)
+THRIFT-896 Java TNonblockingSocket.isOpen() returns true even after close() (Eric Jensen)
+THRIFT-907 Java libfb303 doesn't compile in 0.4.0 (Todd Lipcon)
+THRIFT-912 Java Improvements and bug fixes to SASL implementation (Todd Lipcon)
+THRIFT-917 Java THsHaServer should not accept an ExecutorService without catching RejectedExecutionException (Ed Ceaser)
+THRIFT-931 Java Use log4j for Java tests (Todd Lipcon)
+THRIFT-880 JavaME JavaME code generator and runtime library (Dave Engberg)
+THRIFT-846 JavaScript JavaScript Test Framework: extended Testcases (Roger Meier)
+THRIFT-885 JavaScript Url encoded strings never get decoded? How do we fix this? (T Jake Luciani)
+THRIFT-911 JavaScript (JavaScript compiler) Const structs, maps, sets, and lists generate a trailing comma (T Jake Luciani)
+THRIFT-860 OCaml copy method and reset method (Lev Walkin)
+THRIFT-682 PHP PHP extension doesn't compile on Mac OS X (Bryan Duxbury)
+THRIFT-851 PHP php extension fails to compile on centos 5.x (Todd Lipcon)
+THRIFT-840 Perl Perl protocol handler could be more robust against unrecognised types (Conrad Hughes)
+THRIFT-758 Perl incorrect deference in exception handling (Yann Kerherve)
+THRIFT-257 Python Support validation of required fields (Esteve Fernandez)
+THRIFT-335 Python Compact Protocol for Python (David Reiss)
+THRIFT-596 Python Make Python's TBufferedTransport use a configurable input buffer (David Reiss)
+THRIFT-597 Python Python THttpServer performance improvements (David Reiss)
+THRIFT-598 Python Allow Python's threading servers to use daemon threads (David Reiss)
+THRIFT-666 Python Allow the handler to override HTTP responses in THttpServer (David Reiss)
+THRIFT-673 Python Generated Python code has whitespace issues (Ian Eure)
+THRIFT-721 Python THttpClient ignores url parameters (Thomas Kho)
+THRIFT-824 Python TApplicationException.__str__() refers to class constants as globals (Peter Schuller)
+THRIFT-855 Python Include optimized compiled python objects in install (Anthony Molinaro)
+THRIFT-859 Python Allow py:twisted to be generated in different namespace than py (Bruce Lowekamp)
+THRIFT-869 Python TSocket.py on Mac (and FreeBSD) doesn't handle ECONNRESET from recv() (Steven Knight)
+THRIFT-875 Python Include python setup.cfg in dist (Anthony Molinaro)
+THRIFT-610 Ruby binary_protocol.rb segfaults [line 86] (Unassigned)
+THRIFT-899 Ruby Ruby read timeouts can sometimes be 2x what they should be (Ryan King)
+THRIFT-909 Ruby allow block argument to struct constructor (Michael Stockton)
+THRIFT-456 Test Suite Bad IP address string in test/cpp/src/main.cpp (Rush Manbert)
+
+
+Thrift 0.4.0 - Incubating
+--------------------------------------------------------------------------------
+THRIFT-650 Build Make Check fails on Centos/OSX with 0.2.0 tarball (Anthony Molinaro)
+THRIFT-770 Build Get 'make dist' to work without first compiling source code (Anthony Molinaro)
+THRIFT-160 C# Created THttpTransport for the C# library based on WebHttpRequest (Michael Greene)
+THRIFT-834 C# THttpClient resends contents of message after transport errors (Anatoly Fayngelerin)
+THRIFT-247 C++ THttpServer Transport (Unassigned)
+THRIFT-676 C++ Change C++ code generator so that generated classes can be wrapped with SWIG (Unassigned)
+THRIFT-570 Compiler Thrift compiler does not error when duplicate method names are present (Bruce Simpson)
+THRIFT-808 Compiler Segfault when constant declaration references a struct field that doesn't exist (Bryan Duxbury)
+THRIFT-646 Erlang Erlang library is missing install target (Anthony Molinaro)
+THRIFT-544 General multiple enums with the same key generate invalid code (Ben Taitelbaum)
+THRIFT-434 General ruby compiler should warn when a reserved word is used (Michael Stockton)
+THRIFT-799 General Files missing proper Apache license header (Bryan Duxbury)
+THRIFT-832 HTML HTML generator shows unspecified struct fields as 'required' (Bryan Duxbury)
+THRIFT-226 Java Collections with binary keys or values break equals() (Bryan Duxbury)
+THRIFT-484 Java Ability to use a slice of a buffer instead of a direct byte[] for binary fields (Bryan Duxbury)
+THRIFT-714 Java maxWorkerThreads parameter to THsHaServer has no effect (Bryan Duxbury)
+THRIFT-751 Java Add clear() method to TBase (Bryan Duxbury)
+THRIFT-765 Java Improved string encoding and decoding performance (Bryan Duxbury)
+THRIFT-768 Java Async client for Java (Bryan Duxbury)
+THRIFT-774 Java TDeserializer should provide a partialDeserialize method for primitive types (Piotr Kozikowski)
+THRIFT-783 Java .equals java method is broken on structs containing binary-type fields (Unassigned)
+THRIFT-804 Java CompareTo is broken for unions set to map, set, or list (Bryan Duxbury)
+THRIFT-814 Java Include a TServlet in the standard Thrift distribution (Mathias Herberts)
+THRIFT-818 Java Async client doesn't send method args (Bryan Duxbury)
+THRIFT-830 Java Switch binary field implementation from byte[] to ByteBuffer (Bryan Duxbury)
+THRIFT-831 Java FramedTransport implementation that reuses its buffers (Bryan Duxbury)
+THRIFT-833 Java build.xml in lib/java is missing a classpathref attribute for the javadoc task (Bryan Duxbury)
+THRIFT-836 Java Race condition causes CancelledKeyException in TAsyncClientManager (Bryan Duxbury)
+THRIFT-842 Java Upgrade to current version of commons-lang (2.5 instead of 2.4) and/or change dependency in ivy.xml to not be exact (Bryan Duxbury)
+THRIFT-815 JavaScript Deserialization of lists is critically broken. (T Jake Luciani)
+THRIFT-827 OCaml OCaml generator to take default values into account (Lev Walkin)
+THRIFT-647 PHP PHP library is missing install target (Anthony Molinaro)
+THRIFT-682 PHP PHP extension doesn't compile on Mac OS X (Bryan Duxbury)
+THRIFT-718 PHP Thrift PHP library includes closing tags and extraneous whitespace (Nicholas Telford)
+THRIFT-778 PHP PHP socket listening server (Nick Jones)
+THRIFT-780 PHP PHP extension sometimes causes an abort with two exceptions at the same time (David Reiss)
+THRIFT-837 PHP PHP accelerator bug for writes > 8k (Thomas Kho)
+THRIFT-782 Perl Perl code for writing containers doesn't count length of write*Begin or write*End (Conrad Hughes)
+THRIFT-395 Python Python library + compiler does not support unicode strings (Unassigned)
+THRIFT-133 Ruby 'namespace ruby' should error out, or be an alias to 'namespace rb' (Bryan Duxbury)
+THRIFT-664 Ruby Ruby extension fails to build with Ruby 1.9.1 (Rajesh Malepati)
+THRIFT-699 Ruby Excise unused "native protocol method table" stuff from thrift_native (Bryan Duxbury)
+THRIFT-767 Ruby ruby compiler does not keep comments for enum values (Bryan Duxbury)
+THRIFT-811 Ruby http_client_transport.rb: allow custom http headers (Tony Kamenick)
+THRIFT-459 Ruby Ruby installation always tries to write to /Library/Ruby/site (Matthieu Imbert)
+
+
+Thrift 0.1.0 - Incubating (not released)
+--------------------------------------------------------------------------------
+Compatibility Breaking Changes:
+ C++:
+ * It's quite possible that regenerating code and rebuilding will be
+ required. Make sure your headers match your libs!
+
+ Java:
+
+ Python:
+
+ Ruby:
+ * Generated files now have underscored names [THRIFT-421]
+ * The library has been rearranged to be more Ruby-like [THRIFT-276]
+
+ Erlang:
+ * Generated code will have to be regenerated, and the new code will
+ have to be deployed atomically with the new library code [THRIFT-136]
+
+New Features and Bug Fixes:
+ C++:
+ * Support for TCompactProtocol [THRIFT-333]
+
+ Java:
+ * Support for TCompactProtocol [THRIFT-110]
+
+ Python:
+ * Support for Twisted [THRIFT-148]
+
+ Ruby:
+ * Support for TCompactProtocol [THRIFT-332]
+
diff --git a/vendor/github.com/apache/thrift/CMakeLists.txt b/vendor/github.com/apache/thrift/CMakeLists.txt
new file mode 100644
index 000000000..9f57a66c4
--- /dev/null
+++ b/vendor/github.com/apache/thrift/CMakeLists.txt
@@ -0,0 +1,124 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+cmake_minimum_required(VERSION 3.1)
+
+# CMake 3.1 supports C++ standards selection with CMAKE_CXX_STANDARD
+# If you need CMake 3.1+ for Ubuntu 14.04, try
+# https://launchpad.net/~george-edison55/+archive/ubuntu/cmake-3.x
+# If you need CMake 3.1+ for debian "jessie", get it from jessie-backports
+# Otherwise
+# http://cmake.org
+
+project("Apache Thrift")
+
+set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}" "${CMAKE_CURRENT_SOURCE_DIR}/build/cmake")
+
+# TODO: add `git rev-parse --short HEAD`
+# Read the version information from the Autoconf file
+file (STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/configure.ac" CONFIGURE_AC REGEX "AC_INIT\\(.*\\)" )
+
+# The following variable is used in the version.h.in file
+string(REGEX REPLACE "AC_INIT\\(\\[.*\\], \\[([0-9]+\\.[0-9]+\\.[0-9]+(-dev)?)\\]\\)" "\\1" PACKAGE_VERSION ${CONFIGURE_AC})
+message(STATUS "Parsed Thrift package version: ${PACKAGE_VERSION}")
+
+# These are internal to CMake
+string(REGEX REPLACE "([0-9]+\\.[0-9]+\\.[0-9]+)(-dev)?" "\\1" thrift_VERSION ${PACKAGE_VERSION})
+string(REGEX REPLACE "([0-9]+)\\.[0-9]+\\.[0-9]+" "\\1" thrift_VERSION_MAJOR ${thrift_VERSION})
+string(REGEX REPLACE "[0-9]+\\.([0-9])+\\.[0-9]+" "\\1" thrift_VERSION_MINOR ${thrift_VERSION})
+string(REGEX REPLACE "[0-9]+\\.[0-9]+\\.([0-9]+)" "\\1" thrift_VERSION_PATCH ${thrift_VERSION})
+message(STATUS "Parsed Thrift version: ${thrift_VERSION} (${thrift_VERSION_MAJOR}.${thrift_VERSION_MINOR}.${thrift_VERSION_PATCH})")
+
+# Some default settings
+include(DefineCMakeDefaults)
+
+# Build time options are defined here
+include(DefineOptions)
+include(DefineInstallationPaths)
+
+# Based on the options set some platform specifics
+include(DefinePlatformSpecifc)
+
+# Generate the config.h file
+include(ConfigureChecks)
+
+# Package it
+include(CPackConfig)
+
+
+find_package(Threads)
+
+include(CTest)
+if(BUILD_TESTING)
+ message(STATUS "Building with unittests")
+
+ enable_testing()
+ # Define "make check" as alias for "make test"
+ add_custom_target(check COMMAND ctest)
+else ()
+ message(STATUS "Building without tests")
+endif ()
+
+if(BUILD_COMPILER)
+ if(NOT EXISTS ${THRIFT_COMPILER})
+ set(THRIFT_COMPILER $<TARGET_FILE:thrift-compiler>)
+ endif()
+ add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/compiler/cpp)
+elseif(EXISTS ${THRIFT_COMPILER})
+ add_executable(thrift-compiler IMPORTED)
+ set_property(TARGET thrift-compiler PROPERTY IMPORTED_LOCATION ${THRIFT_COMPILER})
+endif()
+
+if(BUILD_CPP)
+ add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/lib/cpp)
+ if(BUILD_TUTORIALS)
+ add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/tutorial/cpp)
+ endif()
+ if(BUILD_TESTING)
+ if(WITH_LIBEVENT AND WITH_ZLIB AND WITH_OPENSSL)
+ add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/test/cpp)
+ else()
+ message(WARNING "libevent and/or ZLIB and/or OpenSSL not found or disabled; will not build some tests")
+ endif()
+ endif()
+endif()
+
+if(BUILD_C_GLIB)
+ add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/lib/c_glib)
+endif()
+
+if(BUILD_JAVA)
+ add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/lib/java)
+endif()
+
+if(BUILD_PYTHON)
+ add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/lib/py)
+ if(BUILD_TESTING)
+ add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/test/py)
+ endif()
+endif()
+
+if(BUILD_HASKELL)
+ add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/lib/hs)
+ if(BUILD_TESTING)
+ add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/test/hs)
+ endif()
+endif()
+
+PRINT_CONFIG_SUMMARY()
diff --git a/vendor/github.com/apache/thrift/CONTRIBUTING.md b/vendor/github.com/apache/thrift/CONTRIBUTING.md
new file mode 100644
index 000000000..316da9a00
--- /dev/null
+++ b/vendor/github.com/apache/thrift/CONTRIBUTING.md
@@ -0,0 +1,49 @@
+## How to contribute
+ 1. Help to review and verify existing patches
+ 1. Make sure your issue is not already in the [Jira issue tracker](http://issues.apache.org/jira/browse/THRIFT)
+ 1. If not, create a ticket describing the change you're proposing in the [Jira issue tracker](http://issues.apache.org/jira/browse/THRIFT)
+ 1. Contribute your patch using one of the two methods below
+
+### Contributing via a patch
+
+1. Check out the latest version of the source code
+
+ * git clone https://git-wip-us.apache.org/repos/asf/thrift.git thrift
+
+1. Modify the source to include the improvement/bugfix
+
+ * Remember to provide *tests* for all submitted changes
+ * When bugfixing: add a test that isolates the bug *before* applying the change that fixes it
+ * Verify that you follow [Thrift Coding Standards](/docs/coding_standards) (you can run 'make style', which ensures proper format for some languages)
+
+1. Create a patch from project root directory (e.g. you@dev:~/thrift $ ):
+
+ * git diff > ../thrift-XXX-my-new-feature.patch
+
+1. Attach the newly generated patch to the issue
+1. Wait for other contributors or committers to review your new addition
+1. Wait for a committer to commit your patch
+
+### Contributing via GitHub pull requests
+
+1. Create a fork of http://github.com/apache/thrift
+1. Create a branch for your changes (best practice is to use the issue ID as the branch name, e.g. THRIFT-9999)
+1. Modify the source to include the improvement/bugfix
+
+ * Remember to provide *tests* for all submitted changes
+ * When bugfixing: add a test that isolates the bug *before* applying the change that fixes it
+ * Verify that you follow [Thrift Coding Standards](/docs/coding_standards) (you can run 'make style', which ensures proper format for some languages)
+ * Verify that your change works on other platforms by adding a GitHub service hook to [Travis CI](http://docs.travis-ci.com/user/getting-started/#Step-one%3A-Sign-in) and [AppVeyor](http://www.appveyor.com/docs)
+
+1. Commit and push changes to your branch (please use issue name and description as commit title, e.g. THRIFT-9999 make it perfect)
+1. Issue a pull request with the Jira ticket number you are working on in its name
+1. Wait for other contributors or committers to review your new addition
+1. Wait for a committer to commit your patch
+
+### More info
+
+ Plenty of information on why and how to contribute is available on the Apache Software Foundation (ASF) web site. In particular, we recommend the following:
+
+ * [Contributors Tech Guide](http://www.apache.org/dev/contributors)
+ * [Get involved!](http://www.apache.org/foundation/getinvolved.html)
+ * [Legal aspects on Submission of Contributions (Patches)](http://www.apache.org/licenses/LICENSE-2.0.html#contributions)
diff --git a/vendor/github.com/apache/thrift/Dockerfile b/vendor/github.com/apache/thrift/Dockerfile
new file mode 100644
index 000000000..2413b9181
--- /dev/null
+++ b/vendor/github.com/apache/thrift/Dockerfile
@@ -0,0 +1,61 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# Goal: provide a thrift-compiler Docker image
+#
+# Usage:
+# docker run -v "${PWD}:/data" thrift/thrift-compiler -gen cpp -o /data/ /data/test/ThriftTest.thrift
+#
+# further details on Docker for Thrift are here: build/docker/
+#
+# TODO: push to apache/thrift-compiler instead of thrift/thrift-compiler
+
+FROM debian:jessie
+MAINTAINER Apache Thrift
+
+ENV DEBIAN_FRONTEND noninteractive
+
+ADD . /thrift
+
+RUN buildDeps=" \
+ flex \
+ bison \
+ g++ \
+ make \
+ cmake \
+ curl \
+ "; \
+ apt-get update && apt-get install -y --no-install-recommends $buildDeps \
+ && mkdir /tmp/cmake-build && cd /tmp/cmake-build \
+ && cmake \
+ -DBUILD_COMPILER=ON \
+ -DBUILD_LIBRARIES=OFF \
+ -DBUILD_TESTING=OFF \
+ -DBUILD_EXAMPLES=OFF \
+ /thrift \
+ && cmake --build . --config Release \
+ && make install \
+ && curl -k -sSL "https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz" -o /tmp/go.tar.gz \
+ && tar xzf /tmp/go.tar.gz -C /tmp \
+ && cp /tmp/go/bin/gofmt /usr/bin/gofmt \
+ && apt-get purge -y --auto-remove $buildDeps \
+ && apt-get clean \
+ && rm -rf /tmp/* \
+ && rm -rf /var/lib/apt/lists/*
+
+ENTRYPOINT ["thrift"]
diff --git a/vendor/github.com/apache/thrift/LICENSE b/vendor/github.com/apache/thrift/LICENSE
new file mode 100644
index 000000000..3b6d7d74c
--- /dev/null
+++ b/vendor/github.com/apache/thrift/LICENSE
@@ -0,0 +1,239 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+--------------------------------------------------
+SOFTWARE DISTRIBUTED WITH THRIFT:
+
+The Apache Thrift software includes a number of subcomponents with
+separate copyright notices and license terms. Your use of the source
+code for the these subcomponents is subject to the terms and
+conditions of the following licenses.
+
+--------------------------------------------------
+Portions of the following files are licensed under the MIT License:
+
+ lib/erl/src/Makefile.am
+
+Please see doc/otp-base-license.txt for the full terms of this license.
+
+--------------------------------------------------
+For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components:
+
+# Copyright (c) 2007 Thomas Porschberg
+#
+# Copying and distribution of this file, with or without
+# modification, are permitted in any medium without royalty provided
+# the copyright notice and this notice are preserved.
+
+--------------------------------------------------
+For the lib/nodejs/lib/thrift/json_parse.js:
+
+/*
+ json_parse.js
+ 2015-05-02
+ Public Domain.
+ NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+
+*/
+(By Douglas Crockford)
+--------------------------------------------------
diff --git a/vendor/github.com/apache/thrift/Makefile.am b/vendor/github.com/apache/thrift/Makefile.am
new file mode 100755
index 000000000..89a0adcb9
--- /dev/null
+++ b/vendor/github.com/apache/thrift/Makefile.am
@@ -0,0 +1,131 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+ACLOCAL_AMFLAGS = -I ./aclocal
+
+if WITH_PLUGIN
+# To enable bootstrap, build order is lib/cpp -> compiler -> others
+SUBDIRS = lib/cpp compiler/cpp lib
+if WITH_TESTS
+SUBDIRS += lib/cpp/test
+endif
+else
+SUBDIRS = compiler/cpp lib
+endif
+
+if WITH_TESTS
+SUBDIRS += test
+endif
+
+if WITH_TUTORIAL
+SUBDIRS += tutorial
+endif
+
+dist-hook:
+ find $(distdir) -type f \( -iname ".DS_Store" -or -iname "._*" -or -iname ".gitignore" \) | xargs rm -rf
+ find $(distdir) -type d \( -iname ".deps" -or -iname ".libs" \) | xargs rm -rf
+ find $(distdir) -type d \( -iname ".svn" -or -iname ".git" \) | xargs rm -rf
+
+print-version:
+ @echo $(VERSION)
+
+.PHONY: precross cross
+precross-%: all
+ $(MAKE) -C $* precross
+precross: all precross-test precross-lib
+
+empty :=
+space := $(empty) $(empty)
+comma := ,
+
+CROSS_LANGS = @MAYBE_CPP@ @MAYBE_C_GLIB@ @MAYBE_D@ @MAYBE_JAVA@ @MAYBE_CSHARP@ @MAYBE_PYTHON@ @MAYBE_PY3@ @MAYBE_RUBY@ @MAYBE_HASKELL@ @MAYBE_PERL@ @MAYBE_PHP@ @MAYBE_GO@ @MAYBE_NODEJS@ @MAYBE_DART@ @MAYBE_ERLANG@ @MAYBE_LUA@ @MAYBE_RS@
+CROSS_LANGS_COMMA_SEPARATED = $(subst $(space),$(comma),$(CROSS_LANGS))
+
+if WITH_PY3
+CROSS_PY=$(PYTHON3)
+else
+CROSS_PY=$(PYTHON)
+endif
+
+if WITH_PYTHON
+crossfeature: precross
+ $(CROSS_PY) test/test.py --retry-count 3 --features .* --skip-known-failures --server $(CROSS_LANGS_COMMA_SEPARATED)
+else
+# feature test needs python build
+crossfeature:
+endif
+
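+# The pattern rule below drives the cross-language test suite: the stem ("$*")
+# is handed to test/test.py as a regex that restricts which server/client
+# combinations run, and `make cross` (defined just below as cross-.*) runs the
+# full matrix.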
+cross-%: precross crossfeature
+ $(CROSS_PY) test/test.py --retry-count 3 --skip-known-failures --server $(CROSS_LANGS_COMMA_SEPARATED) --client $(CROSS_LANGS_COMMA_SEPARATED) --regex "$*"
+
+cross: cross-.*
+
+TIMES = 1 2 3
+fail: precross
+ $(CROSS_PY) test/test.py || true
+ $(CROSS_PY) test/test.py --update-expected-failures=overwrite
+ $(foreach var,$(TIMES),test/test.py -s || true;test/test.py --update-expected-failures=merge;)
+
+codespell_skip_files = \
+ *.jar \
+ *.class \
+ *.so \
+ *.a \
+ *.la \
+ *.o \
+ *.p12 \
+ *OCamlMakefile \
+ .keystore \
+ .truststore \
+ CHANGES \
+ config.sub \
+ configure \
+ depcomp \
+ libtool.m4 \
+ output.* \
+ rebar \
+ thrift
+
+skipped_files = $(subst $(space),$(comma),$(codespell_skip_files))
+
+style-local:
+ codespell --write-changes --skip=$(skipped_files) --disable-colors
+
+EXTRA_DIST = \
+ .clang-format \
+ .editorconfig \
+ .travis.yml \
+ appveyor.yml \
+ bower.json \
+ build \
+ CMakeLists.txt \
+ composer.json \
+ contrib \
+ CONTRIBUTING.md \
+ debian \
+ doc \
+ doap.rdf \
+ package.json \
+ sonar-project.properties \
+ Dockerfile \
+ LICENSE \
+ CHANGES \
+ NOTICE \
+ README.md \
+ Thrift.podspec
diff --git a/vendor/github.com/apache/thrift/NOTICE b/vendor/github.com/apache/thrift/NOTICE
new file mode 100644
index 000000000..c23995a23
--- /dev/null
+++ b/vendor/github.com/apache/thrift/NOTICE
@@ -0,0 +1,5 @@
+Apache Thrift
+Copyright 2006-2010 The Apache Software Foundation.
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
\ No newline at end of file
diff --git a/vendor/github.com/apache/thrift/README.md b/vendor/github.com/apache/thrift/README.md
new file mode 100644
index 000000000..a55389a02
--- /dev/null
+++ b/vendor/github.com/apache/thrift/README.md
@@ -0,0 +1,164 @@
+Apache Thrift
+=============
+
+Last Modified: 2014-03-16
+
+License
+=======
+
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+
+Introduction
+============
+
+Thrift is a lightweight, language-independent software stack with an
+associated code generation mechanism for RPC. Thrift provides clean
+abstractions for data transport, data serialization, and application
+level processing. The code generation system takes a simple definition
+language as its input and generates code across programming languages that
+uses the abstracted stack to build interoperable RPC clients and servers.
+
+Thrift is specifically designed to support non-atomic version changes
+across client and server code.
+
+For more details on Thrift's design and implementation, take a gander at
+the Thrift whitepaper included in this distribution or at the README.md files
+in your particular subdirectory of interest.
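+
+As a purely illustrative sketch (the struct and service names below are made up
+for this example and are not part of the distribution), a Thrift definition
+file looks roughly like this:
+
+    struct Work {
+      1: i32 num1,
+      2: i32 num2
+    }
+
+    service Calculator {
+      i32 add(1: Work w)
+    }
+
+Feeding such a definition to the Thrift compiler produces client and server
+stubs for each requested target language.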
+
+Hierarchy
+=========
+
+thrift/
+
+ compiler/
+
+ Contains the Thrift compiler, implemented in C++.
+
+ lib/
+
+ Contains the Thrift software library implementation, subdivided by
+ language of implementation.
+
+ cpp/
+ go/
+ java/
+ php/
+ py/
+ rb/
+
+ test/
+
+ Contains sample Thrift files and test code across the target programming
+ languages.
+
+ tutorial/
+
+ Contains a basic tutorial that will teach you how to develop software
+ using Thrift.
+
+Requirements
+============
+
+See http://thrift.apache.org/docs/install for an up-to-date list of build requirements.
+
+Resources
+=========
+
+More information about Thrift can be obtained on the Thrift webpage at:
+
+ http://thrift.apache.org
+
+Acknowledgments
+===============
+
+Thrift was inspired by pillar, a lightweight RPC tool written by Adam D'Angelo,
+and also by Google's protocol buffers.
+
+Installation
+============
+
+If you are building for the first time out of the source repository, you will
+need to generate the configure scripts. (This is not necessary if you
+downloaded a tarball.) From the top directory, do:
+
+ ./bootstrap.sh
+
+Once the configure scripts are generated, thrift can be configured.
+From the top directory, do:
+
+ ./configure
+
+You may need to specify the location of the boost files explicitly.
+If you installed boost in /usr/local, you would run configure as follows:
+
+ ./configure --with-boost=/usr/local
+
+Note that by default the thrift C++ library is typically built with debugging
+symbols included. If you want to customize these options you should use the
+CXXFLAGS option in configure, as such:
+
+ ./configure CXXFLAGS='-g -O2'
+ ./configure CFLAGS='-g -O2'
+ ./configure CPPFLAGS='-DDEBUG_MY_FEATURE'
+
+To enable gcov, which requires the options -fprofile-arcs and -ftest-coverage, configure with:
+
+ ./configure --enable-coverage
+
+Run ./configure --help to see other configuration options
+
+Please be aware that the Python library will ignore the --prefix option
+and just install wherever Python's distutils puts it (usually along
+the lines of /usr/lib/pythonX.Y/site-packages/). If you need to control
+where the Python modules are installed, set the PY_PREFIX variable.
+(DESTDIR is respected for Python and C++.)
+
+Make thrift:
+
+ make
+
+From the top directory, become superuser and do:
+
+ make install
+
+Note that some language packages must be installed manually using build tools
+better suited to those languages (at the time of this writing, this applies
+to Java, Ruby, PHP).
+
+Look for the README.md file in the lib/<language>/ folder for more details on the
+installation of each language library package.
+
+Testing
+=======
+
+There are a large number of client library tests that can all be run
+from the top-level directory.
+
+ make -k check
+
+This will make all of the libraries (as necessary), and run through
+the unit tests defined in each of the client libraries. If a single
+language fails, the make check will continue on and provide a synopsis
+at the end.
+
+To run the cross-language test suite, please run:
+
+ make cross
+
+This will run a set of tests that use different language clients and
+servers.
diff --git a/vendor/github.com/apache/thrift/Thrift.podspec b/vendor/github.com/apache/thrift/Thrift.podspec
new file mode 100644
index 000000000..39d378053
--- /dev/null
+++ b/vendor/github.com/apache/thrift/Thrift.podspec
@@ -0,0 +1,18 @@
+Pod::Spec.new do |s|
+ s.name = "Thrift"
+ s.version = "1.0.0"
+ s.summary = "Apache Thrift is a lightweight, language-independent software stack with an associated code generation mechanism for RPC."
+ s.description = <<-DESC
+The Apache Thrift software framework, for scalable cross-language services development, combines a software stack with a code generation engine to build services that work efficiently and seamlessly between C++, Java, Python, PHP, Ruby, Erlang, Perl, Haskell, C#, Cocoa, JavaScript, Node.js, Smalltalk, OCaml and Delphi and other languages.
+ DESC
+ s.homepage = "http://thrift.apache.org"
+ s.license = { :type => 'Apache License, Version 2.0', :url => 'https://raw.github.com/apache/thrift/thrift-0.9.0/LICENSE' }
+ s.author = { "The Apache Software Foundation" => "apache@apache.org" }
+ s.requires_arc = true
+ s.ios.deployment_target = '7.0'
+ s.osx.deployment_target = '10.8'
+ s.ios.framework = 'CFNetwork'
+ s.osx.framework = 'CoreServices'
+ s.source = { :git => "https://github.com/apache/thrift.git", :tag => "thrift-1.0.0" }
+ s.source_files = 'lib/cocoa/src/**/*.{h,m,swift}'
+end
\ No newline at end of file
diff --git a/vendor/github.com/apache/thrift/aclocal/ac_prog_bison.m4 b/vendor/github.com/apache/thrift/aclocal/ac_prog_bison.m4
new file mode 100644
index 000000000..4d1198b94
--- /dev/null
+++ b/vendor/github.com/apache/thrift/aclocal/ac_prog_bison.m4
@@ -0,0 +1,54 @@
+dnl
+dnl Check Bison version
+dnl AC_PROG_BISON([MIN_VERSION=2.4])
+dnl
+dnl Will define BISON_USE_PARSER_H_EXTENSION if Automake is < 1.11
+dnl for use with .h includes.
+dnl
+
+AC_DEFUN([AC_PROG_BISON], [
+if test "x$1" = "x" ; then
+ bison_required_version="2.4"
+else
+ bison_required_version="$1"
+fi
+
+AC_CHECK_PROG(have_prog_bison, [bison], [yes],[no])
+
+AC_DEFINE_UNQUOTED([BISON_VERSION], [0.0], [Bison version if bison is not available])
+
+#Do not use *.h extension for parser header files, use newer *.hh
+bison_use_parser_h_extension=false
+
+if test "$have_prog_bison" = "yes" ; then
+ AC_MSG_CHECKING([for bison version >= $bison_required_version])
+ bison_version=`bison --version | head -n 1 | cut '-d ' -f 4`
+ AC_DEFINE_UNQUOTED([BISON_VERSION], [$bison_version], [Defines bison version])
+ if test "$bison_version" \< "$bison_required_version" ; then
+ BISON=:
+ AC_MSG_RESULT([no])
+ AC_MSG_ERROR([Bison version $bison_required_version or higher must be installed on the system!])
+ else
+ AC_MSG_RESULT([yes])
+ BISON=bison
+ AC_SUBST(BISON)
+
+ #Verify automake version 1.11 headers for yy files are .h, > 1.12 uses .hh
+ automake_version=`automake --version | head -n 1 | cut '-d ' -f 4`
+ AC_DEFINE_UNQUOTED([AUTOMAKE_VERSION], [$automake_version], [Defines automake version])
+
+ if test "$automake_version" \< "1.12" ; then
+ #Use *.h extension for parser header file
+ bison_use_parser_h_extension=true
+ echo "Automake version < 1.12"
+ AC_DEFINE([BISON_USE_PARSER_H_EXTENSION], [1], [Use *.h extension for parser header file])
+ fi
+ fi
+else
+ BISON=:
+ AC_MSG_RESULT([NO])
+fi
+
+AM_CONDITIONAL([BISON_USE_PARSER_H_EXTENSION], [test x$bison_use_parser_h_extension = xtrue])
+AC_SUBST(BISON)
+])
diff --git a/vendor/github.com/apache/thrift/aclocal/ax_boost_base.m4 b/vendor/github.com/apache/thrift/aclocal/ax_boost_base.m4
new file mode 100644
index 000000000..b496020e4
--- /dev/null
+++ b/vendor/github.com/apache/thrift/aclocal/ax_boost_base.m4
@@ -0,0 +1,272 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_boost_base.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_BOOST_BASE([MINIMUM-VERSION], [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
+#
+# DESCRIPTION
+#
+# Test for the Boost C++ libraries of a particular version (or newer)
+#
+# If no path to the installed boost library is given the macro searches
+# under /usr, /usr/local, /opt and /opt/local and evaluates the
+# $BOOST_ROOT environment variable. Further documentation is available at
+# <http://randspringer.de/boost>.
+#
+# This macro calls:
+#
+# AC_SUBST(BOOST_CPPFLAGS) / AC_SUBST(BOOST_LDFLAGS)
+#
+# And sets:
+#
+# HAVE_BOOST
+#
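+# An illustrative call from a configure.ac (the version number and error
+# message here are examples only):
+#
+#   AX_BOOST_BASE([1.53.0], [], [AC_MSG_ERROR([could not find a usable Boost])])
+#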
+# LICENSE
+#
+# Copyright (c) 2008 Thomas Porschberg
+# Copyright (c) 2009 Peter Adolphs
+#
+# Copying and distribution of this file, with or without modification, are
+# permitted in any medium without royalty provided the copyright notice
+# and this notice are preserved. This file is offered as-is, without any
+# warranty.
+
+#serial 23
+
+AC_DEFUN([AX_BOOST_BASE],
+[
+AC_ARG_WITH([boost],
+ [AS_HELP_STRING([--with-boost@<:@=ARG@:>@],
+ [use Boost library from a standard location (ARG=yes),
+ from the specified location (ARG=<path>),
+ or disable it (ARG=no)
+ @<:@ARG=yes@:>@ ])],
+ [
+ if test "$withval" = "no"; then
+ want_boost="no"
+ elif test "$withval" = "yes"; then
+ want_boost="yes"
+ ac_boost_path=""
+ else
+ want_boost="yes"
+ ac_boost_path="$withval"
+ fi
+ ],
+ [want_boost="yes"])
+
+
+AC_ARG_WITH([boost-libdir],
+ AS_HELP_STRING([--with-boost-libdir=LIB_DIR],
+ [Force given directory for boost libraries. Note that this will override library path detection, so use this parameter only if default library detection fails and you know exactly where your boost libraries are located.]),
+ [
+ if test -d "$withval"
+ then
+ ac_boost_lib_path="$withval"
+ else
+ AC_MSG_ERROR(--with-boost-libdir expected directory name)
+ fi
+ ],
+ [ac_boost_lib_path=""]
+)
+
+if test "x$want_boost" = "xyes"; then
+ boost_lib_version_req=ifelse([$1], ,1.20.0,$1)
+ boost_lib_version_req_shorten=`expr $boost_lib_version_req : '\([[0-9]]*\.[[0-9]]*\)'`
+ boost_lib_version_req_major=`expr $boost_lib_version_req : '\([[0-9]]*\)'`
+ boost_lib_version_req_minor=`expr $boost_lib_version_req : '[[0-9]]*\.\([[0-9]]*\)'`
+ boost_lib_version_req_sub_minor=`expr $boost_lib_version_req : '[[0-9]]*\.[[0-9]]*\.\([[0-9]]*\)'`
+ if test "x$boost_lib_version_req_sub_minor" = "x" ; then
+ boost_lib_version_req_sub_minor="0"
+ fi
+ WANT_BOOST_VERSION=`expr $boost_lib_version_req_major \* 100000 \+ $boost_lib_version_req_minor \* 100 \+ $boost_lib_version_req_sub_minor`
+ AC_MSG_CHECKING(for boostlib >= $boost_lib_version_req)
+ succeeded=no
+
+ dnl On 64-bit systems check for system libraries in both lib64 and lib.
+ dnl The former is specified by FHS, but e.g. Debian does not adhere to
+ dnl this (as it raises problems for generic multi-arch support).
+ dnl The last entry in the list is chosen by default when no libraries
+ dnl are found, e.g. when only header-only libraries are installed!
+ libsubdirs="lib"
+ ax_arch=`uname -m`
+ case $ax_arch in
+ x86_64|ppc64|s390x|sparc64|aarch64)
+ libsubdirs="lib64 lib lib64"
+ ;;
+ esac
+
+ dnl allow for real multi-arch paths e.g. /usr/lib/x86_64-linux-gnu. Give
+ dnl them priority over the other paths since, if libs are found there, they
+ dnl are almost assuredly the ones desired.
+ AC_REQUIRE([AC_CANONICAL_HOST])
+ libsubdirs="lib/${host_cpu}-${host_os} $libsubdirs"
+
+ case ${host_cpu} in
+ i?86)
+ libsubdirs="lib/i386-${host_os} $libsubdirs"
+ ;;
+ esac
+
+ dnl first we check the system location for boost libraries
+ dnl this location is chosen if boost libraries are installed with the --layout=system option
+ dnl or if you install boost with RPM
+ if test "$ac_boost_path" != ""; then
+ BOOST_CPPFLAGS="-I$ac_boost_path/include"
+ for ac_boost_path_tmp in $libsubdirs; do
+ if test -d "$ac_boost_path"/"$ac_boost_path_tmp" ; then
+ BOOST_LDFLAGS="-L$ac_boost_path/$ac_boost_path_tmp"
+ break
+ fi
+ done
+ elif test "$cross_compiling" != yes; then
+ for ac_boost_path_tmp in $lt_sysroot/usr $lt_sysroot/usr/local $lt_sysroot/opt $lt_sysroot/opt/local ; do
+ if test -d "$ac_boost_path_tmp/include/boost" && test -r "$ac_boost_path_tmp/include/boost"; then
+ for libsubdir in $libsubdirs ; do
+ if ls "$ac_boost_path_tmp/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi
+ done
+ BOOST_LDFLAGS="-L$ac_boost_path_tmp/$libsubdir"
+ BOOST_CPPFLAGS="-I$ac_boost_path_tmp/include"
+ break;
+ fi
+ done
+ fi
+
+ dnl overwrite ld flags if we have required special directory with
+ dnl --with-boost-libdir parameter
+ if test "$ac_boost_lib_path" != ""; then
+ BOOST_LDFLAGS="-L$ac_boost_lib_path"
+ fi
+
+ CPPFLAGS_SAVED="$CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
+ export CPPFLAGS
+
+ LDFLAGS_SAVED="$LDFLAGS"
+ LDFLAGS="$LDFLAGS $BOOST_LDFLAGS"
+ export LDFLAGS
+
+ AC_REQUIRE([AC_PROG_CXX])
+ AC_LANG_PUSH(C++)
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+ @%:@include <boost/version.hpp>
+ ]], [[
+ #if BOOST_VERSION >= $WANT_BOOST_VERSION
+ // Everything is okay
+ #else
+ # error Boost version is too old
+ #endif
+ ]])],[
+ AC_MSG_RESULT(yes)
+ succeeded=yes
+ found_system=yes
+ ],[
+ ])
+ AC_LANG_POP([C++])
+
+
+
+ dnl if we found no boost with system layout we search for boost libraries
+ dnl built and installed without the --layout=system option or for a staged (not installed) version
+ if test "x$succeeded" != "xyes"; then
+ _version=0
+ if test "$ac_boost_path" != ""; then
+ if test -d "$ac_boost_path" && test -r "$ac_boost_path"; then
+ for i in `ls -d $ac_boost_path/include/boost-* 2>/dev/null`; do
+ _version_tmp=`echo $i | sed "s#$ac_boost_path##" | sed 's/\/include\/boost-//' | sed 's/_/./'`
+ V_CHECK=`expr $_version_tmp \> $_version`
+ if test "$V_CHECK" = "1" ; then
+ _version=$_version_tmp
+ fi
+ VERSION_UNDERSCORE=`echo $_version | sed 's/\./_/'`
+ BOOST_CPPFLAGS="-I$ac_boost_path/include/boost-$VERSION_UNDERSCORE"
+ done
+ fi
+ else
+ if test "$cross_compiling" != yes; then
+ for ac_boost_path in $lt_sysroot/usr $lt_sysroot/usr/local $lt_sysroot/opt $lt_sysroot/opt/local ; do
+ if test -d "$ac_boost_path" && test -r "$ac_boost_path"; then
+ for i in `ls -d $ac_boost_path/include/boost-* 2>/dev/null`; do
+ _version_tmp=`echo $i | sed "s#$ac_boost_path##" | sed 's/\/include\/boost-//' | sed 's/_/./'`
+ V_CHECK=`expr $_version_tmp \> $_version`
+ if test "$V_CHECK" = "1" ; then
+ _version=$_version_tmp
+ best_path=$ac_boost_path
+ fi
+ done
+ fi
+ done
+
+ VERSION_UNDERSCORE=`echo $_version | sed 's/\./_/'`
+ BOOST_CPPFLAGS="-I$best_path/include/boost-$VERSION_UNDERSCORE"
+ if test "$ac_boost_lib_path" = ""; then
+ for libsubdir in $libsubdirs ; do
+ if ls "$best_path/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi
+ done
+ BOOST_LDFLAGS="-L$best_path/$libsubdir"
+ fi
+ fi
+
+ if test "x$BOOST_ROOT" != "x"; then
+ for libsubdir in $libsubdirs ; do
+ if ls "$BOOST_ROOT/stage/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi
+ done
+ if test -d "$BOOST_ROOT" && test -r "$BOOST_ROOT" && test -d "$BOOST_ROOT/stage/$libsubdir" && test -r "$BOOST_ROOT/stage/$libsubdir"; then
+ version_dir=`expr //$BOOST_ROOT : '.*/\(.*\)'`
+ stage_version=`echo $version_dir | sed 's/boost_//' | sed 's/_/./g'`
+ stage_version_shorten=`expr $stage_version : '\([[0-9]]*\.[[0-9]]*\)'`
+ V_CHECK=`expr $stage_version_shorten \>\= $_version`
+ if test "$V_CHECK" = "1" -a "$ac_boost_lib_path" = "" ; then
+ AC_MSG_NOTICE(We will use a staged boost library from $BOOST_ROOT)
+ BOOST_CPPFLAGS="-I$BOOST_ROOT"
+ BOOST_LDFLAGS="-L$BOOST_ROOT/stage/$libsubdir"
+ fi
+ fi
+ fi
+ fi
+
+ CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
+ export CPPFLAGS
+ LDFLAGS="$LDFLAGS $BOOST_LDFLAGS"
+ export LDFLAGS
+
+ AC_LANG_PUSH(C++)
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+ @%:@include <boost/version.hpp>
+ ]], [[
+ #if BOOST_VERSION >= $WANT_BOOST_VERSION
+ // Everything is okay
+ #else
+ # error Boost version is too old
+ #endif
+ ]])],[
+ AC_MSG_RESULT(yes)
+ succeeded=yes
+ found_system=yes
+ ],[
+ ])
+ AC_LANG_POP([C++])
+ fi
+
+ if test "$succeeded" != "yes" ; then
+ if test "$_version" = "0" ; then
+ AC_MSG_NOTICE([[We could not detect the boost libraries (version $boost_lib_version_req_shorten or higher). If you have a staged boost library (still not installed) please specify \$BOOST_ROOT in your environment and do not give a PATH to --with-boost option. If you are sure you have boost installed, then check your version number looking in <boost/version.hpp>. See http://randspringer.de/boost for more documentation.]])
+ else
+ AC_MSG_NOTICE([Your boost libraries seem to be too old (version $_version).])
+ fi
+ # execute ACTION-IF-NOT-FOUND (if present):
+ ifelse([$3], , :, [$3])
+ else
+ AC_SUBST(BOOST_CPPFLAGS)
+ AC_SUBST(BOOST_LDFLAGS)
+ AC_DEFINE(HAVE_BOOST,,[define if the Boost library is available])
+ # execute ACTION-IF-FOUND (if present):
+ ifelse([$2], , :, [$2])
+ fi
+
+ CPPFLAGS="$CPPFLAGS_SAVED"
+ LDFLAGS="$LDFLAGS_SAVED"
+fi
+
+])
diff --git a/vendor/github.com/apache/thrift/aclocal/ax_check_openssl.m4 b/vendor/github.com/apache/thrift/aclocal/ax_check_openssl.m4
new file mode 100644
index 000000000..a87c5a6b6
--- /dev/null
+++ b/vendor/github.com/apache/thrift/aclocal/ax_check_openssl.m4
@@ -0,0 +1,124 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_check_openssl.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_CHECK_OPENSSL([action-if-found[, action-if-not-found]])
+#
+# DESCRIPTION
+#
+# Look for OpenSSL in a number of default spots, or in a user-selected
+# spot (via --with-openssl). Sets
+#
+# OPENSSL_INCLUDES to the include directives required
+# OPENSSL_LIBS to the -l directives required
+# OPENSSL_LDFLAGS to the -L or -R flags required
+#
+# and calls ACTION-IF-FOUND or ACTION-IF-NOT-FOUND appropriately
+#
+# This macro sets OPENSSL_INCLUDES such that source files should use the
+# openssl/ directory in include directives:
+#
+# #include <openssl/ssl.h>
+#
+# LICENSE
+#
+# Copyright (c) 2009,2010 Zmanda Inc.
+# Copyright (c) 2009,2010 Dustin J. Mitchell
+#
+# Copying and distribution of this file, with or without modification, are
+# permitted in any medium without royalty provided the copyright notice
+# and this notice are preserved. This file is offered as-is, without any
+# warranty.
+
+#serial 8
+
+AU_ALIAS([CHECK_SSL], [AX_CHECK_OPENSSL])
+AC_DEFUN([AX_CHECK_OPENSSL], [
+ found=false
+ AC_ARG_WITH([openssl],
+ [AS_HELP_STRING([--with-openssl=DIR],
+ [root of the OpenSSL directory])],
+ [
+ case "$withval" in
+ "" | y | ye | yes | n | no)
+ AC_MSG_ERROR([Invalid --with-openssl value])
+ ;;
+ *) ssldirs="$withval"
+ ;;
+ esac
+ ], [
+ # if pkg-config is installed and openssl has installed a .pc file,
+ # then use that information and don't search ssldirs
+ AC_PATH_PROG([PKG_CONFIG], [pkg-config])
+ if test x"$PKG_CONFIG" != x""; then
+ OPENSSL_LDFLAGS=`$PKG_CONFIG openssl --libs-only-L 2>/dev/null`
+ if test $? = 0; then
+ OPENSSL_LIBS=`$PKG_CONFIG openssl --libs-only-l 2>/dev/null`
+ OPENSSL_INCLUDES=`$PKG_CONFIG openssl --cflags-only-I 2>/dev/null`
+ found=true
+ fi
+ fi
+
+ # no such luck; use some default ssldirs
+ if ! $found; then
+ ssldirs="/usr/local/ssl /usr/lib/ssl /usr/ssl /usr/pkg /usr/local /usr"
+ fi
+ ]
+ )
+
+
+ # note that we #include <openssl/foo.h>, so the OpenSSL headers have to be in
+ # an 'openssl' subdirectory
+
+ if ! $found; then
+ OPENSSL_INCLUDES=
+ for ssldir in $ssldirs; do
+ AC_MSG_CHECKING([for openssl/ssl.h in $ssldir])
+ if test -f "$ssldir/include/openssl/ssl.h"; then
+ OPENSSL_INCLUDES="-I$ssldir/include"
+ OPENSSL_LDFLAGS="-L$ssldir/lib"
+ OPENSSL_LIBS="-lssl -lcrypto"
+ found=true
+ AC_MSG_RESULT([yes])
+ break
+ else
+ AC_MSG_RESULT([no])
+ fi
+ done
+
+ # if the file wasn't found, well, go ahead and try the link anyway -- maybe
+ # it will just work!
+ fi
+
+ # try the preprocessor and linker with our new flags,
+ # being careful not to pollute the global LIBS, LDFLAGS, and CPPFLAGS
+
+ AC_MSG_CHECKING([whether compiling and linking against OpenSSL works])
+ echo "Trying link with OPENSSL_LDFLAGS=$OPENSSL_LDFLAGS;" \
+ "OPENSSL_LIBS=$OPENSSL_LIBS; OPENSSL_INCLUDES=$OPENSSL_INCLUDES" >&AS_MESSAGE_LOG_FD
+
+ save_LIBS="$LIBS"
+ save_LDFLAGS="$LDFLAGS"
+ save_CPPFLAGS="$CPPFLAGS"
+ LDFLAGS="$LDFLAGS $OPENSSL_LDFLAGS"
+ LIBS="$OPENSSL_LIBS $LIBS"
+ CPPFLAGS="$OPENSSL_INCLUDES $CPPFLAGS"
+ AC_LINK_IFELSE(
+ [AC_LANG_PROGRAM([#include <openssl/ssl.h>], [SSL_new(NULL)])],
+ [
+ AC_MSG_RESULT([yes])
+ $1
+ ], [
+ AC_MSG_RESULT([no])
+ $2
+ ])
+ CPPFLAGS="$save_CPPFLAGS"
+ LDFLAGS="$save_LDFLAGS"
+ LIBS="$save_LIBS"
+
+ AC_SUBST([OPENSSL_INCLUDES])
+ AC_SUBST([OPENSSL_LIBS])
+ AC_SUBST([OPENSSL_LDFLAGS])
+])
diff --git a/vendor/github.com/apache/thrift/aclocal/ax_cxx_compile_stdcxx_11.m4 b/vendor/github.com/apache/thrift/aclocal/ax_cxx_compile_stdcxx_11.m4
new file mode 100644
index 000000000..a9a8f584f
--- /dev/null
+++ b/vendor/github.com/apache/thrift/aclocal/ax_cxx_compile_stdcxx_11.m4
@@ -0,0 +1,165 @@
+# ============================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx_11.html
+# ============================================================================
+#
+# SYNOPSIS
+#
+# AX_CXX_COMPILE_STDCXX_11([ext|noext],[mandatory|optional])
+#
+# DESCRIPTION
+#
+# Check for baseline language coverage in the compiler for the C++11
+# standard; if necessary, add switches to CXXFLAGS to enable support.
+#
+# The first argument, if specified, indicates whether you insist on an
+# extended mode (e.g. -std=gnu++11) or a strict conformance mode (e.g.
+# -std=c++11). If neither is specified, you get whatever works, with
+# preference for an extended mode.
+#
+# The second argument, if specified 'mandatory' or if left unspecified,
+# indicates that baseline C++11 support is required and that the macro
+# should error out if no mode with that support is found. If specified
+# 'optional', then configuration proceeds regardless, after defining
+# HAVE_CXX11 if and only if a supporting mode is found.
+#
+# LICENSE
+#
+# Copyright (c) 2008 Benjamin Kosnik
+# Copyright (c) 2012 Zack Weinberg
+# Copyright (c) 2013 Roy Stogner
+# Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov
+#
+# Copying and distribution of this file, with or without modification, are
+# permitted in any medium without royalty provided the copyright notice
+# and this notice are preserved. This file is offered as-is, without any
+# warranty.
+
+#serial 10
+
+m4_define([_AX_CXX_COMPILE_STDCXX_11_testbody], [[
+ template <typename T>
+ struct check
+ {
+ static_assert(sizeof(int) <= sizeof(T), "not big enough");
+ };
+
+ struct Base {
+ virtual void f() {}
+ };
+ struct Child : public Base {
+ virtual void f() override {}
+ };
+
+ typedef check<check<bool>> right_angle_brackets;
+
+ int a;
+ decltype(a) b;
+
+ typedef check<int> check_type;
+ check_type c;
+ check_type&& cr = static_cast<check_type&&>(c);
+
+ auto d = a;
+ auto l = [](){};
+ // Prevent Clang error: unused variable 'l' [-Werror,-Wunused-variable]
+ struct use_l { use_l() { l(); } };
+
+ // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae
+ // Clang 3.1 fails with headers of libstd++ 4.8.3 when using std::function because of this
+ namespace test_template_alias_sfinae {
+ struct foo {};
+
+ template<typename T>
+ using member = typename T::member_type;
+
+ template<typename T>
+ void func(...) {}
+
+ template<typename T>
+ void func(member<T>*) {}
+
+ void test();
+
+ void test() {
+ func<foo>(0);
+ }
+ }
+]])
+
+AC_DEFUN([AX_CXX_COMPILE_STDCXX_11], [dnl
+ m4_if([$1], [], [],
+ [$1], [ext], [],
+ [$1], [noext], [],
+ [m4_fatal([invalid argument `$1' to AX_CXX_COMPILE_STDCXX_11])])dnl
+ m4_if([$2], [], [ax_cxx_compile_cxx11_required=true],
+ [$2], [mandatory], [ax_cxx_compile_cxx11_required=true],
+ [$2], [optional], [ax_cxx_compile_cxx11_required=false],
+ [m4_fatal([invalid second argument `$2' to AX_CXX_COMPILE_STDCXX_11])])
+ AC_LANG_PUSH([C++])dnl
+ ac_success=no
+ AC_CACHE_CHECK(whether $CXX supports C++11 features by default,
+ ax_cv_cxx_compile_cxx11,
+ [AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_11_testbody])],
+ [ax_cv_cxx_compile_cxx11=yes],
+ [ax_cv_cxx_compile_cxx11=no])])
+ if test x$ax_cv_cxx_compile_cxx11 = xyes; then
+ ac_success=yes
+ fi
+
+ m4_if([$1], [noext], [], [dnl
+ if test x$ac_success = xno; then
+ for switch in -std=gnu++11 -std=gnu++0x; do
+ cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx11_$switch])
+ AC_CACHE_CHECK(whether $CXX supports C++11 features with $switch,
+ $cachevar,
+ [ac_save_CXXFLAGS="$CXXFLAGS"
+ CXXFLAGS="$CXXFLAGS $switch"
+ AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_11_testbody])],
+ [eval $cachevar=yes],
+ [eval $cachevar=no])
+ CXXFLAGS="$ac_save_CXXFLAGS"])
+ if eval test x\$$cachevar = xyes; then
+ CXXFLAGS="$CXXFLAGS $switch"
+ ac_success=yes
+ break
+ fi
+ done
+ fi])
+
+ m4_if([$1], [ext], [], [dnl
+ if test x$ac_success = xno; then
+ for switch in -std=c++11 -std=c++0x; do
+ cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx11_$switch])
+ AC_CACHE_CHECK(whether $CXX supports C++11 features with $switch,
+ $cachevar,
+ [ac_save_CXXFLAGS="$CXXFLAGS"
+ CXXFLAGS="$CXXFLAGS $switch"
+ AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_11_testbody])],
+ [eval $cachevar=yes],
+ [eval $cachevar=no])
+ CXXFLAGS="$ac_save_CXXFLAGS"])
+ if eval test x\$$cachevar = xyes; then
+ CXXFLAGS="$CXXFLAGS $switch"
+ ac_success=yes
+ break
+ fi
+ done
+ fi])
+ AC_LANG_POP([C++])
+ if test x$ax_cxx_compile_cxx11_required = xtrue; then
+ if test x$ac_success = xno; then
+ AC_MSG_ERROR([*** A compiler with support for C++11 language features is required.])
+ fi
+ else
+ if test x$ac_success = xno; then
+ HAVE_CXX11=0
+ AC_MSG_NOTICE([No compiler with C++11 support was found])
+ else
+ HAVE_CXX11=1
+ AC_DEFINE(HAVE_CXX11,1,
+ [define if the compiler supports basic C++11 syntax])
+ fi
+
+ AC_SUBST(HAVE_CXX11)
+ fi
+])
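The synopsis documented above translates directly into configure.ac; a minimal, hypothetical invocation would be:

    dnl Require strict C++11 (no GNU extensions); configure aborts if no usable mode is found.
    AX_CXX_COMPILE_STDCXX_11([noext], [mandatory])
    dnl An optional probe would instead be AX_CXX_COMPILE_STDCXX_11([ext], [optional]),
    dnl after which the HAVE_CXX11 substitution is 0 or 1 and can drive an AM_CONDITIONAL.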
diff --git a/vendor/github.com/apache/thrift/aclocal/ax_dmd.m4 b/vendor/github.com/apache/thrift/aclocal/ax_dmd.m4
new file mode 100644
index 000000000..13b84b021
--- /dev/null
+++ b/vendor/github.com/apache/thrift/aclocal/ax_dmd.m4
@@ -0,0 +1,107 @@
+dnl @synopsis AX_DMD
+dnl
+dnl Test for the presence of a DMD-compatible D2 compiler, and (optionally)
+dnl specified modules on the import path.
+dnl
+dnl If "DMD" is defined in the environment, that will be the only
+dnl dmd command tested. Otherwise, a hard-coded list will be used.
+dnl
+dnl After AX_DMD runs, the shell variables "success" and "ax_dmd" are set to
+dnl "yes" or "no", and "DMD" is set to the appropriate command. Furthermore,
+dnl "dmd_optlink" will be set to "yes" or "no" depending on whether OPTLINK is
+dnl used as the linker (DMD/Windows), and "dmd_of_dirsep" will be set to the
+dnl directory separator to use when passing -of to DMD (OPTLINK requires a
+dnl backslash).
+dnl
+dnl AX_CHECK_D_MODULE must be run after AX_DMD. It tests for the presence of a
+dnl module in the import path of the chosen compiler, and sets the shell
+dnl variable "success" to "yes" or "no".
+dnl
+dnl @category D
+dnl @version 2011-05-31
+dnl @license AllPermissive
+dnl
+dnl Copyright (C) 2009 David Reiss
+dnl Copyright (C) 2011 David Nadlinger
+dnl Copying and distribution of this file, with or without modification,
+dnl are permitted in any medium without royalty provided the copyright
+dnl notice and this notice are preserved.
+
+
+AC_DEFUN([AX_DMD],
+ [
+ dnl Hard-coded default commands to test.
+ DMD_PROGS="dmd,gdmd,ldmd"
+
+ dnl Allow the user to specify an alternative.
+ if test -n "$DMD" ; then
+ DMD_PROGS="$DMD"
+ fi
+
+ AC_MSG_CHECKING(for DMD)
+
+ # std.algorithm as a quick way to check for D2/Phobos.
+ echo "import std.algorithm; void main() {}" > configtest_ax_dmd.d
+ success=no
+ oIFS="$IFS"
+
+ IFS=","
+ for DMD in $DMD_PROGS ; do
+ IFS="$oIFS"
+
+ echo "Running \"$DMD configtest_ax_dmd.d\"" >&AS_MESSAGE_LOG_FD
+ if $DMD configtest_ax_dmd.d >&AS_MESSAGE_LOG_FD 2>&1 ; then
+ success=yes
+ break
+ fi
+ done
+
+ if test "$success" != "yes" ; then
+ AC_MSG_RESULT(no)
+ DMD=""
+ else
+ AC_MSG_RESULT(yes)
+ fi
+
+ ax_dmd="$success"
+
+ # Test whether OPTLINK is used by trying if DMD accepts -L/? without
+ # erroring out.
+ if test "$success" == "yes" ; then
+ AC_MSG_CHECKING(whether DMD uses OPTLINK)
+ echo "Running \"$DMD -L/? configtest_ax_dmd.d\"" >&AS_MESSAGE_LOG_FD
+ if $DMD -L/? configtest_ax_dmd.d >&AS_MESSAGE_LOG_FD 2>&1 ; then
+ AC_MSG_RESULT(yes)
+ dmd_optlink="yes"
+
+ # This actually produces double slashes in the final configure
+ # output, but at least it works.
+ dmd_of_dirsep="\\\\"
+ else
+ AC_MSG_RESULT(no)
+ dmd_optlink="no"
+ dmd_of_dirsep="/"
+ fi
+ fi
+
+ rm -f configtest_ax_dmd*
+ ])
+
+
+AC_DEFUN([AX_CHECK_D_MODULE],
+ [
+ AC_MSG_CHECKING(for D module [$1])
+
+ echo "import $1; void main() {}" > configtest_ax_dmd.d
+
+ echo "Running \"$DMD configtest_ax_dmd.d\"" >&AS_MESSAGE_LOG_FD
+ if $DMD -c configtest_ax_dmd.d >&AS_MESSAGE_LOG_FD 2>&1 ; then
+ AC_MSG_RESULT(yes)
+ success=yes
+ else
+ AC_MSG_RESULT(no)
+ success=no
+ fi
+
+ rm -f configtest_ax_dmd*
+ ])
diff --git a/vendor/github.com/apache/thrift/aclocal/ax_javac_and_java.m4 b/vendor/github.com/apache/thrift/aclocal/ax_javac_and_java.m4
new file mode 100644
index 000000000..f341f50e7
--- /dev/null
+++ b/vendor/github.com/apache/thrift/aclocal/ax_javac_and_java.m4
@@ -0,0 +1,129 @@
+dnl @synopsis AX_JAVAC_AND_JAVA
+dnl @synopsis AX_CHECK_JAVA_CLASS(CLASSNAME)
+dnl
+dnl Test for the presence of a JDK, and (optionally) specific classes.
+dnl
+dnl If "JAVA" is defined in the environment, that will be the only
+dnl java command tested. Otherwise, a hard-coded list will be used.
+dnl Similarly for "JAVAC".
+dnl
+dnl AX_JAVAC_AND_JAVA does not currently support testing for a particular
+dnl Java version, testing for only one of "java" and "javac", or
+dnl compiling or running user-provided Java code.
+dnl
+dnl After AX_JAVAC_AND_JAVA runs, the shell variables "success" and
+dnl "ax_javac_and_java" are set to "yes" or "no", and "JAVAC" and
+dnl "JAVA" are set to the appropriate commands.
+dnl
+dnl AX_CHECK_JAVA_CLASS must be run after AX_JAVAC_AND_JAVA.
+dnl It tests for the presence of a class based on a fully-qualified name.
+dnl It sets the shell variable "success" to "yes" or "no".
+dnl
+dnl @category Java
+dnl @version 2009-02-09
+dnl @license AllPermissive
+dnl
+dnl Copyright (C) 2009 David Reiss
+dnl Copying and distribution of this file, with or without modification,
+dnl are permitted in any medium without royalty provided the copyright
+dnl notice and this notice are preserved.
+
+
+AC_DEFUN([AX_JAVAC_AND_JAVA],
+ [
+
+ dnl Hard-coded default commands to test.
+ JAVAC_PROGS="javac,jikes,gcj -C"
+ JAVA_PROGS="java,kaffe"
+
+ dnl Allow the user to specify an alternative.
+ if test -n "$JAVAC" ; then
+ JAVAC_PROGS="$JAVAC"
+ fi
+ if test -n "$JAVA" ; then
+ JAVA_PROGS="$JAVA"
+ fi
+
+ AC_MSG_CHECKING(for javac and java)
+
+ echo "public class configtest_ax_javac_and_java { public static void main(String args@<:@@:>@) { } }" > configtest_ax_javac_and_java.java
+ success=no
+ oIFS="$IFS"
+
+ IFS=","
+ for JAVAC in $JAVAC_PROGS ; do
+ IFS="$oIFS"
+
+ echo "Running \"$JAVAC configtest_ax_javac_and_java.java\"" >&AS_MESSAGE_LOG_FD
+ if $JAVAC configtest_ax_javac_and_java.java >&AS_MESSAGE_LOG_FD 2>&1 ; then
+
+ # prevent $JAVA VM issues with UTF-8 path names (THRIFT-3271)
+ oLC_ALL="$LC_ALL"
+ LC_ALL=""
+
+ IFS=","
+ for JAVA in $JAVA_PROGS ; do
+ IFS="$oIFS"
+
+ echo "Running \"$JAVA configtest_ax_javac_and_java\"" >&AS_MESSAGE_LOG_FD
+ if $JAVA configtest_ax_javac_and_java >&AS_MESSAGE_LOG_FD 2>&1 ; then
+ success=yes
+ break 2
+ fi
+
+ done
+
+ # restore LC_ALL
+ LC_ALL="$oLC_ALL"
+ oLC_ALL=""
+
+ fi
+
+ done
+
+ rm -f configtest_ax_javac_and_java.java configtest_ax_javac_and_java.class
+
+ if test "$success" != "yes" ; then
+ AC_MSG_RESULT(no)
+ JAVAC=""
+ JAVA=""
+ else
+ AC_MSG_RESULT(yes)
+ fi
+
+ ax_javac_and_java="$success"
+
+ ])
+
+
+AC_DEFUN([AX_CHECK_JAVA_CLASS],
+ [
+ AC_MSG_CHECKING(for Java class [$1])
+
+ echo "import $1; public class configtest_ax_javac_and_java { public static void main(String args@<:@@:>@) { } }" > configtest_ax_javac_and_java.java
+
+ echo "Running \"$JAVAC configtest_ax_javac_and_java.java\"" >&AS_MESSAGE_LOG_FD
+ if $JAVAC configtest_ax_javac_and_java.java >&AS_MESSAGE_LOG_FD 2>&1 ; then
+ AC_MSG_RESULT(yes)
+ success=yes
+ else
+ AC_MSG_RESULT(no)
+ success=no
+ fi
+
+ rm -f configtest_ax_javac_and_java.java configtest_ax_javac_and_java.class
+ ])
+
+
+AC_DEFUN([AX_CHECK_ANT_VERSION],
+ [
+ AC_MSG_CHECKING(for ant version > $2)
+ ANT_VALID=`expr $($1 -version 2>/dev/null | sed -n 's/.*version \(@<:@0-9\.@:>@*\).*/\1/p') \>= $2`
+ if test "x$ANT_VALID" = "x1" ; then
+ AC_MSG_RESULT(yes)
+ else
+ AC_MSG_RESULT(no)
+ ANT=""
+ fi
+ ])
+
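A hypothetical configure.ac sketch chaining the three macros above (class name and version are illustrative):

    AX_JAVAC_AND_JAVA
    AM_CONDITIONAL([WITH_JAVA], [test "x$ax_javac_and_java" = "xyes"])
    if test "x$ax_javac_and_java" = "xyes" ; then
      AX_CHECK_JAVA_CLASS([org.slf4j.Logger])
      AC_PATH_PROG([ANT], [ant])
      AX_CHECK_ANT_VERSION([$ANT], [1.7])
    fi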
diff --git a/vendor/github.com/apache/thrift/aclocal/ax_lib_event.m4 b/vendor/github.com/apache/thrift/aclocal/ax_lib_event.m4
new file mode 100644
index 000000000..d4dcdc9a6
--- /dev/null
+++ b/vendor/github.com/apache/thrift/aclocal/ax_lib_event.m4
@@ -0,0 +1,194 @@
+dnl @synopsis AX_LIB_EVENT([MINIMUM-VERSION])
+dnl
+dnl Test for the libevent library of a particular version (or newer).
+dnl
+dnl If no path to the installed libevent is given, the macro will first try
+dnl using no -I or -L flags, then searches under /usr, /usr/local, /opt,
+dnl and /opt/libevent.
+dnl If these all fail, it will try the $LIBEVENT_ROOT environment variable.
+dnl
+dnl This macro requires that #include <sys/types.h> works and defines u_char.
+dnl
+dnl This macro calls:
+dnl AC_SUBST(LIBEVENT_CPPFLAGS)
+dnl AC_SUBST(LIBEVENT_LDFLAGS)
+dnl AC_SUBST(LIBEVENT_LIBS)
+dnl
+dnl And (if libevent is found):
+dnl AC_DEFINE(HAVE_LIBEVENT)
+dnl
+dnl It also leaves the shell variables "success" and "ax_have_libevent"
+dnl set to "yes" or "no".
+dnl
+dnl NOTE: This macro does not currently work for cross-compiling,
+dnl but it can be easily modified to allow it. (grep "cross").
+dnl
+dnl @category InstalledPackages
+dnl @category C
+dnl @version 2007-09-12
+dnl @license AllPermissive
+dnl
+dnl Copyright (C) 2009 David Reiss
+dnl Copying and distribution of this file, with or without modification,
+dnl are permitted in any medium without royalty provided the copyright
+dnl notice and this notice are preserved.
+
+dnl Input: ax_libevent_path, WANT_LIBEVENT_VERSION
+dnl Output: success=yes/no
+AC_DEFUN([AX_LIB_EVENT_DO_CHECK],
+ [
+ # Save our flags.
+ CPPFLAGS_SAVED="$CPPFLAGS"
+ LDFLAGS_SAVED="$LDFLAGS"
+ LIBS_SAVED="$LIBS"
+ LD_LIBRARY_PATH_SAVED="$LD_LIBRARY_PATH"
+
+ # Set our flags if we are checking a specific directory.
+ if test -n "$ax_libevent_path" ; then
+ LIBEVENT_CPPFLAGS="-I$ax_libevent_path/include"
+ LIBEVENT_LDFLAGS="-L$ax_libevent_path/lib"
+ LD_LIBRARY_PATH="$ax_libevent_path/lib:$LD_LIBRARY_PATH"
+ else
+ LIBEVENT_CPPFLAGS=""
+ LIBEVENT_LDFLAGS=""
+ fi
+
+ # Required flag for libevent.
+ LIBEVENT_LIBS="-levent"
+
+ # Prepare the environment for compilation.
+ CPPFLAGS="$CPPFLAGS $LIBEVENT_CPPFLAGS"
+ LDFLAGS="$LDFLAGS $LIBEVENT_LDFLAGS"
+ LIBS="$LIBS $LIBEVENT_LIBS"
+ export CPPFLAGS
+ export LDFLAGS
+ export LIBS
+ export LD_LIBRARY_PATH
+
+ success=no
+
+ # Compile, link, and run the program. This checks:
+ # - event.h is available for including.
+ # - event_get_version() is available for linking.
+ # - The event version string is lexicographically greater
+ # than the required version.
+ AC_LANG_PUSH([C])
+ dnl This can be changed to AC_LINK_IFELSE if you are cross-compiling,
+ dnl but then the version cannot be checked.
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([[
+ #include <sys/types.h>
+ #include <event.h>
+ ]], [[
+ const char* lib_version = event_get_version();
+ const char* wnt_version = "$WANT_LIBEVENT_VERSION";
+ int lib_digits;
+ int wnt_digits;
+ for (;;) {
+ /* If we reached the end of the want version. We have it. */
+ if (*wnt_version == '\0' || *wnt_version == '-') {
+ return 0;
+ }
+ /* If the want version continues but the lib version does not, */
+ /* we are missing a letter. We don't have it. */
+ if (*lib_version == '\0' || *lib_version == '-') {
+ return 1;
+ }
+ /* In the 1.4 version numbering style, if there are more digits */
+ /* in one version than the other, that one is higher. */
+ for (lib_digits = 0;
+ lib_version[lib_digits] >= '0' &&
+ lib_version[lib_digits] <= '9';
+ lib_digits++)
+ ;
+ for (wnt_digits = 0;
+ wnt_version[wnt_digits] >= '0' &&
+ wnt_version[wnt_digits] <= '9';
+ wnt_digits++)
+ ;
+ if (lib_digits > wnt_digits) {
+ return 0;
+ }
+ if (lib_digits < wnt_digits) {
+ return 1;
+ }
+ /* If we have greater than what we want. We have it. */
+ if (*lib_version > *wnt_version) {
+ return 0;
+ }
+ /* If we have less, we don't. */
+ if (*lib_version < *wnt_version) {
+ return 1;
+ }
+ lib_version++;
+ wnt_version++;
+ }
+ return 0;
+ ]])], [
+ success=yes
+ ])
+ AC_LANG_POP([C])
+
+ # Restore flags.
+ CPPFLAGS="$CPPFLAGS_SAVED"
+ LDFLAGS="$LDFLAGS_SAVED"
+ LIBS="$LIBS_SAVED"
+ LD_LIBRARY_PATH="$LD_LIBRARY_PATH_SAVED"
+ ])
+
+
+AC_DEFUN([AX_LIB_EVENT],
+ [
+
+ dnl Allow search path to be overridden on the command line.
+ AC_ARG_WITH([libevent],
+ AS_HELP_STRING([--with-libevent@<:@=DIR@:>@], [use libevent [default=yes]. Optionally specify the root prefix dir where libevent is installed]),
+ [
+ if test "x$withval" = "xno"; then
+ want_libevent="no"
+ elif test "x$withval" = "xyes"; then
+ want_libevent="yes"
+ ax_libevent_path=""
+ else
+ want_libevent="yes"
+ ax_libevent_path="$withval"
+ fi
+ ],
+ [ want_libevent="yes" ; ax_libevent_path="" ])
+
+
+ if test "$want_libevent" = "yes"; then
+ WANT_LIBEVENT_VERSION=ifelse([$1], ,1.2,$1)
+
+ AC_MSG_CHECKING(for libevent >= $WANT_LIBEVENT_VERSION)
+
+ # Run tests.
+ if test -n "$ax_libevent_path"; then
+ AX_LIB_EVENT_DO_CHECK
+ else
+ for ax_libevent_path in "" $lt_sysroot/usr $lt_sysroot/usr/local $lt_sysroot/opt $lt_sysroot/opt/local $lt_sysroot/opt/libevent "$LIBEVENT_ROOT" ; do
+ AX_LIB_EVENT_DO_CHECK
+ if test "$success" = "yes"; then
+ break;
+ fi
+ done
+ fi
+
+ if test "$success" != "yes" ; then
+ AC_MSG_RESULT(no)
+ LIBEVENT_CPPFLAGS=""
+ LIBEVENT_LDFLAGS=""
+ LIBEVENT_LIBS=""
+ else
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_LIBEVENT,,[define if libevent is available])
+ ax_have_libevent_[]m4_translit([$1], [.], [_])="yes"
+ fi
+
+ ax_have_libevent="$success"
+
+ AC_SUBST(LIBEVENT_CPPFLAGS)
+ AC_SUBST(LIBEVENT_LDFLAGS)
+ AC_SUBST(LIBEVENT_LIBS)
+ fi
+
+ ])
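For reference, a hypothetical configure.ac use of AX_LIB_EVENT (minimum version and target names are illustrative):

    AX_LIB_EVENT([1.0])
    AM_CONDITIONAL([AMX_HAVE_LIBEVENT], [test "x$ax_have_libevent" = "xyes"])
    dnl Makefile.am then consumes the substituted flags, e.g.
    dnl   server_CPPFLAGS = $(LIBEVENT_CPPFLAGS)
    dnl   server_LDFLAGS  = $(LIBEVENT_LDFLAGS)
    dnl   server_LDADD    = $(LIBEVENT_LIBS)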
diff --git a/vendor/github.com/apache/thrift/aclocal/ax_lib_zlib.m4 b/vendor/github.com/apache/thrift/aclocal/ax_lib_zlib.m4
new file mode 100644
index 000000000..bdb9e110e
--- /dev/null
+++ b/vendor/github.com/apache/thrift/aclocal/ax_lib_zlib.m4
@@ -0,0 +1,173 @@
+dnl @synopsis AX_LIB_ZLIB([MINIMUM-VERSION])
+dnl
+dnl Test for the libz library of a particular version (or newer).
+dnl
+dnl If no path to the installed zlib is given, the macro will first try
+dnl using no -I or -L flags, then searches under /usr, /usr/local, /opt,
+dnl and /opt/zlib.
+dnl If these all fail, it will try the $ZLIB_ROOT environment variable.
+dnl
+dnl This macro calls:
+dnl AC_SUBST(ZLIB_CPPFLAGS)
+dnl AC_SUBST(ZLIB_LDFLAGS)
+dnl AC_SUBST(ZLIB_LIBS)
+dnl
+dnl And (if zlib is found):
+dnl AC_DEFINE(HAVE_ZLIB)
+dnl
+dnl It also leaves the shell variables "success" and "ax_have_zlib"
+dnl set to "yes" or "no".
+dnl
+dnl NOTE: This macro does not currently work for cross-compiling,
+dnl but it can be easily modified to allow it. (grep "cross").
+dnl
+dnl @category InstalledPackages
+dnl @category C
+dnl @version 2007-09-12
+dnl @license AllPermissive
+dnl
+dnl Copyright (C) 2009 David Reiss
+dnl Copying and distribution of this file, with or without modification,
+dnl are permitted in any medium without royalty provided the copyright
+dnl notice and this notice are preserved.
+
+dnl Input: ax_zlib_path, WANT_ZLIB_VERSION
+dnl Output: success=yes/no
+AC_DEFUN([AX_LIB_ZLIB_DO_CHECK],
+ [
+ # Save our flags.
+ CPPFLAGS_SAVED="$CPPFLAGS"
+ LDFLAGS_SAVED="$LDFLAGS"
+ LIBS_SAVED="$LIBS"
+ LD_LIBRARY_PATH_SAVED="$LD_LIBRARY_PATH"
+
+ # Set our flags if we are checking a specific directory.
+ if test -n "$ax_zlib_path" ; then
+ ZLIB_CPPFLAGS="-I$ax_zlib_path/include"
+ ZLIB_LDFLAGS="-L$ax_zlib_path/lib"
+ LD_LIBRARY_PATH="$ax_zlib_path/lib:$LD_LIBRARY_PATH"
+ else
+ ZLIB_CPPFLAGS=""
+ ZLIB_LDFLAGS=""
+ fi
+
+ # Required flag for zlib.
+ ZLIB_LIBS="-lz"
+
+ # Prepare the environment for compilation.
+ CPPFLAGS="$CPPFLAGS $ZLIB_CPPFLAGS"
+ LDFLAGS="$LDFLAGS $ZLIB_LDFLAGS"
+ LIBS="$LIBS $ZLIB_LIBS"
+ export CPPFLAGS
+ export LDFLAGS
+ export LIBS
+ export LD_LIBRARY_PATH
+
+ success=no
+
+ # Compile, link, and run the program. This checks:
+ # - zlib.h is available for including.
+ # - zlibVersion() is available for linking.
+ # - ZLIB_VERNUM is greater than or equal to the desired version.
+ # - ZLIB_VERSION (defined in zlib.h) matches zlibVersion()
+ # (defined in the library).
+ AC_LANG_PUSH([C])
+ dnl This can be changed to AC_LINK_IFELSE if you are cross-compiling.
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([[
+ #include <zlib.h>
+ #if ZLIB_VERNUM >= 0x$WANT_ZLIB_VERSION
+ #else
+ # error zlib is too old
+ #endif
+ ]], [[
+ const char* lib_version = zlibVersion();
+ const char* hdr_version = ZLIB_VERSION;
+ for (;;) {
+ if (*lib_version != *hdr_version) {
+ /* If this happens, your zlib header doesn't match your zlib */
+ /* library. That is really bad. */
+ return 1;
+ }
+ if (*lib_version == '\0') {
+ break;
+ }
+ lib_version++;
+ hdr_version++;
+ }
+ return 0;
+ ]])], [
+ success=yes
+ ])
+ AC_LANG_POP([C])
+
+ # Restore flags.
+ CPPFLAGS="$CPPFLAGS_SAVED"
+ LDFLAGS="$LDFLAGS_SAVED"
+ LIBS="$LIBS_SAVED"
+ LD_LIBRARY_PATH="$LD_LIBRARY_PATH_SAVED"
+ ])
+
+
+AC_DEFUN([AX_LIB_ZLIB],
+ [
+
+ dnl Allow search path to be overridden on the command line.
+ AC_ARG_WITH([zlib],
+ AS_HELP_STRING([--with-zlib@<:@=DIR@:>@], [use zlib (default is yes) - it is possible to specify an alternate root directory for zlib]),
+ [
+ if test "x$withval" = "xno"; then
+ want_zlib="no"
+ elif test "x$withval" = "xyes"; then
+ want_zlib="yes"
+ ax_zlib_path=""
+ else
+ want_zlib="yes"
+ ax_zlib_path="$withval"
+ fi
+ ],
+ [want_zlib="yes" ; ax_zlib_path="" ])
+
+
+ if test "$want_zlib" = "yes"; then
+ # Parse out the version.
+ zlib_version_req=ifelse([$1], ,1.2.3,$1)
+ zlib_version_req_major=`expr $zlib_version_req : '\([[0-9]]*\)'`
+ zlib_version_req_minor=`expr $zlib_version_req : '[[0-9]]*\.\([[0-9]]*\)'`
+ zlib_version_req_patch=`expr $zlib_version_req : '[[0-9]]*\.[[0-9]]*\.\([[0-9]]*\)'`
+ if test -z "$zlib_version_req_patch" ; then
+ zlib_version_req_patch="0"
+ fi
+ WANT_ZLIB_VERSION=`expr $zlib_version_req_major \* 1000 \+ $zlib_version_req_minor \* 100 \+ $zlib_version_req_patch \* 10`
+
+ AC_MSG_CHECKING(for zlib >= $zlib_version_req)
+
+ # Run tests.
+ if test -n "$ax_zlib_path"; then
+ AX_LIB_ZLIB_DO_CHECK
+ else
+ for ax_zlib_path in "" /usr /usr/local /opt /opt/zlib "$ZLIB_ROOT" ; do
+ AX_LIB_ZLIB_DO_CHECK
+ if test "$success" = "yes"; then
+ break;
+ fi
+ done
+ fi
+
+ if test "$success" != "yes" ; then
+ AC_MSG_RESULT(no)
+ ZLIB_CPPFLAGS=""
+ ZLIB_LDFLAGS=""
+ ZLIB_LIBS=""
+ else
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_ZLIB,,[define if zlib is available])
+ fi
+
+ ax_have_zlib="$success"
+
+ AC_SUBST(ZLIB_CPPFLAGS)
+ AC_SUBST(ZLIB_LDFLAGS)
+ AC_SUBST(ZLIB_LIBS)
+ fi
+
+ ])
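Usage mirrors the libevent macro; a hypothetical configure.ac sketch (version and target names are illustrative):

    AX_LIB_ZLIB([1.2.3])
    AM_CONDITIONAL([AMX_HAVE_ZLIB], [test "x$ax_have_zlib" = "xyes"])
    dnl   transport_CPPFLAGS = $(ZLIB_CPPFLAGS)
    dnl   transport_LDFLAGS  = $(ZLIB_LDFLAGS)
    dnl   transport_LDADD    = $(ZLIB_LIBS)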
diff --git a/vendor/github.com/apache/thrift/aclocal/ax_lua.m4 b/vendor/github.com/apache/thrift/aclocal/ax_lua.m4
new file mode 100644
index 000000000..9feb35225
--- /dev/null
+++ b/vendor/github.com/apache/thrift/aclocal/ax_lua.m4
@@ -0,0 +1,664 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_lua.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_PROG_LUA[([MINIMUM-VERSION], [TOO-BIG-VERSION], [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])]
+# AX_LUA_HEADERS[([ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])]
+# AX_LUA_LIBS[([ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])]
+# AX_LUA_READLINE[([ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])]
+#
+# DESCRIPTION
+#
+# Detect a Lua interpreter, optionally specifying a minimum and maximum
+# version number. Set up important Lua paths, such as the directories in
+# which to install scripts and modules (shared libraries).
+#
+# Also detect Lua headers and libraries. The Lua version contained in the
+# header is checked to match the Lua interpreter version exactly. When
+# searching for Lua libraries, the version number is used as a suffix.
+# This is done with the goal of supporting multiple Lua installs (5.1,
+# 5.2, and 5.3 side-by-side).
+#
+# A note on compatibility with previous versions: This file has been
+# mostly rewritten for serial 18. Most developers should be able to use
+# these macros without needing to modify configure.ac. Care has been taken
+# to preserve each macro's behavior, but there are some differences:
+#
+# 1) AX_WITH_LUA is deprecated; it now expands to the exact same thing as
+# AX_PROG_LUA with no arguments.
+#
+# 2) AX_LUA_HEADERS now checks that the version number defined in lua.h
+# matches the interpreter version. AX_LUA_HEADERS_VERSION is therefore
+# unnecessary, so it is deprecated and does not expand to anything.
+#
+# 3) The configure flag --with-lua-suffix no longer exists; the user
+# should instead specify the LUA precious variable on the command line.
+# See the AX_PROG_LUA description for details.
+#
+# Please read the macro descriptions below for more information.
+#
+# This file was inspired by Andrew Dalke's and James Henstridge's
+# python.m4 and Tom Payne's, Matthieu Moy's, and Reuben Thomas's ax_lua.m4
+# (serial 17). Basically, this file is a mash-up of those two files. I
+# like to think it combines the best of the two!
+#
+# AX_PROG_LUA: Search for the Lua interpreter, and set up important Lua
+# paths. Adds precious variable LUA, which may contain the path of the Lua
+# interpreter. If LUA is blank, the user's path is searched for an
+# suitable interpreter.
+#
+# If MINIMUM-VERSION is supplied, then only Lua interpreters with a
+# version number greater or equal to MINIMUM-VERSION will be accepted. If
+# TOO-BIG-VERSION is also supplied, then only Lua interpreters with a
+# version number greater or equal to MINIMUM-VERSION and less than
+# TOO-BIG-VERSION will be accepted.
+#
+# The Lua version number, LUA_VERSION, is found from the interpreter, and
+# substituted. LUA_PLATFORM is also found, but not currently supported (no
+# standard representation).
+#
+# Finally, the macro finds four paths:
+#
+# luadir Directory to install Lua scripts.
+# pkgluadir $luadir/$PACKAGE
+# luaexecdir Directory to install Lua modules.
+# pkgluaexecdir $luaexecdir/$PACKAGE
+#
+# These paths are found based on $prefix, $exec_prefix, Lua's
+# package.path, and package.cpath. The first path of package.path
+# beginning with $prefix is selected as luadir. The first path of
+# package.cpath beginning with $exec_prefix is used as luaexecdir. This
+# should work on all reasonable Lua installations. If a path cannot be
+# determined, a default path is used. Of course, the user can override
+# these later when invoking make.
+#
+# luadir Default: $prefix/share/lua/$LUA_VERSION
+# luaexecdir Default: $exec_prefix/lib/lua/$LUA_VERSION
+#
+# These directories can be used by Automake as install destinations. The
+# variable name minus 'dir' needs to be used as a prefix to the
+# appropriate Automake primary, e.g. lua_SCRIPTS or luaexec_LIBRARIES.
+#
+# If an acceptable Lua interpreter is found, then ACTION-IF-FOUND is
+# performed, otherwise ACTION-IF-NOT-FOUND is performed. If ACTION-IF-NOT-
+# FOUND is blank, then it will default to printing an error. To prevent
+# the default behavior, give ':' as an action.
+#
+# AX_LUA_HEADERS: Search for Lua headers. Requires that AX_PROG_LUA be
+# expanded before this macro. Adds precious variable LUA_INCLUDE, which
+# may contain Lua specific include flags, e.g. -I/usr/include/lua5.1. If
+# LUA_INCLUDE is blank, then this macro will attempt to find suitable
+# flags.
+#
+# LUA_INCLUDE can be used by Automake to compile Lua modules or
+# executables with embedded interpreters. The *_CPPFLAGS variables should
+# be used for this purpose, e.g. myprog_CPPFLAGS = $(LUA_INCLUDE).
+#
+# This macro searches for the header lua.h (and others). The search is
+# performed with a combination of CPPFLAGS, CPATH, etc, and LUA_INCLUDE.
+# If the search is unsuccessful, then some common directories are tried.
+# If the headers are then found, then LUA_INCLUDE is set accordingly.
+#
+# The paths automatically searched are:
+#
+# * /usr/include/luaX.Y
+# * /usr/include/lua/X.Y
+# * /usr/include/luaXY
+# * /usr/local/include/luaX.Y
+# * /usr/local/include/lua-X.Y
+# * /usr/local/include/lua/X.Y
+# * /usr/local/include/luaXY
+#
+# (Where X.Y is the Lua version number, e.g. 5.1.)
+#
+# The Lua version number found in the headers is always checked to match
+# the Lua interpreter's version number. Lua headers with mismatched
+# version numbers are not accepted.
+#
+# If headers are found, then ACTION-IF-FOUND is performed, otherwise
+# ACTION-IF-NOT-FOUND is performed. If ACTION-IF-NOT-FOUND is blank, then
+# it will default to printing an error. To prevent the default behavior,
+# set the action to ':'.
+#
+# AX_LUA_LIBS: Search for Lua libraries. Requires that AX_PROG_LUA be
+# expanded before this macro. Adds precious variable LUA_LIB, which may
+# contain Lua specific linker flags, e.g. -llua5.1. If LUA_LIB is blank,
+# then this macro will attempt to find suitable flags.
+#
+# LUA_LIB can be used by Automake to link Lua modules or executables with
+# embedded interpreters. The *_LIBADD and *_LDADD variables should be used
+# for this purpose, e.g. mymod_LIBADD = $(LUA_LIB).
+#
+# This macro searches for the Lua library. More technically, it searches
+# for a library containing the function lua_load. The search is performed
+# with a combination of LIBS, LIBRARY_PATH, and LUA_LIB.
+#
+# If the search determines that some linker flags are missing, then those
+# flags will be added to LUA_LIB.
+#
+# If libraries are found, then ACTION-IF-FOUND is performed, otherwise
+# ACTION-IF-NOT-FOUND is performed. If ACTION-IF-NOT-FOUND is blank, then
+# it will default to printing an error. To prevent the default behavior,
+# set the action to ':'.
+#
+# AX_LUA_READLINE: Search for readline headers and libraries. Requires the
+# AX_LIB_READLINE macro, which is provided by ax_lib_readline.m4 from the
+# Autoconf Archive.
+#
+# If a readline compatible library is found, then ACTION-IF-FOUND is
+# performed, otherwise ACTION-IF-NOT-FOUND is performed.
+#
+# LICENSE
+#
+# Copyright (c) 2015 Reuben Thomas
+# Copyright (c) 2014 Tim Perkins
+#
+# This program is free software: you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception, the respective Autoconf Macro's copyright owner
+# gives unlimited permission to copy, distribute and modify the configure
+# scripts that are the output of Autoconf when processing the Macro. You
+# need not follow the terms of the GNU General Public License when using
+# or distributing such scripts, even though portions of the text of the
+# Macro appear in them. The GNU General Public License (GPL) does govern
+# all other use of the material that constitutes the Autoconf Macro.
+#
+# This special exception to the GPL applies to versions of the Autoconf
+# Macro released by the Autoconf Archive. When you make and distribute a
+# modified version of the Autoconf Macro, you may extend this special
+# exception to the GPL to apply to your modified version as well.
+
+#serial 39
+
+dnl =========================================================================
+dnl AX_PROG_LUA([MINIMUM-VERSION], [TOO-BIG-VERSION],
+dnl [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
+dnl =========================================================================
+AC_DEFUN([AX_PROG_LUA],
+[
+ dnl Check for required tools.
+ AC_REQUIRE([AC_PROG_GREP])
+ AC_REQUIRE([AC_PROG_SED])
+
+ dnl Make LUA a precious variable.
+ AC_ARG_VAR([LUA], [The Lua interpreter, e.g. /usr/bin/lua5.1])
+
+ dnl Find a Lua interpreter.
+ m4_define_default([_AX_LUA_INTERPRETER_LIST],
+ [lua lua5.3 lua53 lua5.2 lua52 lua5.1 lua51 lua50])
+
+ m4_if([$1], [],
+ [ dnl No version check is needed. Find any Lua interpreter.
+ AS_IF([test "x$LUA" = 'x'],
+ [AC_PATH_PROGS([LUA], [_AX_LUA_INTERPRETER_LIST], [:])])
+ ax_display_LUA='lua'
+
+ AS_IF([test "x$LUA" != 'x:'],
+ [ dnl At least check if this is a Lua interpreter.
+ AC_MSG_CHECKING([if $LUA is a Lua interpreter])
+ _AX_LUA_CHK_IS_INTRP([$LUA],
+ [AC_MSG_RESULT([yes])],
+ [ AC_MSG_RESULT([no])
+ AC_MSG_ERROR([not a Lua interpreter])
+ ])
+ ])
+ ],
+ [ dnl A version check is needed.
+ AS_IF([test "x$LUA" != 'x'],
+ [ dnl Check if this is a Lua interpreter.
+ AC_MSG_CHECKING([if $LUA is a Lua interpreter])
+ _AX_LUA_CHK_IS_INTRP([$LUA],
+ [AC_MSG_RESULT([yes])],
+ [ AC_MSG_RESULT([no])
+ AC_MSG_ERROR([not a Lua interpreter])
+ ])
+ dnl Check the version.
+ m4_if([$2], [],
+ [_ax_check_text="whether $LUA version >= $1"],
+ [_ax_check_text="whether $LUA version >= $1, < $2"])
+ AC_MSG_CHECKING([$_ax_check_text])
+ _AX_LUA_CHK_VER([$LUA], [$1], [$2],
+ [AC_MSG_RESULT([yes])],
+ [ AC_MSG_RESULT([no])
+ AC_MSG_ERROR([version is out of range for specified LUA])])
+ ax_display_LUA=$LUA
+ ],
+ [ dnl Try each interpreter until we find one that satisfies VERSION.
+ m4_if([$2], [],
+ [_ax_check_text="for a Lua interpreter with version >= $1"],
+ [_ax_check_text="for a Lua interpreter with version >= $1, < $2"])
+ AC_CACHE_CHECK([$_ax_check_text],
+ [ax_cv_pathless_LUA],
+ [ for ax_cv_pathless_LUA in _AX_LUA_INTERPRETER_LIST none; do
+ test "x$ax_cv_pathless_LUA" = 'xnone' && break
+ _AX_LUA_CHK_IS_INTRP([$ax_cv_pathless_LUA], [], [continue])
+ _AX_LUA_CHK_VER([$ax_cv_pathless_LUA], [$1], [$2], [break])
+ done
+ ])
+ dnl Set $LUA to the absolute path of $ax_cv_pathless_LUA.
+ AS_IF([test "x$ax_cv_pathless_LUA" = 'xnone'],
+ [LUA=':'],
+ [AC_PATH_PROG([LUA], [$ax_cv_pathless_LUA])])
+ ax_display_LUA=$ax_cv_pathless_LUA
+ ])
+ ])
+
+ AS_IF([test "x$LUA" = 'x:'],
+ [ dnl Run any user-specified action, or abort.
+ m4_default([$4], [AC_MSG_ERROR([cannot find suitable Lua interpreter])])
+ ],
+ [ dnl Query Lua for its version number.
+ AC_CACHE_CHECK([for $ax_display_LUA version],
+ [ax_cv_lua_version],
+ [ dnl Get the interpreter version in X.Y format. This should work for
+ dnl interpreters version 5.0 and beyond.
+ ax_cv_lua_version=[`$LUA -e '
+ -- return a version number in X.Y format
+ local _, _, ver = string.find(_VERSION, "^Lua (%d+%.%d+)")
+ print(ver)'`]
+ ])
+ AS_IF([test "x$ax_cv_lua_version" = 'x'],
+ [AC_MSG_ERROR([invalid Lua version number])])
+ AC_SUBST([LUA_VERSION], [$ax_cv_lua_version])
+ AC_SUBST([LUA_SHORT_VERSION], [`echo "$LUA_VERSION" | $SED 's|\.||'`])
+
+ dnl The following check is not supported:
+ dnl At times (like when building shared libraries) you may want to know
+ dnl which OS platform Lua thinks this is.
+ AC_CACHE_CHECK([for $ax_display_LUA platform],
+ [ax_cv_lua_platform],
+ [ax_cv_lua_platform=[`$LUA -e 'print("unknown")'`]])
+ AC_SUBST([LUA_PLATFORM], [$ax_cv_lua_platform])
+
+ dnl Use the values of $prefix and $exec_prefix for the corresponding
+ dnl values of LUA_PREFIX and LUA_EXEC_PREFIX. These are made distinct
+ dnl variables so they can be overridden if need be. However, the general
+ dnl consensus is that you shouldn't need this ability.
+ AC_SUBST([LUA_PREFIX], ['${prefix}'])
+ AC_SUBST([LUA_EXEC_PREFIX], ['${exec_prefix}'])
+
+ dnl Lua provides no way to query the script directory, and instead
+ dnl provides LUA_PATH. However, we should be able to make a safe educated
+ dnl guess. If the built-in search path contains a directory which is
+ dnl prefixed by $prefix, then we can store scripts there. The first
+ dnl matching path will be used.
+ AC_CACHE_CHECK([for $ax_display_LUA script directory],
+ [ax_cv_lua_luadir],
+ [ AS_IF([test "x$prefix" = 'xNONE'],
+ [ax_lua_prefix=$ac_default_prefix],
+ [ax_lua_prefix=$prefix])
+
+ dnl Initialize to the default path.
+ ax_cv_lua_luadir="$LUA_PREFIX/share/lua/$LUA_VERSION"
+
+ dnl Try to find a path with the prefix.
+ _AX_LUA_FND_PRFX_PTH([$LUA], [$ax_lua_prefix], [script])
+ AS_IF([test "x$ax_lua_prefixed_path" != 'x'],
+ [ dnl Fix the prefix.
+ _ax_strip_prefix=`echo "$ax_lua_prefix" | $SED 's|.|.|g'`
+ ax_cv_lua_luadir=`echo "$ax_lua_prefixed_path" | \
+ $SED "s|^$_ax_strip_prefix|$LUA_PREFIX|"`
+ ])
+ ])
+ AC_SUBST([luadir], [$ax_cv_lua_luadir])
+ AC_SUBST([pkgluadir], [\${luadir}/$PACKAGE])
+
+ dnl Lua provides no way to query the module directory, and instead
+ dnl provides LUA_PATH. However, we should be able to make a safe educated
+ dnl guess. If the built-in search path contains a directory which is
+ dnl prefixed by $exec_prefix, then we can store modules there. The first
+ dnl matching path will be used.
+ AC_CACHE_CHECK([for $ax_display_LUA module directory],
+ [ax_cv_lua_luaexecdir],
+ [ AS_IF([test "x$exec_prefix" = 'xNONE'],
+ [ax_lua_exec_prefix=$ax_lua_prefix],
+ [ax_lua_exec_prefix=$exec_prefix])
+
+ dnl Initialize to the default path.
+ ax_cv_lua_luaexecdir="$LUA_EXEC_PREFIX/lib/lua/$LUA_VERSION"
+
+ dnl Try to find a path with the prefix.
+ _AX_LUA_FND_PRFX_PTH([$LUA],
+ [$ax_lua_exec_prefix], [module])
+ AS_IF([test "x$ax_lua_prefixed_path" != 'x'],
+ [ dnl Fix the prefix.
+ _ax_strip_prefix=`echo "$ax_lua_exec_prefix" | $SED 's|.|.|g'`
+ ax_cv_lua_luaexecdir=`echo "$ax_lua_prefixed_path" | \
+ $SED "s|^$_ax_strip_prefix|$LUA_EXEC_PREFIX|"`
+ ])
+ ])
+ AC_SUBST([luaexecdir], [$ax_cv_lua_luaexecdir])
+ AC_SUBST([pkgluaexecdir], [\${luaexecdir}/$PACKAGE])
+
+ dnl Run any user specified action.
+ $3
+ ])
+])
+
+dnl AX_WITH_LUA is now the same thing as AX_PROG_LUA.
+AC_DEFUN([AX_WITH_LUA],
+[
+ AC_MSG_WARN([[$0 is deprecated, please use AX_PROG_LUA instead]])
+ AX_PROG_LUA
+])
+
+
+dnl =========================================================================
+dnl _AX_LUA_CHK_IS_INTRP(PROG, [ACTION-IF-TRUE], [ACTION-IF-FALSE])
+dnl =========================================================================
+AC_DEFUN([_AX_LUA_CHK_IS_INTRP],
+[
+ dnl A minimal Lua factorial to prove this is an interpreter. This should work
+ dnl for Lua interpreters version 5.0 and beyond.
+ _ax_lua_factorial=[`$1 2>/dev/null -e '
+ -- a simple factorial
+ function fact (n)
+ if n == 0 then
+ return 1
+ else
+ return n * fact(n-1)
+ end
+ end
+ print("fact(5) is " .. fact(5))'`]
+ AS_IF([test "$_ax_lua_factorial" = 'fact(5) is 120'],
+ [$2], [$3])
+])
+
+
+dnl =========================================================================
+dnl _AX_LUA_CHK_VER(PROG, MINIMUM-VERSION, [TOO-BIG-VERSION],
+dnl [ACTION-IF-TRUE], [ACTION-IF-FALSE])
+dnl =========================================================================
+AC_DEFUN([_AX_LUA_CHK_VER],
+[
+ dnl Check that the Lua version is within the bounds. Only the major and minor
+ dnl version numbers are considered. This should work for Lua interpreters
+ dnl version 5.0 and beyond.
+ _ax_lua_good_version=[`$1 -e '
+ -- a script to compare versions
+ function verstr2num(verstr)
+ local _, _, majorver, minorver = string.find(verstr, "^(%d+)%.(%d+)")
+ if majorver and minorver then
+ return tonumber(majorver) * 100 + tonumber(minorver)
+ end
+ end
+ local minver = verstr2num("$2")
+ local _, _, trimver = string.find(_VERSION, "^Lua (.*)")
+ local ver = verstr2num(trimver)
+ local maxver = verstr2num("$3") or 1e9
+ if minver <= ver and ver < maxver then
+ print("yes")
+ else
+ print("no")
+ end'`]
+ AS_IF([test "x$_ax_lua_good_version" = "xyes"],
+ [$4], [$5])
+])
+
+
+dnl =========================================================================
+dnl _AX_LUA_FND_PRFX_PTH(PROG, PREFIX, SCRIPT-OR-MODULE-DIR)
+dnl =========================================================================
+AC_DEFUN([_AX_LUA_FND_PRFX_PTH],
+[
+ dnl Get the script or module directory by querying the Lua interpreter,
+ dnl filtering on the given prefix, and selecting the shallowest path. If no
+ dnl path is found matching the prefix, the result will be an empty string.
+ dnl The third argument determines the type of search, it can be 'script' or
+ dnl 'module'. Supplying 'script' will perform the search with package.path
+ dnl and LUA_PATH, and supplying 'module' will search with package.cpath and
+ dnl LUA_CPATH. This is done for compatibility with Lua 5.0.
+
+ ax_lua_prefixed_path=[`$1 -e '
+ -- get the path based on search type
+ local searchtype = "$3"
+ local paths = ""
+ if searchtype == "script" then
+ paths = (package and package.path) or LUA_PATH
+ elseif searchtype == "module" then
+ paths = (package and package.cpath) or LUA_CPATH
+ end
+ -- search for the prefix
+ local prefix = "'$2'"
+ local minpath = ""
+ local mindepth = 1e9
+ string.gsub(paths, "(@<:@^;@:>@+)",
+ function (path)
+ path = string.gsub(path, "%?.*$", "")
+ path = string.gsub(path, "/@<:@^/@:>@*$", "")
+ if string.find(path, prefix) then
+ local depth = string.len(string.gsub(path, "@<:@^/@:>@", ""))
+ if depth < mindepth then
+ minpath = path
+ mindepth = depth
+ end
+ end
+ end)
+ print(minpath)'`]
+])
+
+
+dnl =========================================================================
+dnl AX_LUA_HEADERS([ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
+dnl =========================================================================
+AC_DEFUN([AX_LUA_HEADERS],
+[
+ dnl Check for LUA_VERSION.
+ AC_MSG_CHECKING([if LUA_VERSION is defined])
+ AS_IF([test "x$LUA_VERSION" != 'x'],
+ [AC_MSG_RESULT([yes])],
+ [ AC_MSG_RESULT([no])
+ AC_MSG_ERROR([cannot check Lua headers without knowing LUA_VERSION])
+ ])
+
+ dnl Make LUA_INCLUDE a precious variable.
+ AC_ARG_VAR([LUA_INCLUDE], [The Lua includes, e.g. -I/usr/include/lua5.1])
+
+ dnl Some default directories to search.
+ LUA_SHORT_VERSION=`echo "$LUA_VERSION" | $SED 's|\.||'`
+ m4_define_default([_AX_LUA_INCLUDE_LIST],
+ [ /usr/include/lua$LUA_VERSION \
+ /usr/include/lua-$LUA_VERSION \
+ /usr/include/lua/$LUA_VERSION \
+ /usr/include/lua$LUA_SHORT_VERSION \
+ /usr/local/include/lua$LUA_VERSION \
+ /usr/local/include/lua-$LUA_VERSION \
+ /usr/local/include/lua/$LUA_VERSION \
+ /usr/local/include/lua$LUA_SHORT_VERSION \
+ ])
+
+ dnl Try to find the headers.
+ _ax_lua_saved_cppflags=$CPPFLAGS
+ CPPFLAGS="$CPPFLAGS $LUA_INCLUDE"
+ AC_CHECK_HEADERS([lua.h lualib.h lauxlib.h luaconf.h])
+ CPPFLAGS=$_ax_lua_saved_cppflags
+
+ dnl Try some other directories if LUA_INCLUDE was not set.
+ AS_IF([test "x$LUA_INCLUDE" = 'x' &&
+ test "x$ac_cv_header_lua_h" != 'xyes'],
+ [ dnl Try some common include paths.
+ for _ax_include_path in _AX_LUA_INCLUDE_LIST; do
+ test ! -d "$_ax_include_path" && continue
+
+ AC_MSG_CHECKING([for Lua headers in])
+ AC_MSG_RESULT([$_ax_include_path])
+
+ AS_UNSET([ac_cv_header_lua_h])
+ AS_UNSET([ac_cv_header_lualib_h])
+ AS_UNSET([ac_cv_header_lauxlib_h])
+ AS_UNSET([ac_cv_header_luaconf_h])
+
+ _ax_lua_saved_cppflags=$CPPFLAGS
+ CPPFLAGS="$CPPFLAGS -I$_ax_include_path"
+ AC_CHECK_HEADERS([lua.h lualib.h lauxlib.h luaconf.h])
+ CPPFLAGS=$_ax_lua_saved_cppflags
+
+ AS_IF([test "x$ac_cv_header_lua_h" = 'xyes'],
+ [ LUA_INCLUDE="-I$_ax_include_path"
+ break
+ ])
+ done
+ ])
+
+ AS_IF([test "x$ac_cv_header_lua_h" = 'xyes'],
+ [ dnl Make a program to print LUA_VERSION defined in the header.
+ dnl TODO It would be really nice if we could do this without compiling a
+ dnl program, then it would work when cross compiling. But I'm not sure how
+ dnl to do this reliably. For now, assume versions match when cross compiling.
+
+ AS_IF([test "x$cross_compiling" != 'xyes'],
+ [ AC_CACHE_CHECK([for Lua header version],
+ [ax_cv_lua_header_version],
+ [ _ax_lua_saved_cppflags=$CPPFLAGS
+ CPPFLAGS="$CPPFLAGS $LUA_INCLUDE"
+ AC_RUN_IFELSE(
+ [ AC_LANG_SOURCE([[
+#include <lua.h>
+#include <stdlib.h>
+#include <stdio.h>
+int main(int argc, char ** argv)
+{
+ if(argc > 1) printf("%s", LUA_VERSION);
+ exit(EXIT_SUCCESS);
+}
+]])
+ ],
+ [ ax_cv_lua_header_version=`./conftest$EXEEXT p | \
+ $SED -n "s|^Lua \(@<:@0-9@:>@\{1,\}\.@<:@0-9@:>@\{1,\}\).\{0,\}|\1|p"`
+ ],
+ [ax_cv_lua_header_version='unknown'])
+ CPPFLAGS=$_ax_lua_saved_cppflags
+ ])
+
+ dnl Compare this to the previously found LUA_VERSION.
+ AC_MSG_CHECKING([if Lua header version matches $LUA_VERSION])
+ AS_IF([test "x$ax_cv_lua_header_version" = "x$LUA_VERSION"],
+ [ AC_MSG_RESULT([yes])
+ ax_header_version_match='yes'
+ ],
+ [ AC_MSG_RESULT([no])
+ ax_header_version_match='no'
+ ])
+ ],
+ [ AC_MSG_WARN([cross compiling so assuming header version number matches])
+ ax_header_version_match='yes'
+ ])
+ ])
+
+ dnl Was LUA_INCLUDE specified?
+ AS_IF([test "x$ax_header_version_match" != 'xyes' &&
+ test "x$LUA_INCLUDE" != 'x'],
+ [AC_MSG_ERROR([cannot find headers for specified LUA_INCLUDE])])
+
+ dnl Test the final result and run user code.
+ AS_IF([test "x$ax_header_version_match" = 'xyes'], [$1],
+ [m4_default([$2], [AC_MSG_ERROR([cannot find Lua includes])])])
+])
+
+dnl AX_LUA_HEADERS_VERSION no longer exists, use AX_LUA_HEADERS.
+AC_DEFUN([AX_LUA_HEADERS_VERSION],
+[
+ AC_MSG_WARN([[$0 is deprecated, please use AX_LUA_HEADERS instead]])
+])
+
+
+dnl =========================================================================
+dnl AX_LUA_LIBS([ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
+dnl =========================================================================
+AC_DEFUN([AX_LUA_LIBS],
+[
+ dnl TODO Should this macro also check various -L flags?
+
+ dnl Check for LUA_VERSION.
+ AC_MSG_CHECKING([if LUA_VERSION is defined])
+ AS_IF([test "x$LUA_VERSION" != 'x'],
+ [AC_MSG_RESULT([yes])],
+ [ AC_MSG_RESULT([no])
+ AC_MSG_ERROR([cannot check Lua libs without knowing LUA_VERSION])
+ ])
+
+ dnl Make LUA_LIB a precious variable.
+ AC_ARG_VAR([LUA_LIB], [The Lua library, e.g. -llua5.1])
+
+ AS_IF([test "x$LUA_LIB" != 'x'],
+ [ dnl Check that LUA_LIBS works.
+ _ax_lua_saved_libs=$LIBS
+ LIBS="$LIBS $LUA_LIB"
+ AC_SEARCH_LIBS([lua_load], [],
+ [_ax_found_lua_libs='yes'],
+ [_ax_found_lua_libs='no'])
+ LIBS=$_ax_lua_saved_libs
+
+ dnl Check the result.
+ AS_IF([test "x$_ax_found_lua_libs" != 'xyes'],
+ [AC_MSG_ERROR([cannot find libs for specified LUA_LIB])])
+ ],
+ [ dnl First search for extra libs.
+ _ax_lua_extra_libs=''
+
+ _ax_lua_saved_libs=$LIBS
+ LIBS="$LIBS $LUA_LIB"
+ AC_SEARCH_LIBS([exp], [m])
+ AC_SEARCH_LIBS([dlopen], [dl])
+ LIBS=$_ax_lua_saved_libs
+
+ AS_IF([test "x$ac_cv_search_exp" != 'xno' &&
+ test "x$ac_cv_search_exp" != 'xnone required'],
+ [_ax_lua_extra_libs="$_ax_lua_extra_libs $ac_cv_search_exp"])
+
+ AS_IF([test "x$ac_cv_search_dlopen" != 'xno' &&
+ test "x$ac_cv_search_dlopen" != 'xnone required'],
+ [_ax_lua_extra_libs="$_ax_lua_extra_libs $ac_cv_search_dlopen"])
+
+ dnl Try to find the Lua libs.
+ _ax_lua_saved_libs=$LIBS
+ LIBS="$LIBS $LUA_LIB"
+ AC_SEARCH_LIBS([lua_load],
+ [ lua$LUA_VERSION \
+ lua$LUA_SHORT_VERSION \
+ lua-$LUA_VERSION \
+ lua-$LUA_SHORT_VERSION \
+ lua \
+ ],
+ [_ax_found_lua_libs='yes'],
+ [_ax_found_lua_libs='no'],
+ [$_ax_lua_extra_libs])
+ LIBS=$_ax_lua_saved_libs
+
+ AS_IF([test "x$ac_cv_search_lua_load" != 'xno' &&
+ test "x$ac_cv_search_lua_load" != 'xnone required'],
+ [LUA_LIB="$ac_cv_search_lua_load $_ax_lua_extra_libs"])
+ ])
+
+ dnl Test the result and run user code.
+ AS_IF([test "x$_ax_found_lua_libs" = 'xyes'], [$1],
+ [m4_default([$2], [AC_MSG_ERROR([cannot find Lua libs])])])
+])
+
+
+dnl =========================================================================
+dnl AX_LUA_READLINE([ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
+dnl =========================================================================
+AC_DEFUN([AX_LUA_READLINE],
+[
+ AX_LIB_READLINE
+ AS_IF([test "x$ac_cv_header_readline_readline_h" != 'x' &&
+ test "x$ac_cv_header_readline_history_h" != 'x'],
+ [ LUA_LIBS_CFLAGS="-DLUA_USE_READLINE $LUA_LIBS_CFLAGS"
+ $1
+ ],
+ [$2])
+])
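A hypothetical end-to-end configure.ac sketch for the Lua macros above (version bounds and module name are illustrative):

    AX_PROG_LUA([5.1], [5.4])
    AX_LUA_HEADERS
    AX_LUA_LIBS
    dnl A Lua C module would then be built against the discovered flags:
    dnl   mymod_la_CPPFLAGS = $(LUA_INCLUDE)
    dnl   mymod_la_LIBADD   = $(LUA_LIB)
    dnl   luaexec_LTLIBRARIES = mymod.la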
diff --git a/vendor/github.com/apache/thrift/aclocal/ax_prog_dotnetcore_version.m4 b/vendor/github.com/apache/thrift/aclocal/ax_prog_dotnetcore_version.m4
new file mode 100644
index 000000000..45c7a4e1a
--- /dev/null
+++ b/vendor/github.com/apache/thrift/aclocal/ax_prog_dotnetcore_version.m4
@@ -0,0 +1,61 @@
+# ==============================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_prog_dotnetcore_version.html
+# ==============================================================================
+#
+# SYNOPSIS
+#
+# AX_PROG_DOTNETCORE_VERSION([VERSION],[ACTION-IF-TRUE],[ACTION-IF-FALSE])
+#
+# DESCRIPTION
+#
+# Makes sure that .NET Core supports the version indicated. If true the
+# shell commands in ACTION-IF-TRUE are executed. If not the shell commands
+# in ACTION-IF-FALSE are run. The $dotnetcore_version variable will be
+# filled with the detected version.
+#
+# This macro uses the $DOTNETCORE variable to perform the check. If
+# $DOTNETCORE is not set prior to calling this macro, the macro will fail.
+#
+# Example:
+#
+# AC_PATH_PROG([DOTNETCORE],[dotnet])
+# AX_PROG_DOTNETCORE_VERSION([1.0.2],[ ... ],[ ... ])
+#
+# Searches for .NET Core, then checks if at least version 1.0.2 is
+# present.
+#
+# LICENSE
+#
+# Copyright (c) 2016 Jens Geyer
+#
+# Copying and distribution of this file, with or without modification, are
+# permitted in any medium without royalty provided the copyright notice
+# and this notice are preserved. This file is offered as-is, without any
+# warranty.
+
+#serial 1
+
+AC_DEFUN([AX_PROG_DOTNETCORE_VERSION],[
+ AC_REQUIRE([AC_PROG_SED])
+
+ AS_IF([test -n "$DOTNETCORE"],[
+ ax_dotnetcore_version="$1"
+
+ AC_MSG_CHECKING([for .NET Core version])
+ dotnetcore_version=`$DOTNETCORE --version 2>&1 | $SED -e 's/\(@<:@0-9@:>@*\.@<:@0-9@:>@*\.@<:@0-9@:>@*\)\(.*\)/\1/'`
+ AC_MSG_RESULT($dotnetcore_version)
+
+ AC_SUBST([DOTNETCORE_VERSION],[$dotnetcore_version])
+
+ AX_COMPARE_VERSION([$ax_dotnetcore_version],[le],[$dotnetcore_version],[
+ :
+ $2
+ ],[
+ :
+ $3
+ ])
+ ],[
+ AC_MSG_WARN([could not find .NET Core])
+ $3
+ ])
+])
diff --git a/vendor/github.com/apache/thrift/aclocal/ax_prog_haxe_version.m4 b/vendor/github.com/apache/thrift/aclocal/ax_prog_haxe_version.m4
new file mode 100644
index 000000000..3dee43027
--- /dev/null
+++ b/vendor/github.com/apache/thrift/aclocal/ax_prog_haxe_version.m4
@@ -0,0 +1,60 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_prog_haxe_version.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_PROG_HAXE_VERSION([VERSION],[ACTION-IF-TRUE],[ACTION-IF-FALSE])
+#
+# DESCRIPTION
+#
+# Makes sure that haxe supports the version indicated. If true the shell
+# commands in ACTION-IF-TRUE are executed. If not the shell commands in
+# ACTION-IF-FALSE are run. The $HAXE_VERSION variable will be filled with
+# the detected version.
+#
+# This macro uses the $HAXE variable to perform the check. If $HAXE is not
+# set prior to calling this macro, the macro will fail.
+#
+# Example:
+#
+# AC_PATH_PROG([HAXE],[haxe])
+# AX_PROG_HAXE_VERSION([3.1.3],[ ... ],[ ... ])
+#
+# Searches for Haxe, then checks if at least version 3.1.3 is present.
+#
+# LICENSE
+#
+# Copyright (c) 2015 Jens Geyer
+#
+# Copying and distribution of this file, with or without modification, are
+# permitted in any medium without royalty provided the copyright notice
+# and this notice are preserved. This file is offered as-is, without any
+# warranty.
+
+#serial 1
+
+AC_DEFUN([AX_PROG_HAXE_VERSION],[
+ AC_REQUIRE([AC_PROG_SED])
+
+ AS_IF([test -n "$HAXE"],[
+ ax_haxe_version="$1"
+
+ AC_MSG_CHECKING([for haxe version])
+ haxe_version=`$HAXE -version 2>&1 | $SED -e 's/^.* \( @<:@0-9@:>@*\.@<:@0-9@:>@*\.@<:@0-9@:>@*\) .*/\1/'`
+ AC_MSG_RESULT($haxe_version)
+
+ AC_SUBST([HAXE_VERSION],[$haxe_version])
+
+ AX_COMPARE_VERSION([$ax_haxe_version],[le],[$haxe_version],[
+ :
+ $2
+ ],[
+ :
+ $3
+ ])
+ ],[
+ AC_MSG_WARN([could not find Haxe])
+ $3
+ ])
+])
diff --git a/vendor/github.com/apache/thrift/aclocal/ax_prog_perl_modules.m4 b/vendor/github.com/apache/thrift/aclocal/ax_prog_perl_modules.m4
new file mode 100644
index 000000000..11a326c93
--- /dev/null
+++ b/vendor/github.com/apache/thrift/aclocal/ax_prog_perl_modules.m4
@@ -0,0 +1,77 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_prog_perl_modules.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_PROG_PERL_MODULES([MODULES], [ACTION-IF-TRUE], [ACTION-IF-FALSE])
+#
+# DESCRIPTION
+#
+# Checks to see if the given perl modules are available. If true the shell
+# commands in ACTION-IF-TRUE are executed. If not the shell commands in
+# ACTION-IF-FALSE are run. Note if $PERL is not set (for example by
+# calling AC_CHECK_PROG, or AC_PATH_PROG), AC_CHECK_PROG(PERL, perl, perl)
+# will be run.
+#
+# MODULES is a space separated list of module names. To check for a
+# minimum version of a module, append the version number to the module
+# name, separated by an equals sign.
+#
+# Example:
+#
+# AX_PROG_PERL_MODULES( Text::Wrap Net::LDAP=1.0.3, ,
+# AC_MSG_WARN(Need some Perl modules))
+#
+# LICENSE
+#
+# Copyright (c) 2009 Dean Povey
+#
+# Copying and distribution of this file, with or without modification, are
+# permitted in any medium without royalty provided the copyright notice
+# and this notice are preserved. This file is offered as-is, without any
+# warranty.
+
+#serial 7
+
+AU_ALIAS([AC_PROG_PERL_MODULES], [AX_PROG_PERL_MODULES])
+AC_DEFUN([AX_PROG_PERL_MODULES],[dnl
+
+m4_define([ax_perl_modules])
+m4_foreach([ax_perl_module], m4_split(m4_normalize([$1])),
+ [
+ m4_append([ax_perl_modules],
+ [']m4_bpatsubst(ax_perl_module,=,[ ])[' ])
+ ])
+
+# Make sure we have perl
+if test -z "$PERL"; then
+AC_CHECK_PROG(PERL,perl,perl)
+fi
+
+if test "x$PERL" != x; then
+ ax_perl_modules_failed=0
+ for ax_perl_module in ax_perl_modules; do
+ AC_MSG_CHECKING(for perl module $ax_perl_module)
+
+ # Would be nice to log result here, but can't rely on autoconf internals
+ $PERL -e "use $ax_perl_module; exit" > /dev/null 2>&1
+ if test $? -ne 0; then
+ AC_MSG_RESULT(no);
+ ax_perl_modules_failed=1
+ else
+ AC_MSG_RESULT(ok);
+ fi
+ done
+
+ # Run optional shell commands
+ if test "$ax_perl_modules_failed" = 0; then
+ :
+ $2
+ else
+ :
+ $3
+ fi
+else
+ AC_MSG_WARN(could not find perl)
+fi])dnl
diff --git a/vendor/github.com/apache/thrift/aclocal/ax_signed_right_shift.m4 b/vendor/github.com/apache/thrift/aclocal/ax_signed_right_shift.m4
new file mode 100644
index 000000000..9c3ceb798
--- /dev/null
+++ b/vendor/github.com/apache/thrift/aclocal/ax_signed_right_shift.m4
@@ -0,0 +1,127 @@
+dnl @synopsis AX_SIGNED_RIGHT_SHIFT
+dnl
+dnl Tests the behavior of a right shift on a negative signed int.
+dnl
+dnl This macro calls:
+dnl AC_DEFINE(SIGNED_RIGHT_SHIFT_IS)
+dnl AC_DEFINE(ARITHMETIC_RIGHT_SHIFT)
+dnl AC_DEFINE(LOGICAL_RIGHT_SHIFT)
+dnl AC_DEFINE(UNKNOWN_RIGHT_SHIFT)
+dnl
+dnl SIGNED_RIGHT_SHIFT_IS will be equal to one of the other macros.
+dnl It also leaves the shell variables "ax_signed_right_shift"
+dnl set to "arithmetic", "logical", or "unknown".
+dnl
+dnl NOTE: This macro does not work for cross-compiling.
+dnl
+dnl @category C
+dnl @version 2009-03-25
+dnl @license AllPermissive
+dnl
+dnl Copyright (C) 2009 David Reiss
+dnl Copying and distribution of this file, with or without modification,
+dnl are permitted in any medium without royalty provided the copyright
+dnl notice and this notice are preserved.
+
+AC_DEFUN([AX_SIGNED_RIGHT_SHIFT],
+ [
+
+ AC_MSG_CHECKING(the behavior of a signed right shift)
+
+ success_arithmetic=no
+ AC_RUN_IFELSE([AC_LANG_PROGRAM([[]], [[
+ return
+ /* 0xffffffff */
+ -1 >> 1 != -1 ||
+ -1 >> 2 != -1 ||
+ -1 >> 3 != -1 ||
+ -1 >> 4 != -1 ||
+ -1 >> 8 != -1 ||
+ -1 >> 16 != -1 ||
+ -1 >> 24 != -1 ||
+ -1 >> 31 != -1 ||
+ /* 0x80000000 */
+ (-2147483647 - 1) >> 1 != -1073741824 ||
+ (-2147483647 - 1) >> 2 != -536870912 ||
+ (-2147483647 - 1) >> 3 != -268435456 ||
+ (-2147483647 - 1) >> 4 != -134217728 ||
+ (-2147483647 - 1) >> 8 != -8388608 ||
+ (-2147483647 - 1) >> 16 != -32768 ||
+ (-2147483647 - 1) >> 24 != -128 ||
+ (-2147483647 - 1) >> 31 != -1 ||
+ /* 0x90800000 */
+ -1870659584 >> 1 != -935329792 ||
+ -1870659584 >> 2 != -467664896 ||
+ -1870659584 >> 3 != -233832448 ||
+ -1870659584 >> 4 != -116916224 ||
+ -1870659584 >> 8 != -7307264 ||
+ -1870659584 >> 16 != -28544 ||
+ -1870659584 >> 24 != -112 ||
+ -1870659584 >> 31 != -1 ||
+ 0;
+ ]])], [
+ success_arithmetic=yes
+ ])
+
+
+ success_logical=no
+ AC_RUN_IFELSE([AC_LANG_PROGRAM([[]], [[
+ return
+ /* 0xffffffff */
+ -1 >> 1 != (signed)((unsigned)-1 >> 1) ||
+ -1 >> 2 != (signed)((unsigned)-1 >> 2) ||
+ -1 >> 3 != (signed)((unsigned)-1 >> 3) ||
+ -1 >> 4 != (signed)((unsigned)-1 >> 4) ||
+ -1 >> 8 != (signed)((unsigned)-1 >> 8) ||
+ -1 >> 16 != (signed)((unsigned)-1 >> 16) ||
+ -1 >> 24 != (signed)((unsigned)-1 >> 24) ||
+ -1 >> 31 != (signed)((unsigned)-1 >> 31) ||
+ /* 0x80000000 */
+ (-2147483647 - 1) >> 1 != (signed)((unsigned)(-2147483647 - 1) >> 1) ||
+ (-2147483647 - 1) >> 2 != (signed)((unsigned)(-2147483647 - 1) >> 2) ||
+ (-2147483647 - 1) >> 3 != (signed)((unsigned)(-2147483647 - 1) >> 3) ||
+ (-2147483647 - 1) >> 4 != (signed)((unsigned)(-2147483647 - 1) >> 4) ||
+ (-2147483647 - 1) >> 8 != (signed)((unsigned)(-2147483647 - 1) >> 8) ||
+ (-2147483647 - 1) >> 16 != (signed)((unsigned)(-2147483647 - 1) >> 16) ||
+ (-2147483647 - 1) >> 24 != (signed)((unsigned)(-2147483647 - 1) >> 24) ||
+ (-2147483647 - 1) >> 31 != (signed)((unsigned)(-2147483647 - 1) >> 31) ||
+ /* 0x90800000 */
+ -1870659584 >> 1 != (signed)((unsigned)-1870659584 >> 1) ||
+ -1870659584 >> 2 != (signed)((unsigned)-1870659584 >> 2) ||
+ -1870659584 >> 3 != (signed)((unsigned)-1870659584 >> 3) ||
+ -1870659584 >> 4 != (signed)((unsigned)-1870659584 >> 4) ||
+ -1870659584 >> 8 != (signed)((unsigned)-1870659584 >> 8) ||
+ -1870659584 >> 16 != (signed)((unsigned)-1870659584 >> 16) ||
+ -1870659584 >> 24 != (signed)((unsigned)-1870659584 >> 24) ||
+ -1870659584 >> 31 != (signed)((unsigned)-1870659584 >> 31) ||
+ 0;
+ ]])], [
+ success_logical=yes
+ ])
+
+
+ AC_DEFINE([ARITHMETIC_RIGHT_SHIFT], 1, [Possible value for SIGNED_RIGHT_SHIFT_IS])
+ AC_DEFINE([LOGICAL_RIGHT_SHIFT], 2, [Possible value for SIGNED_RIGHT_SHIFT_IS])
+ AC_DEFINE([UNKNOWN_RIGHT_SHIFT], 3, [Possible value for SIGNED_RIGHT_SHIFT_IS])
+
+ if test "$success_arithmetic" = "yes" && test "$success_logical" = "yes" ; then
+ AC_MSG_ERROR("Right shift appears to be both arithmetic and logical!")
+ elif test "$success_arithmetic" = "yes" ; then
+ ax_signed_right_shift=arithmetic
+ AC_DEFINE([SIGNED_RIGHT_SHIFT_IS], 1,
+ [Indicates the effect of the right shift operator
+ on negative signed integers])
+ elif test "$success_logical" = "yes" ; then
+ ax_signed_right_shift=logical
+ AC_DEFINE([SIGNED_RIGHT_SHIFT_IS], 2,
+ [Indicates the effect of the right shift operator
+ on negative signed integers])
+ else
+ ax_signed_right_shift=unknown
+ AC_DEFINE([SIGNED_RIGHT_SHIFT_IS], 3,
+ [Indicates the effect of the right shift operator
+ on negative signed integers])
+ fi
+
+ AC_MSG_RESULT($ax_signed_right_shift)
+ ])
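The macro above takes no arguments; a hypothetical configure.ac sketch that reacts to its result:

    AX_SIGNED_RIGHT_SHIFT
    AS_IF([test "x$ax_signed_right_shift" = "xunknown"],
      [AC_MSG_WARN([could not classify signed right shift; C code must use a portable fallback])])
    dnl C sources can then branch on SIGNED_RIGHT_SHIFT_IS == ARITHMETIC_RIGHT_SHIFT, etc.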
diff --git a/vendor/github.com/apache/thrift/aclocal/ax_thrift_internal.m4 b/vendor/github.com/apache/thrift/aclocal/ax_thrift_internal.m4
new file mode 100644
index 000000000..8c0e3cbc1
--- /dev/null
+++ b/vendor/github.com/apache/thrift/aclocal/ax_thrift_internal.m4
@@ -0,0 +1,28 @@
+dnl @synopsis AX_THRIFT_GEN(SHORT_LANGUAGE, LONG_LANGUAGE, DEFAULT)
+dnl @synopsis AX_THRIFT_LIB(SHORT_LANGUAGE, LONG_LANGUAGE, DEFAULT)
+dnl
+dnl Allow a particular language generator to be disabled.
+dnl Allow a particular language library to be disabled.
+dnl
+dnl These macros have poor error handling and are poorly documented.
+dnl They are intended only for internal use by the Thrift compiler.
+dnl
+dnl @version 2008-02-20
+dnl @license AllPermissive
+dnl
+dnl Copyright (C) 2009 David Reiss
+dnl Copying and distribution of this file, with or without modification,
+dnl are permitted in any medium without royalty provided the copyright
+dnl notice and this notice are preserved.
+
+AC_DEFUN([AX_THRIFT_LIB],
+ [
+ AC_ARG_WITH($1,
+ AC_HELP_STRING([--with-$1], [build the $2 library @<:@default=$3@:>@]),
+ [with_$1="$withval"],
+ [with_$1=$3]
+ )
+ have_$1=no
+ dnl What we do here is going to vary from library to library,
+ dnl so we can't really generalize (yet!).
+ ])
diff --git a/vendor/github.com/apache/thrift/aclocal/m4_ax_compare_version.m4 b/vendor/github.com/apache/thrift/aclocal/m4_ax_compare_version.m4
new file mode 100644
index 000000000..74dc0fdd9
--- /dev/null
+++ b/vendor/github.com/apache/thrift/aclocal/m4_ax_compare_version.m4
@@ -0,0 +1,177 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_compare_version.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_COMPARE_VERSION(VERSION_A, OP, VERSION_B, [ACTION-IF-TRUE], [ACTION-IF-FALSE])
+#
+# DESCRIPTION
+#
+# This macro compares two version strings. Due to the varying number of
+# minor-version numbers that can exist, and the fact that string
+# comparisons are not compatible with numeric comparisons, this is not
+# necessarily trivial to do in an autoconf script. This macro makes doing
+# these comparisons easy.
+#
+# The six basic comparisons are available, as well as checking equality
+# limited to a certain number of minor-version levels.
+#
+# The operator OP determines what type of comparison to do, and can be one
+# of:
+#
+# eq - equal (test A == B)
+# ne - not equal (test A != B)
+# le - less than or equal (test A <= B)
+# ge - greater than or equal (test A >= B)
+# lt - less than (test A < B)
+# gt - greater than (test A > B)
+#
+# Additionally, the eq and ne operators can have a number after them to limit
+# the test to that number of minor versions.
+#
+# eq0 - equal up to the length of the shorter version
+# ne0 - not equal up to the length of the shorter version
+# eqN - equal up to N sub-version levels
+# neN - not equal up to N sub-version levels
+#
+# When the condition is true, shell commands ACTION-IF-TRUE are run,
+# otherwise shell commands ACTION-IF-FALSE are run. The environment
+# variable 'ax_compare_version' is always set to either 'true' or 'false'
+# as well.
+#
+# Examples:
+#
+# AX_COMPARE_VERSION([3.15.7],[lt],[3.15.8])
+# AX_COMPARE_VERSION([3.15],[lt],[3.15.8])
+#
+# would both be true.
+#
+# AX_COMPARE_VERSION([3.15.7],[eq],[3.15.8])
+# AX_COMPARE_VERSION([3.15],[gt],[3.15.8])
+#
+# would both be false.
+#
+# AX_COMPARE_VERSION([3.15.7],[eq2],[3.15.8])
+#
+# would be true because it is only comparing two minor versions.
+#
+# AX_COMPARE_VERSION([3.15.7],[eq0],[3.15])
+#
+# would be true because it is only comparing the lesser number of minor
+# versions of the two values.
+#
+# Note: The characters that separate the version numbers do not matter. An
+# empty string is the same as version 0. OP is evaluated by autoconf, not
+# configure, so must be a string, not a variable.
+#
+# The author would like to acknowledge Guido Draheim, whose advice about
+# the m4_case and m4_ifvaln functions makes this macro include only the
+# portions necessary to perform the specific comparison specified by the
+# OP argument in the final configure script.
+#
+# LICENSE
+#
+# Copyright (c) 2008 Tim Toolan
+#
+# Copying and distribution of this file, with or without modification, are
+# permitted in any medium without royalty provided the copyright notice
+# and this notice are preserved. This file is offered as-is, without any
+# warranty.
+
+#serial 11
+
+dnl #########################################################################
+AC_DEFUN([AX_COMPARE_VERSION], [
+ AC_REQUIRE([AC_PROG_AWK])
+
+ # Used to indicate true or false condition
+ ax_compare_version=false
+
+ # Convert the two version strings to be compared into a format that
+ # allows a simple string comparison. The end result is that a version
+ # string of the form 1.12.5-r617 will be converted to the form
+ # 0001001200050617. In other words, each number is zero padded to four
+ # digits, and non digits are removed.
+ AS_VAR_PUSHDEF([A],[ax_compare_version_A])
+ A=`echo "$1" | sed -e 's/\([[0-9]]*\)/Z\1Z/g' \
+ -e 's/Z\([[0-9]]\)Z/Z0\1Z/g' \
+ -e 's/Z\([[0-9]][[0-9]]\)Z/Z0\1Z/g' \
+ -e 's/Z\([[0-9]][[0-9]][[0-9]]\)Z/Z0\1Z/g' \
+ -e 's/[[^0-9]]//g'`
+
+ AS_VAR_PUSHDEF([B],[ax_compare_version_B])
+ B=`echo "$3" | sed -e 's/\([[0-9]]*\)/Z\1Z/g' \
+ -e 's/Z\([[0-9]]\)Z/Z0\1Z/g' \
+ -e 's/Z\([[0-9]][[0-9]]\)Z/Z0\1Z/g' \
+ -e 's/Z\([[0-9]][[0-9]][[0-9]]\)Z/Z0\1Z/g' \
+ -e 's/[[^0-9]]//g'`
+
+ dnl # In the case of le, ge, lt, and gt, the strings are sorted as necessary
+ dnl # then the first line is used to determine if the condition is true.
+ dnl # The sed right after the echo is to remove any indented white space.
+ m4_case(m4_tolower($2),
+ [lt],[
+ ax_compare_version=`echo "x$A
+x$B" | sed 's/^ *//' | sort -r | sed "s/x${A}/false/;s/x${B}/true/;1q"`
+ ],
+ [gt],[
+ ax_compare_version=`echo "x$A
+x$B" | sed 's/^ *//' | sort | sed "s/x${A}/false/;s/x${B}/true/;1q"`
+ ],
+ [le],[
+ ax_compare_version=`echo "x$A
+x$B" | sed 's/^ *//' | sort | sed "s/x${A}/true/;s/x${B}/false/;1q"`
+ ],
+ [ge],[
+ ax_compare_version=`echo "x$A
+x$B" | sed 's/^ *//' | sort -r | sed "s/x${A}/true/;s/x${B}/false/;1q"`
+ ],[
+ dnl Split the operator from the subversion count if present.
+ m4_bmatch(m4_substr($2,2),
+ [0],[
+ # A count of zero means use the length of the shorter version.
+ # Determine the number of characters in A and B.
+ ax_compare_version_len_A=`echo "$A" | $AWK '{print(length)}'`
+ ax_compare_version_len_B=`echo "$B" | $AWK '{print(length)}'`
+
+ # Set A to no more than B's length and B to no more than A's length.
+ A=`echo "$A" | sed "s/\(.\{$ax_compare_version_len_B\}\).*/\1/"`
+ B=`echo "$B" | sed "s/\(.\{$ax_compare_version_len_A\}\).*/\1/"`
+ ],
+ [[0-9]+],[
+ # A count greater than zero means use only that many subversions
+ A=`echo "$A" | sed "s/\(\([[0-9]]\{4\}\)\{m4_substr($2,2)\}\).*/\1/"`
+ B=`echo "$B" | sed "s/\(\([[0-9]]\{4\}\)\{m4_substr($2,2)\}\).*/\1/"`
+ ],
+ [.+],[
+ AC_WARNING(
+ [illegal OP numeric parameter: $2])
+ ],[])
+
+ # Pad zeros at end of numbers to make same length.
+ ax_compare_version_tmp_A="$A`echo $B | sed 's/./0/g'`"
+ B="$B`echo $A | sed 's/./0/g'`"
+ A="$ax_compare_version_tmp_A"
+
+ # Check for equality or inequality as necessary.
+ m4_case(m4_tolower(m4_substr($2,0,2)),
+ [eq],[
+ test "x$A" = "x$B" && ax_compare_version=true
+ ],
+ [ne],[
+ test "x$A" != "x$B" && ax_compare_version=true
+ ],[
+ AC_WARNING([illegal OP parameter: $2])
+ ])
+ ])
+
+ AS_VAR_POPDEF([A])dnl
+ AS_VAR_POPDEF([B])dnl
+
+ dnl # Execute ACTION-IF-TRUE / ACTION-IF-FALSE.
+ if test "$ax_compare_version" = "true" ; then
+ m4_ifvaln([$4],[$4],[:])dnl
+ m4_ifvaln([$5],[else $5])dnl
+ fi
+]) dnl AX_COMPARE_VERSION
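
For illustration only (not part of the vendored macro): the sed pipeline above zero-pads every run of digits to four places and drops the separators, so ordinary string order matches version order. A hypothetical C sketch of that normalization follows; the helper name `normalize` is made up for this example, and it keeps only the simple case of components below 10000:

```c
/*
 * Hypothetical C sketch of the normalization the sed pipeline performs:
 * every run of digits is zero-padded to four places and everything else is
 * dropped, so "1.12.5-r617" becomes "0001001200050617" and plain string
 * comparison orders versions. The helper name normalize is made up here.
 */
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void normalize(const char *version, char *out, size_t outsz) {
    size_t n = 0;
    while (*version && n + 5 <= outsz) {          /* room for 4 digits + NUL */
        if (isdigit((unsigned char)*version)) {
            char digits[16];
            size_t len = 0;
            while (isdigit((unsigned char)*version) && len < sizeof(digits) - 1)
                digits[len++] = *version++;
            digits[len] = '\0';
            n += (size_t)snprintf(out + n, outsz - n, "%04d", atoi(digits));
        } else {
            version++;                            /* separators do not matter */
        }
    }
    out[n < outsz ? n : outsz - 1] = '\0';
}

int main(void) {
    char a[64], b[64];
    normalize("3.15.7", a, sizeof(a));
    normalize("3.15", b, sizeof(b));
    /* Like the [lt] examples above: the shorter normalized string sorts first. */
    printf("%s vs %s -> lt is %s\n", b, a, strcmp(b, a) < 0 ? "true" : "false");
    return 0;
}
```
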
diff --git a/vendor/github.com/apache/thrift/appveyor.yml b/vendor/github.com/apache/thrift/appveyor.yml
new file mode 100755
index 000000000..42c291124
--- /dev/null
+++ b/vendor/github.com/apache/thrift/appveyor.yml
@@ -0,0 +1,97 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# build Apache Thrift on AppVeyor - https://ci.appveyor.com
+
+version: '1.0.0-dev.{build}'
+
+shallow_clone: true
+
+os:
+ - Visual Studio 2015
+
+cache:
+ - C:\projects\thrift\buildcache -> build\appveyor\MSVC-appveyor-install.bat
+ - C:\ProgramData\chocolatey\lib -> build\appveyor\MSVC-appveyor-install.bat
+ - C:\msys64\var\cache\pacman -> build\appveyor\MSYS-appveyor-install.bat
+
+environment:
+ matrix:
+ - PROFILE: MSVC2010
+ PLATFORM: x86
+ CONFIGURATION: Debug
+ BOOST_VERSION: 1.54.0
+ LIBEVENT_VERSION: 2.0.22
+ QT_VERSION: 5.6
+ ZLIB_VERSION: 1.2.8
+ DISABLED_TESTS: StressTestNonBlocking|concurrency_test
+
+ - PROFILE: MSVC2015
+ PLATFORM: x64
+ CONFIGURATION: Release
+ BOOST_VERSION: 1.63.0
+ LIBEVENT_VERSION: 2.0.22
+ PYTHON_VERSION: 3.6
+ QT_VERSION: 5.8
+ ZLIB_VERSION: 1.2.11
+ DISABLED_TESTS: StressTestNonBlocking
+
+ - PROFILE: MINGW
+ PLATFORM: x64
+ CONFIGURATION: Release
+
+matrix:
+ fast_finish: true
+
+install:
+ - cd %APPVEYOR_BUILD_FOLDER%
+ - call build\appveyor\%PROFILE:~0,4%-appveyor-install.bat
+ - refreshenv
+
+build_script:
+ - cd %APPVEYOR_BUILD_FOLDER%
+ - call build\appveyor\%PROFILE:~0,4%-appveyor-build.bat
+
+test_script:
+ - cd %APPVEYOR_BUILD_FOLDER%
+ - call build\appveyor\%PROFILE:~0,4%-appveyor-test.bat
+
+
+# artifact capture disabled as it might increase service cost for little gain:
+#
+# artifacts:
+# - path: local-thrift-inst
+# name: cmake installed content
+# type: zip
+#
+# - path: local-thrift-build\Testing
+# name: ctest output
+# type: zip
+
+# RDP support: use one or the other...
+#
+# enables RDP for each build job so you can inspect the environment at the beginning of the job:
+# init:
+# - ps: iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-rdp.ps1'))
+#
+# enables RDP at the end of the build job so you can login and re-run
+# commands to see why something failed...
+# on_finish:
+# - ps: $blockRdp = $true; iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-rdp.ps1'))
+
diff --git a/vendor/github.com/apache/thrift/bootstrap.sh b/vendor/github.com/apache/thrift/bootstrap.sh
new file mode 100755
index 000000000..52ecda47b
--- /dev/null
+++ b/vendor/github.com/apache/thrift/bootstrap.sh
@@ -0,0 +1,54 @@
+#!/bin/sh
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+./cleanup.sh
+if test -d lib/php/src/ext/thrift_protocol ; then
+ if phpize -v >/dev/null 2>/dev/null ; then
+ (cd lib/php/src/ext/thrift_protocol && phpize)
+ fi
+fi
+
+set -e
+
+# libtoolize is called "glibtoolize" on OSX.
+if libtoolize --version 1 >/dev/null 2>/dev/null; then
+ LIBTOOLIZE=libtoolize
+elif glibtoolize --version 1 >/dev/null 2>/dev/null; then
+ LIBTOOLIZE=glibtoolize
+else
+ echo >&2 "Couldn't find libtoolize!"
+ exit 1
+fi
+
+# we require automake 1.13 or later
+# check must happen externally due to use of newer macro
+AUTOMAKE_VERSION=`automake --version | grep automake | egrep -o '([0-9]{1,}\.)+[0-9]{1,}'`
+if [ "$AUTOMAKE_VERSION" \< "1.13" ]; then
+ echo >&2 "automake version $AUTOMAKE_VERSION is too old (need 1.13 or later)"
+ exit 1
+fi
+
+autoscan
+$LIBTOOLIZE --copy --automake
+aclocal -I ./aclocal
+autoheader
+autoconf
+automake --copy --add-missing --foreign
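
One caveat worth noting (an editorial observation, not from the vendored script): the `\<` test above compares version strings lexicographically, which only approximates numeric, component-wise ordering; for instance "1.9" sorts after "1.13" as a plain string even though it is the older release. A hedged C sketch of a component-wise comparison, with `compare_versions` a made-up helper name:

```c
/*
 * Editorial sketch, not part of the vendored script: a numeric, component-wise
 * version comparison. It returns <0, 0 or >0 like strcmp and treats a missing
 * component as 0.
 */
#include <stdio.h>
#include <stdlib.h>

static int compare_versions(const char *a, const char *b) {
    while (*a || *b) {
        char *ea, *eb;
        long na = strtol(a, &ea, 10);
        long nb = strtol(b, &eb, 10);
        if (na != nb)
            return na < nb ? -1 : 1;
        if (ea == a && eb == b)
            break;                           /* no numeric components left */
        a = (*ea == '.') ? ea + 1 : ea;
        b = (*eb == '.') ? eb + 1 : eb;
    }
    return 0;
}

int main(void) {
    /* Numerically 1.9 < 1.13, although "1.9" > "1.13" as a plain string. */
    printf("compare_versions(\"1.9\", \"1.13\") = %d\n", compare_versions("1.9", "1.13"));
    return 0;
}
```
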
diff --git a/vendor/github.com/apache/thrift/bower.json b/vendor/github.com/apache/thrift/bower.json
new file mode 100644
index 000000000..1092c650e
--- /dev/null
+++ b/vendor/github.com/apache/thrift/bower.json
@@ -0,0 +1,15 @@
+{
+ "name": "thrift",
+ "homepage": "https://git-wip-us.apache.org/repos/asf/thrift.git",
+ "authors": [
+ "Apache Thrift "
+ ],
+ "description": "Apache Thrift",
+ "main": "lib/js/src/thrift.js",
+ "keywords": [
+ "thrift"
+ ],
+ "license": "Apache v2",
+ "ignore": [
+ ]
+}
diff --git a/vendor/github.com/apache/thrift/build/appveyor/MING-appveyor-build.bat b/vendor/github.com/apache/thrift/build/appveyor/MING-appveyor-build.bat
new file mode 100644
index 000000000..838e42880
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/appveyor/MING-appveyor-build.bat
@@ -0,0 +1,36 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+@ECHO OFF
+SETLOCAL EnableDelayedExpansion
+
+CD build\appveyor || EXIT /B
+CALL cl_banner_build.bat || EXIT /B
+CALL cl_setenv.bat || EXIT /B
+
+SET CMAKEARGS=^
+ -G'%GENERATOR%' ^
+ -DCMAKE_BUILD_TYPE=%CONFIGURATION% ^
+ -DCMAKE_INSTALL_PREFIX=%INSTDIR_MSYS% ^
+ -DCMAKE_MAKE_PROGRAM=/mingw64/bin/mingw32-make ^
+ -DCMAKE_C_COMPILER=x86_64-w64-mingw32-gcc.exe ^
+ -DCMAKE_CXX_COMPILER=x86_64-w64-mingw32-g++.exe ^
+ -DWITH_LIBEVENT=OFF ^
+ -DWITH_PYTHON=OFF ^
+ -DWITH_SHARED_LIB=OFF ^
+ -DWITH_STATIC_LIB=ON
+
+@ECHO ON
+%BASH% -lc "mkdir -p %BUILDDIR_MSYS% && cd %BUILDDIR_MSYS% && cmake.exe %SRCDIR_MSYS% %CMAKEARGS% && cmake --build . --config %CONFIGURATION% --target install" || EXIT /B
+@ECHO OFF
diff --git a/vendor/github.com/apache/thrift/build/appveyor/MING-appveyor-install.bat b/vendor/github.com/apache/thrift/build/appveyor/MING-appveyor-install.bat
new file mode 100644
index 000000000..0d5f99e4d
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/appveyor/MING-appveyor-install.bat
@@ -0,0 +1,21 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+::
+:: Appveyor install script for MinGW
+:: Installs (or builds) third party packages we need
+::
+
+:: Same as the MSYS installation requirements
+CALL build\appveyor\MSYS-appveyor-install.bat
diff --git a/vendor/github.com/apache/thrift/build/appveyor/MING-appveyor-test.bat b/vendor/github.com/apache/thrift/build/appveyor/MING-appveyor-test.bat
new file mode 100644
index 000000000..c37c72a9c
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/appveyor/MING-appveyor-test.bat
@@ -0,0 +1,16 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+:: Same as MSYS2
+CALL build\appveyor\MSYS-appveyor-test.bat
diff --git a/vendor/github.com/apache/thrift/build/appveyor/MSVC-appveyor-build.bat b/vendor/github.com/apache/thrift/build/appveyor/MSVC-appveyor-build.bat
new file mode 100644
index 000000000..054a8b414
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/appveyor/MSVC-appveyor-build.bat
@@ -0,0 +1,45 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+@ECHO OFF
+SETLOCAL EnableDelayedExpansion
+
+CD build\appveyor || EXIT /B
+CALL cl_banner_build.bat || EXIT /B
+CALL cl_setenv.bat || EXIT /B
+MKDIR "%BUILDDIR%" || EXIT /B
+CD "%BUILDDIR%" || EXIT /B
+
+@ECHO ON
+ cmake "%SRCDIR%" ^
+ -G"%GENERATOR%" ^
+ -DBOOST_ROOT="%BOOST_ROOT%" ^
+ -DBOOST_LIBRARYDIR="%BOOST_LIBRARYDIR%" ^
+ -DCMAKE_BUILD_TYPE="%CONFIGURATION%" ^
+ -DCMAKE_INSTALL_PREFIX="%INSTDIR%" ^
+ -DINTTYPES_ROOT="%WIN3P%\msinttypes" ^
+ -DLIBEVENT_ROOT="%WIN3P%\libevent-%LIBEVENT_VERSION%-stable" ^
+ -DOPENSSL_ROOT_DIR="%OPENSSL_ROOT%" ^
+ -DOPENSSL_USE_STATIC_LIBS=OFF ^
+ -DZLIB_LIBRARY="%WIN3P%\zlib-inst\lib\zlib%ZLIB_LIB_SUFFIX%.lib" ^
+ -DZLIB_ROOT="%WIN3P%\zlib-inst" ^
+ -DWITH_PYTHON=%WITH_PYTHON% ^
+ -DWITH_%THREADMODEL%THREADS=ON ^
+ -DWITH_SHARED_LIB=OFF ^
+ -DWITH_STATIC_LIB=ON || EXIT /B
+@ECHO OFF
+
+cmake --build . ^
+ --config "%CONFIGURATION%" ^
+ --target INSTALL || EXIT /B
diff --git a/vendor/github.com/apache/thrift/build/appveyor/MSVC-appveyor-install.bat b/vendor/github.com/apache/thrift/build/appveyor/MSVC-appveyor-install.bat
new file mode 100644
index 000000000..573700e0c
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/appveyor/MSVC-appveyor-install.bat
@@ -0,0 +1,72 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+::
+:: Appveyor install script for MSVC
+:: Installs (or builds) third party packages we need
+::
+
+@ECHO OFF
+SETLOCAL EnableDelayedExpansion
+
+CD build\appveyor || EXIT /B
+CALL cl_banner_install.bat || EXIT /B
+CALL cl_setenv.bat || EXIT /B
+CALL cl_showenv.bat || EXIT /B
+MKDIR "%WIN3P%" || EXIT /B
+
+:: Install ant - this also installs the latest JDK as a dependency
+:: The installation of JDK requires us to pick up PATH and JAVA_HOME from the registry
+cinst -c "%BUILDCACHE%" -y ant || EXIT /B
+
+:: Install bison and flex
+cinst -c "%BUILDCACHE%" -y winflexbison3 || EXIT /B
+
+:: zlib
+CD "%APPVEYOR_SCRIPTS%" || EXIT /B
+call build-zlib.bat || EXIT /B
+
+:: libevent
+CD "%APPVEYOR_SCRIPTS%" || EXIT /B
+call build-libevent.bat || EXIT /B
+
+:: python packages
+pip install backports.ssl_match_hostname ^
+ ipaddress ^
+ tornado ^
+ twisted || EXIT /B
+
+:: msinttypes - for MSVC2010 only
+SET MSINTTYPESURL=https://storage.googleapis.com/google-code-archive-downloads/v2/code.google.com/msinttypes/msinttypes-r26.zip
+IF "%COMPILER%" == "vc100" (
+ MKDIR "%WIN3P%\msinttypes" || EXIT /B
+ CD "%WIN3P%\msinttypes" || EXIT /B
+ appveyor DownloadFile "%MSINTTYPESURL%" || EXIT /B
+ 7z x "msinttypes-r26.zip" || EXIT /B
+)
+
+:: appveyor build slaves do not have MSVC2010 Boost installed
+IF "%COMPILER%" == "vc100" (
+ SET BITS=64
+ IF "%PLATFORM%" == "x86" (
+ SET BITS=32
+ )
+ SET BOOSTEXEURL=https://downloads.sourceforge.net/project/boost/boost-binaries/%BOOST_VERSION%/boost_%BOOST_VERSION:.=_%-msvc-10.0-!BITS!.exe
+ SET BOOSTEXE=C:\projects\thrift\buildcache\boost_%BOOST_VERSION:.=_%-msvc-10.0-!BITS!.exe
+ appveyor DownloadFile "!BOOSTEXEURL!" -FileName "!BOOSTEXE!" || EXIT /B
+ "!BOOSTEXE!" /dir=C:\Libraries\boost_%BOOST_VERSION:.=_% /silent || EXIT /B
+)
+
+:: Haskell (GHC) and cabal
+cinst -c "%BUILDCACHE%" -y ghc || EXIT /B
diff --git a/vendor/github.com/apache/thrift/build/appveyor/MSVC-appveyor-test.bat b/vendor/github.com/apache/thrift/build/appveyor/MSVC-appveyor-test.bat
new file mode 100644
index 000000000..16ee2078e
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/appveyor/MSVC-appveyor-test.bat
@@ -0,0 +1,25 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+@ECHO OFF
+SETLOCAL EnableDelayedExpansion
+CD build\appveyor || EXIT /B
+CALL cl_banner_test.bat || EXIT /B
+CALL cl_setenv.bat || EXIT /B
+CD "%BUILDDIR%" || EXIT /B
+
+:: Add directories to the path to find DLLs of third party libraries so tests run
+SET PATH=%BOOST_LIBRARYDIR%;%OPENSSL_ROOT%\bin;%WIN3P%\zlib-inst\bin;%PATH%
+
+ctest -C %CONFIGURATION% --timeout 300 -VV -E "(%DISABLED_TESTS%)" || EXIT /B
diff --git a/vendor/github.com/apache/thrift/build/appveyor/MSYS-appveyor-build.bat b/vendor/github.com/apache/thrift/build/appveyor/MSYS-appveyor-build.bat
new file mode 100644
index 000000000..b9d8955e2
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/appveyor/MSYS-appveyor-build.bat
@@ -0,0 +1,47 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+@ECHO OFF
+SETLOCAL EnableDelayedExpansion
+
+CD build\appveyor || EXIT /B
+CALL cl_banner_build.bat || EXIT /B
+CALL cl_setenv.bat || EXIT /B
+
+SET BASH=C:\msys64\usr\bin\bash
+SET CMAKE=/c/msys64/mingw64/bin/cmake.exe
+
+@ECHO ON
+SET CMAKEARGS=-G\"%GENERATOR%\" ^
+ -DBoost_DEBUG=ON ^
+ -DBoost_NAMESPACE=libboost ^
+ -DBOOST_INCLUDEDIR=%BOOST_INCLUDEDIR% ^
+ -DBOOST_LIBRARYDIR=%BOOST_LIBRARYDIR% ^
+ -DCMAKE_BUILD_TYPE=%CONFIGURATION% ^
+ -DCMAKE_C_COMPILER=gcc.exe ^
+ -DCMAKE_CXX_COMPILER=g++.exe ^
+ -DCMAKE_MAKE_PROGRAM=make.exe ^
+ -DCMAKE_INSTALL_PREFIX=%INSTDIR_MSYS% ^
+ -DOPENSSL_LIBRARIES=%OPENSSL_LIBRARIES% ^
+ -DOPENSSL_ROOT_DIR=%OPENSSL_ROOT% ^
+ -DOPENSSL_USE_STATIC_LIBS=ON ^
+ -DWITH_BOOST_STATIC=ON ^
+ -DWITH_JAVA=OFF ^
+ -DWITH_LIBEVENT=OFF ^
+ -DWITH_PYTHON=%WITH_PYTHON% ^
+ -DWITH_SHARED_LIB=OFF ^
+ -DWITH_STATIC_LIB=ON
+
+%BASH% -lc "mkdir %BUILDDIR_MSYS% && cd %BUILDDIR_MSYS% && %CMAKE% %SRCDIR_MSYS% %CMAKEARGS% && %CMAKE% --build . --config %CONFIGURATION% --target install" || EXIT /B
+@ECHO OFF
diff --git a/vendor/github.com/apache/thrift/build/appveyor/MSYS-appveyor-install.bat b/vendor/github.com/apache/thrift/build/appveyor/MSYS-appveyor-install.bat
new file mode 100644
index 000000000..ff43cd371
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/appveyor/MSYS-appveyor-install.bat
@@ -0,0 +1,41 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+::
+:: Appveyor install script for MSYS
+:: Installs (or builds) third party packages we need
+::
+
+@ECHO OFF
+SETLOCAL EnableDelayedExpansion
+
+CD build\appveyor || EXIT /B
+CALL cl_banner_install.bat || EXIT /B
+CALL cl_setenv.bat || EXIT /B
+CALL cl_showenv.bat || EXIT /B
+
+SET PACKAGES=^
+ --needed -S bison flex ^
+ make ^
+ mingw-w64-x86_64-boost ^
+ mingw-w64-x86_64-cmake ^
+ mingw-w64-x86_64-openssl ^
+ mingw-w64-x86_64-toolchain ^
+ mingw-w64-x86_64-zlib
+
+:: omitting libevent-devel for now; it is version 2.1.4 and doesn't play nice with MinGW
+
+%BASH% -lc "pacman --noconfirm -Syu" || EXIT /B
+%BASH% -lc "pacman --noconfirm -Su" || EXIT /B
+%BASH% -lc "pacman --noconfirm %PACKAGES%" || EXIT /B
diff --git a/vendor/github.com/apache/thrift/build/appveyor/MSYS-appveyor-test.bat b/vendor/github.com/apache/thrift/build/appveyor/MSYS-appveyor-test.bat
new file mode 100644
index 000000000..0f37ec51f
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/appveyor/MSYS-appveyor-test.bat
@@ -0,0 +1,26 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+@ECHO OFF
+SETLOCAL EnableDelayedExpansion
+
+CD build\appveyor || EXIT /B
+CALL cl_banner_test.bat || EXIT /B
+CALL cl_setenv.bat || EXIT /B
+CD "%BUILDDIR%" || EXIT /B
+
+:: randomly fails on mingw; see Jira THRIFT-4106
+SET DISABLED_TESTS=concurrency_test
+
+%BASH% -lc "cd %BUILDDIR_MSYS% && ctest.exe -C %CONFIGURATION% --timeout 300 -VV -E '(%DISABLED_TESTS%)'" || EXIT /B
diff --git a/vendor/github.com/apache/thrift/build/appveyor/README.md b/vendor/github.com/apache/thrift/build/appveyor/README.md
new file mode 100644
index 000000000..1a2aa306b
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/appveyor/README.md
@@ -0,0 +1,34 @@
+
+
+# Appveyor Build
+
+Appveyor is capable of building with MSVC 2010 through 2015 as well as
+having the latest MSYS2/MinGW 64-bit environment. It has many versions
+of boost and python installed as well. See what appveyor has
+[installed on build workers](https://www.appveyor.com/docs/installed-software/).
+
+We run a matrix build on Appveyor and build the following combinations:
+
+* MinGW x64 (gcc 6.3.0)
+* MSVC 2010 x86, an older boost, an older python
+* MSVC 2015 x86/x64, the latest boost, the latest python
+* MSYS2 x64 (gcc 6.3.0) - this is a work in progress
+
+The Appveyor script takes the first four letters from the PROFILE specified in
+the environment stanza and runs these scripts in order:
+
+????-appveyor-install.bat will install third party libraries and set up the environment
+????-appveyor-build.bat will build with cmake
+????-appveyor-test.bat will run ctest
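
As a hypothetical illustration of the dispatch convention described above (not part of the vendored README): the first four characters of PROFILE select the script family, so MSVC2015 maps to MSVC-appveyor-install.bat, MSVC-appveyor-build.bat, and MSVC-appveyor-test.bat. A small C sketch of that prefix mapping:

```c
/* Editorial sketch of the %PROFILE:~0,4% dispatch: build the per-phase script
 * name from the first four characters of the profile. */
#include <stdio.h>

int main(void) {
    const char *profile = "MSVC2015";
    const char *phases[] = { "install", "build", "test" };
    char script[64];
    for (int i = 0; i < 3; ++i) {
        snprintf(script, sizeof(script), "%.4s-appveyor-%s.bat", profile, phases[i]);
        puts(script);                   /* MSVC-appveyor-install.bat, ... */
    }
    return 0;
}
```
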
diff --git a/vendor/github.com/apache/thrift/build/appveyor/build-libevent.bat b/vendor/github.com/apache/thrift/build/appveyor/build-libevent.bat
new file mode 100644
index 000000000..13c74ee15
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/appveyor/build-libevent.bat
@@ -0,0 +1,30 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+SETLOCAL EnableDelayedExpansion
+
+SET URLFILE=libevent-%LIBEVENT_VERSION%-stable.tar.gz
+SET URL=https://github.com/libevent/libevent/releases/download/release-%LIBEVENT_VERSION%-stable/%URLFILE%
+
+CD %WIN3P% || EXIT /B
+appveyor DownloadFile %URL% || EXIT /B
+7z x %URLFILE% -so | 7z x -si -ttar > nul || EXIT /B
+CD "libevent-%LIBEVENT_VERSION%-stable" || EXIT /B
+nmake -f Makefile.nmake || EXIT /B
+mkdir lib || EXIT /B
+move *.lib lib\ || EXIT /B
+move WIN32-Code\event2\* include\event2\ || EXIT /B
+move *.h include\ || EXIT /B
+
+ENDLOCAL
diff --git a/vendor/github.com/apache/thrift/build/appveyor/build-zlib.bat b/vendor/github.com/apache/thrift/build/appveyor/build-zlib.bat
new file mode 100644
index 000000000..d8811a153
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/appveyor/build-zlib.bat
@@ -0,0 +1,49 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+SETLOCAL EnableDelayedExpansion
+
+SET PACKAGE=zlib-%ZLIB_VERSION%
+SET BUILDDIR=%WIN3P%\zlib-build
+SET INSTDIR=%WIN3P%\zlib-inst
+SET SRCDIR=%WIN3P%\%PACKAGE%
+SET URLFILE=%PACKAGE%.tar.gz
+
+:: This allows us to tolerate the current version being archived
+SET URL=http://zlib.net/%URLFILE%
+SET FURL=http://zlib.net/fossils/%URLFILE%
+
+:: Download
+CD "%WIN3P%" || EXIT /B
+appveyor DownloadFile "%URL%"
+IF ERRORLEVEL 1 (
+ appveyor DownloadFile "%FURL%" || EXIT /B
+)
+7z x "%URLFILE%" -so | 7z x -si -ttar > nul || EXIT /B
+
+:: Generate
+MKDIR "%BUILDDIR%" || EXIT /B
+CD "%BUILDDIR%" || EXIT /B
+cmake "%SRCDIR%" ^
+ -G"NMake Makefiles" ^
+ -DCMAKE_INSTALL_PREFIX="%INSTDIR%" ^
+ -DCMAKE_BUILD_TYPE="%CONFIGURATION%" || EXIT /B
+
+:: Build
+nmake /fMakefile install || EXIT /B
+IF "%CONFIGURATION%" == "Debug" (
+ COPY "%BUILDDIR%\zlibd.pdb" "%INSTDIR%\bin\" || EXIT /B
+)
+
+ENDLOCAL
diff --git a/vendor/github.com/apache/thrift/build/appveyor/cl_banner_apache_thrift.bat b/vendor/github.com/apache/thrift/build/appveyor/cl_banner_apache_thrift.bat
new file mode 100644
index 000000000..78f2a2aab
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/appveyor/cl_banner_apache_thrift.bat
@@ -0,0 +1,24 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+:: A visual indicator in a large log helps you locate things when scanning
+:: http://www.patorjk.com/software/taag/#p=display&f=Small%20Slant&t=Apache%20Thrift
+
+ECHO/
+ECHO ___ __ ________ _ _____
+ECHO / _ | ___ ___ _____/ / ___ /_ __/ / ____(_) _/ /_
+ECHO / __ |/ _ \/ _ `/ __/ _ \/ -_) / / / _ \/ __/ / _/ __/
+ECHO /_/ |_/ .__/\_,_/\__/_//_/\__/ /_/ /_//_/_/ /_/_/ \__/
+ECHO /_/
+ECHO/
diff --git a/vendor/github.com/apache/thrift/build/appveyor/cl_banner_build.bat b/vendor/github.com/apache/thrift/build/appveyor/cl_banner_build.bat
new file mode 100644
index 000000000..60272f335
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/appveyor/cl_banner_build.bat
@@ -0,0 +1,23 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+:: A visual indicator in a large log helps you locate things when scanning
+:: http://www.patorjk.com/software/taag/#p=display&f=Small%20Slant&t=Build
+
+ECHO/
+ECHO ___ _ __ __
+ECHO / _ )__ __(_) /__/ /
+ECHO / _ / // / / / _ / @@@ BUILD
+ECHO /____/\_,_/_/_/\_,_/
+ECHO/
diff --git a/vendor/github.com/apache/thrift/build/appveyor/cl_banner_install.bat b/vendor/github.com/apache/thrift/build/appveyor/cl_banner_install.bat
new file mode 100644
index 000000000..fde3da21a
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/appveyor/cl_banner_install.bat
@@ -0,0 +1,23 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+:: A visual indicator in a large log helps you locate things when scanning
+:: http://www.patorjk.com/software/taag/#p=display&f=Small%20Slant&t=Install
+
+ECHO/
+ECHO ____ __ ____
+ECHO / _/__ ___ / /____ _/ / /
+ECHO _/ // _ \(_-^ __/ _ \/ / / @@@ INSTALL
+ECHO /___/_//_/___/\__/\_,_/_/_/
+ECHO/
diff --git a/vendor/github.com/apache/thrift/build/appveyor/cl_banner_test.bat b/vendor/github.com/apache/thrift/build/appveyor/cl_banner_test.bat
new file mode 100644
index 000000000..44e2d1077
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/appveyor/cl_banner_test.bat
@@ -0,0 +1,23 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+:: A visual indicator in a large log helps you locate things when scanning
+:: http://www.patorjk.com/software/taag/#p=display&f=Small%20Slant&t=Test
+
+ECHO/
+ECHO ______ __
+ECHO /_ __/__ ___ / /_
+ECHO / / / -_^|_-^ __/ @@@ TEST
+ECHO /_/ \__/___/\__/
+ECHO/
diff --git a/vendor/github.com/apache/thrift/build/appveyor/cl_setcompiler.bat b/vendor/github.com/apache/thrift/build/appveyor/cl_setcompiler.bat
new file mode 100644
index 000000000..b97da7359
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/appveyor/cl_setcompiler.bat
@@ -0,0 +1,63 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+::
+:: Detect the compiler edition we're building in.
+:: Set the COMPILER environment variable to one of:
+:: gcc = MinGW / MSYS2 and gcc toolchain
+:: vc100 = Visual Studio 2010
+:: vc110 = Visual Studio 2012
+:: vc120 = Visual Studio 2013
+:: vc140 = Visual Studio 2015
+:: vc150 = Visual Studio 2017
+::
+:: Honors any existing COMPILER environment variable
+:: setting instead of overwriting it, to allow it
+:: to be forced if needed.
+::
+:: Sets ERRORLEVEL to 0 if COMPILER can be determined,
+:: to 1 if it cannot.
+::
+
+IF DEFINED COMPILER (
+ ECHO [warn ] using existing environment variable COMPILER
+ EXIT /B 0
+)
+
+IF NOT "%PROFILE:~0,4%" == "MSVC" (
+ SET COMPILER=gcc
+) ELSE (
+ CALL :CHECK 16
+ IF !ERRORLEVEL! == 0 (SET COMPILER=vc100)
+ CALL :CHECK 17
+ IF !ERRORLEVEL! == 0 (SET COMPILER=vc110)
+ CALL :CHECK 18
+ IF !ERRORLEVEL! == 0 (SET COMPILER=vc120)
+ CALL :CHECK 19.00
+ IF !ERRORLEVEL! == 0 (SET COMPILER=vc140)
+ CALL :CHECK 19.10
+ IF !ERRORLEVEL! == 0 (SET COMPILER=vc150)
+)
+
+IF NOT DEFINED COMPILER (
+ ECHO [error] unable to determine the compiler edition
+ EXIT /B 1
+)
+
+ECHO [info ] detected compiler edition %COMPILER%
+EXIT /B 0
+
+:CHECK
+cl /? 2>&1 | findstr /C:"Version %1%." > nul
+EXIT /B
diff --git a/vendor/github.com/apache/thrift/build/appveyor/cl_setenv.bat b/vendor/github.com/apache/thrift/build/appveyor/cl_setenv.bat
new file mode 100644
index 000000000..e80d6b569
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/appveyor/cl_setenv.bat
@@ -0,0 +1,92 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+ IF "%PROFILE%" == "MSVC2010" (
+ CALL "C:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\vcvarsall.bat" %PLATFORM%
+) ELSE IF "%PROFILE%" == "MSVC2012" (
+ CALL "C:\Program Files (x86)\Microsoft Visual Studio 11.0\VC\vcvarsall.bat" %PLATFORM%
+) ELSE IF "%PROFILE%" == "MSVC2013" (
+ CALL "C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" %PLATFORM%
+) ELSE IF "%PROFILE%" == "MSVC2015" (
+ CALL "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %PLATFORM%
+) ELSE IF "%PROFILE%" == "MSVC2017" (
+ CALL "C:\Program Files (x86)\Microsoft Visual Studio\2017\Enterprise\Common7\Tools\VsDevCmd.bat" %PLATFORM%
+) ELSE IF "%PROFILE%" == "MINGW" (
+ SET MSYS2_PATH_TYPE=stock
+) ELSE IF "%PROFILE%" == "MSYS" (
+ SET MSYS2_PATH_TYPE=stock
+) ELSE (
+ ECHO Unsupported PROFILE=%PROFILE% or PLATFORM=%PLATFORM%
+ EXIT /B 1
+)
+
+CALL cl_setcompiler.bat || EXIT /B
+CALL cl_setgenerator.bat || EXIT /B
+
+SET APPVEYOR_SCRIPTS=%APPVEYOR_BUILD_FOLDER%\build\appveyor
+SET BUILDCACHE=%APPVEYOR_BUILD_FOLDER%\buildcache
+SET BUILDDIR=%APPVEYOR_BUILD_FOLDER%\local-thrift-build
+SET INSTDIR=%APPVEYOR_BUILD_FOLDER%\local-thrift-inst
+SET SRCDIR=%APPVEYOR_BUILD_FOLDER%
+
:: PLATFORM is x64 or x86, but we want x86 to become "32" when we strip it down for paths:
+SET NORM_PLATFORM=%PLATFORM:~-2,2%
+IF "%NORM_PLATFORM%" == "86" (SET NORM_PLATFORM=32)
+
+:: FindBoost needs forward slashes so cmake doesn't see something as an escaped character
+SET BOOST_ROOT=C:/Libraries/boost_%BOOST_VERSION:.=_%
+SET BOOST_LIBRARYDIR=%BOOST_ROOT%/lib%NORM_PLATFORM%-msvc-%COMPILER:~-3,2%.0
+SET OPENSSL_ROOT=C:\OpenSSL-Win%NORM_PLATFORM%
+SET WIN3P=%APPVEYOR_BUILD_FOLDER%\thirdparty
+
+:: MSVC2010 doesn't "do" std::thread
+IF "%COMPILER%" == "vc100" (
+ SET THREADMODEL=BOOST
+) ELSE (
+ SET THREADMODEL=STD
+)
+
+IF "%PYTHON_VERSION%" == "" (
+ SET WITH_PYTHON=OFF
+) ELSE (
+ SET WITH_PYTHON=ON
+ SET PATH=C:\Python%PYTHON_VERSION:.=%\scripts;C:\Python%PYTHON_VERSION:.=%;!PATH!
+)
+IF "%CONFIGURATION%" == "Debug" (SET ZLIB_LIB_SUFFIX=d)
+
+IF NOT "%QT_VERSION%" == "" (
+ IF /i "%PLATFORM%" == "x64" SET QTEXT=_64
+ SET PATH=C:\Qt\%QT_VERSION%\%PROFILE%!QTEXT!\bin;!PATH!
+)
+
+IF NOT "%PROFILE:~0,4%" == "MSVC" (
+
+ SET BASH=C:\msys64\usr\bin\bash.exe
+ SET BOOST_ROOT=
+ SET BOOST_INCLUDEDIR=/mingw64/include
+ SET BOOST_LIBRARYDIR=/mingw64/lib
+ SET OPENSSL_LIBRARIES=/mingw64/lib
+ SET OPENSSL_ROOT=/mingw64
+ SET WIN3P=
+
+ !BASH! -lc "sed -i '/export PATH=\/mingw64\/bin/d' ~/.bash_profile && echo 'export PATH=/mingw64/bin:$PATH' >> ~/.bash_profile" || EXIT /B
+
+)
+
+SET BUILDDIR_MSYS=%BUILDDIR:\=/%
+SET BUILDDIR_MSYS=/c%BUILDDIR_MSYS:~2%
+SET INSTDIR_MSYS=%INSTDIR:\=/%
+SET INSTDIR_MSYS=/c%INSTDIR_MSYS:~2%
+SET SRCDIR_MSYS=%SRCDIR:\=/%
+SET SRCDIR_MSYS=/c%SRCDIR_MSYS:~2%
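
The last six SET lines convert the Windows build, install, and source paths into the MSYS form used inside bash. As an editorial illustration (not part of the vendored script), the same transformation in C, with `to_msys_path` a made-up helper:

```c
/*
 * Editorial illustration, not part of the vendored script: the transformation
 * the SET pairs apply per variable, i.e. turning C:\projects\thrift into
 * /c/projects/thrift.
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static void to_msys_path(const char *win, char *out, size_t outsz) {
    size_t n = 0;
    if (strlen(win) >= 2 && win[1] == ':' && outsz > 3) {
        out[n++] = '/';
        out[n++] = (char)tolower((unsigned char)win[0]);    /* drive letter */
        for (const char *p = win + 2; *p && n + 1 < outsz; ++p)
            out[n++] = (*p == '\\') ? '/' : *p;              /* flip separators */
    }
    out[n] = '\0';
}

int main(void) {
    char msys[256];
    to_msys_path("C:\\projects\\thrift\\local-thrift-build", msys, sizeof(msys));
    puts(msys);   /* /c/projects/thrift/local-thrift-build */
    return 0;
}
```
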
diff --git a/vendor/github.com/apache/thrift/build/appveyor/cl_setgenerator.bat b/vendor/github.com/apache/thrift/build/appveyor/cl_setgenerator.bat
new file mode 100644
index 000000000..7ca98530f
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/appveyor/cl_setgenerator.bat
@@ -0,0 +1,74 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+::
+:: Detect the compiler edition we're building in and then
+:: set the GENERATOR environment variable to one of:
+::
+:: Visual Studio 15 2017 [arch] = Generates Visual Studio 2017 project files.
+:: Optional [arch] can be "Win64" or "ARM".
+:: Visual Studio 14 2015 [arch] = Generates Visual Studio 2015 project files.
+:: Optional [arch] can be "Win64" or "ARM".
+:: Visual Studio 12 2013 [arch] = Generates Visual Studio 2013 project files.
+:: Optional [arch] can be "Win64" or "ARM".
+:: Visual Studio 11 2012 [arch] = Generates Visual Studio 2012 project files.
+:: Optional [arch] can be "Win64" or "ARM".
+:: Visual Studio 10 2010 [arch] = Generates Visual Studio 2010 project files.
+:: Optional [arch] can be "Win64" or "IA64".
+:: MinGW Makefiles = Generates makefiles for MinGW
+:: MSYS Makefiles = Generates makefiles for MSYS
+::
+:: Honors any existing GENERATOR environment variable
+:: setting instead of overwriting it, to allow it
+:: to be forced if needed.
+::
+:: Sets ERRORLEVEL to 0 if GENERATOR can be determined,
+:: to 1 if it cannot.
+::
+
+IF DEFINED GENERATOR (
+ ECHO [warn ] using existing environment variable GENERATOR
+ EXIT /B 0
+)
+
+
+IF "%PROFILE:~0,4%" == "MING" (
+ SET GENERATOR=MinGW Makefiles
+) ELSE IF "%PROFILE:~0,4%" == "MSYS" (
+ SET GENERATOR=MSYS Makefiles
+) ELSE (
+ IF /i "%PLATFORM%" == "x64" SET GENARCH= Win64
+ CALL :CHECK 16
+ IF !ERRORLEVEL! == 0 SET GENERATOR=Visual Studio 10 2010!GENARCH!
+ CALL :CHECK 17
+ IF !ERRORLEVEL! == 0 SET GENERATOR=Visual Studio 11 2012!GENARCH!
+ CALL :CHECK 18
+ IF !ERRORLEVEL! == 0 SET GENERATOR=Visual Studio 12 2013!GENARCH!
+ CALL :CHECK 19.00
+ IF !ERRORLEVEL! == 0 SET GENERATOR=Visual Studio 14 2015!GENARCH!
+ CALL :CHECK 19.10
+ IF !ERRORLEVEL! == 0 SET GENERATOR=Visual Studio 15 2017!GENARCH!
+)
+
+IF NOT DEFINED GENERATOR (
+ ECHO [error] unable to determine the CMake generator to use
+ EXIT /B 1
+)
+
+ECHO [info ] using CMake generator %GENERATOR%
+EXIT /B 0
+
+:CHECK
+cl /? 2>&1 | findstr /C:"Version %1%." > nul
+EXIT /B
diff --git a/vendor/github.com/apache/thrift/build/appveyor/cl_showenv.bat b/vendor/github.com/apache/thrift/build/appveyor/cl_showenv.bat
new file mode 100644
index 000000000..33dd66072
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/appveyor/cl_showenv.bat
@@ -0,0 +1,67 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+ECHO/
+ECHO ===============================================================================
+IF "%PROFILE:~0,4%" == "MSVC" (
+ECHO Versions
+ECHO -------------------------------------------------------------------------------
+ECHO boost = %BOOST_VERSION%
+ECHO libevent = %LIBEVENT_VERSION%
+ECHO python = %PYTHON_VERSION%
+ECHO qt = %QT_VERSION%
+ECHO zlib = %ZLIB_VERSION%
+ECHO/
+)
+ECHO Appveyor Variables
+ECHO -------------------------------------------------------------------------------
+ECHO APPVEYOR_BUILD_FOLDER = %APPVEYOR_BUILD_FOLDER%
+ECHO CONFIGURATION = %CONFIGURATION%
+ECHO PLATFORM = %PLATFORM%
+ECHO PROFILE = %PROFILE%
+ECHO/
+ECHO Our Variables
+ECHO -------------------------------------------------------------------------------
+ECHO APPVEYOR_SCRIPTS = %APPVEYOR_SCRIPTS%
+ECHO BOOST_ROOT = %BOOST_ROOT%
+ECHO BOOST_INCLUDEDIR = %BOOST_INCLUDEDIR%
+ECHO BOOST_LIBRARYDIR = %BOOST_LIBRARYDIR%
+ECHO BUILDCACHE = %BUILDCACHE%
+ECHO BUILDDIR = %BUILDDIR%
+ECHO COMPILER = %COMPILER%
+ECHO GENERATOR = %GENERATOR%
+ECHO INSTDIR = %INSTDIR%
+ECHO JAVA_HOME = %JAVA_HOME%
+ECHO OPENSSL_ROOT = %OPENSSL_ROOT%
+ECHO SRCDIR = %SRCDIR%
+ECHO WIN3P = %WIN3P%
+ECHO WITH_PYTHON = %WITH_PYTHON%
+ECHO ZLIB_STATIC_SUFFIX = %ZLIB_STATIC_SUFFIX%
+IF NOT "%PROFILE:~0,4%" == "MSVC" (
+ECHO/
+ECHO MSYS2/MinGW
+ECHO -------------------------------------------------------------------------------
+ECHO BUILDDIR_MSYS = %BUILDDIR_MSYS%
+ECHO INSTDIR_MSYS = %INSTDIR_MSYS%
+ECHO MSYS2_PATH_TYPE = %MSYS2_PATH_TYPE%
+ECHO SRCDIR_MSYS = %SRCDIR_MSYS%
+ECHO PATH =
+C:\msys64\usr\bin\bash -lc "echo $PATH"
+)
+ECHO/
+ECHO Windows PATH
+ECHO -------------------------------------------------------------------------------
+ECHO %PATH%
+ECHO ===============================================================================
+ECHO/
diff --git a/vendor/github.com/apache/thrift/build/appveyor/simulate-appveyor.bat b/vendor/github.com/apache/thrift/build/appveyor/simulate-appveyor.bat
new file mode 100644
index 000000000..b32c0da12
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/appveyor/simulate-appveyor.bat
@@ -0,0 +1,35 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+::
+:: Helps build thrift by pretending to be appveyor
+:: Usage:
+:: cd build\appveyor
+:: simulate-appveyor.bat [Debug|Release] [x86|x64] [MINGW|MSVC2015]
+::
+
+@ECHO OFF
+SETLOCAL EnableDelayedExpansion
+
+SET APPVEYOR_BUILD_FOLDER=%~dp0..\..
+SET CONFIGURATION=%1
+SET PLATFORM=%2
+SET PROFILE=%3
+
+CD %APPVEYOR_BUILD_FOLDER%
+CALL build\appveyor\%PROFILE:~0,4%-appveyor-install.bat || EXIT /B
+CD %APPVEYOR_BUILD_FOLDER%
+CALL build\appveyor\%PROFILE:~0,4%-appveyor-build.bat || EXIT /B
+CD %APPVEYOR_BUILD_FOLDER%
+CALL build\appveyor\%PROFILE:~0,4%-appveyor-test.bat
diff --git a/vendor/github.com/apache/thrift/build/cmake/CPackConfig.cmake b/vendor/github.com/apache/thrift/build/cmake/CPackConfig.cmake
new file mode 100644
index 000000000..fdc1b4e76
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/cmake/CPackConfig.cmake
@@ -0,0 +1,68 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+#TODO: Should we bundle system libraries for DLLs?
+#include(InstallRequiredSystemLibraries)
+
+# For help take a look at:
+# http://www.cmake.org/Wiki/CMake:CPackConfiguration
+
+### general settings
+set(CPACK_PACKAGE_NAME "thrift")
+set(CPACK_PACKAGE_VERSION "${PACKAGE_VERSION}")
+set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Apache Thrift")
+set(CPACK_PACKAGE_DESCRIPTION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/README.md")
+set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE")
+set(CPACK_PACKAGE_VENDOR "Apache Software Foundation")
+set(CPACK_PACKAGE_CONTACT "dev@thrift.apache.org")
+set(CPACK_PACKAGE_INSTALL_DIRECTORY "${CPACK_PACKAGE_NAME}")
+set(CPACK_SYSTEM_NAME "${CMAKE_SYSTEM_NAME}")
+
+### versions
+set(CPACK_PACKAGE_VERSION_MAJOR ${thrift_VERSION_MAJOR})
+set(CPACK_PACKAGE_VERSION_MINOR ${thrift_VERSION_MINOR})
+set(CPACK_PACKAGE_VERSION_PATCH ${thrift_VERSION_PATCH})
+
+### source generator
+set(CPACK_SOURCE_GENERATOR "TGZ")
+set(CPACK_SOURCE_IGNORE_FILES "~$;[.]swp$;/[.]svn/;/[.]git/;.gitignore;/build/;tags;cscope.*")
+set(CPACK_SOURCE_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}")
+
+### zip generator
+set(CPACK_GENERATOR "ZIP")
+set(CPACK_PACKAGE_INSTALL_DIRECTORY "thrift")
+
+
+if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
+ set(CPACK_GENERATOR "NSIS")
+ set(CPACK_NSIS_HELP_LINK "http://thrift.apache.org")
+ set(CPACK_NSIS_MENU_LINKS
+ "http://thrift.apache.org" "Apache Thrift - Web Site"
+ "https://issues.apache.org/jira/browse/THRIFT" "Apache Thrift - Issues")
+ set(CPACK_NSIS_CONTACT ${CPACK_PACKAGE_CONTACT})
+ set(CPACK_NSIS_MODIFY_PATH "ON")
+ set(CPACK_PACKAGE_INSTALL_DIRECTORY "${CPACK_PACKAGE_NAME}")
+else()
+ set(CPACK_GENERATOR "DEB" )
+ set(CPACK_DEBIAN_PACKAGE_MAINTAINER ${CPACK_PACKAGE_CONTACT})
+endif()
+
+
+include(CPack)
diff --git a/vendor/github.com/apache/thrift/build/cmake/ConfigureChecks.cmake b/vendor/github.com/apache/thrift/build/cmake/ConfigureChecks.cmake
new file mode 100644
index 000000000..12a50df91
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/cmake/ConfigureChecks.cmake
@@ -0,0 +1,76 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+include(CheckFunctionExists)
+include(CheckIncludeFile)
+include(CheckIncludeFiles)
+include(CheckSymbolExists)
+
+if (Inttypes_FOUND)
+ # This allows the inttypes.h and stdint.h checks to succeed on platforms that
+ # do not natively provide them.
+ set (CMAKE_REQUIRED_INCLUDES ${INTTYPES_INCLUDE_DIRS})
+endif ()
+
+check_include_file(arpa/inet.h HAVE_ARPA_INET_H)
+check_include_file(fcntl.h HAVE_FCNTL_H)
+check_include_file(getopt.h HAVE_GETOPT_H)
+check_include_file(inttypes.h HAVE_INTTYPES_H)
+check_include_file(netdb.h HAVE_NETDB_H)
+check_include_file(netinet/in.h HAVE_NETINET_IN_H)
+check_include_file(stdint.h HAVE_STDINT_H)
+check_include_file(unistd.h HAVE_UNISTD_H)
+check_include_file(pthread.h HAVE_PTHREAD_H)
+check_include_file(sys/time.h HAVE_SYS_TIME_H)
+check_include_file(sys/param.h HAVE_SYS_PARAM_H)
+check_include_file(sys/resource.h HAVE_SYS_RESOURCE_H)
+check_include_file(sys/socket.h HAVE_SYS_SOCKET_H)
+check_include_file(sys/stat.h HAVE_SYS_STAT_H)
+check_include_file(sys/un.h HAVE_SYS_UN_H)
+check_include_file(sys/poll.h HAVE_SYS_POLL_H)
+check_include_file(sys/select.h HAVE_SYS_SELECT_H)
+check_include_file(sched.h HAVE_SCHED_H)
+check_include_file(string.h HAVE_STRING_H)
+check_include_file(strings.h HAVE_STRINGS_H)
+
+check_function_exists(gethostbyname HAVE_GETHOSTBYNAME)
+check_function_exists(gethostbyname_r HAVE_GETHOSTBYNAME_R)
+check_function_exists(strerror_r HAVE_STRERROR_R)
+check_function_exists(sched_get_priority_max HAVE_SCHED_GET_PRIORITY_MAX)
+check_function_exists(sched_get_priority_min HAVE_SCHED_GET_PRIORITY_MIN)
+
+include(CheckCSourceCompiles)
+include(CheckCXXSourceCompiles)
+
+check_cxx_source_compiles(
+ "
+ #include <string.h>
+ int main(){char b;char *a = strerror_r(0, &b, 0); return(0);}
+ "
+ STRERROR_R_CHAR_P)
+
+
+set(PACKAGE ${PACKAGE_NAME})
+set(PACKAGE_STRING "${PACKAGE_NAME} ${PACKAGE_VERSION}")
+set(VERSION ${thrift_VERSION})
+
+# generate a config.h file
+configure_file("${CMAKE_CURRENT_SOURCE_DIR}/build/cmake/config.h.in" "${CMAKE_CURRENT_BINARY_DIR}/thrift/config.h")
+
+include_directories("${CMAKE_CURRENT_BINARY_DIR}")
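
The STRERROR_R_CHAR_P probe exists because the GNU and POSIX variants of strerror_r differ in return type and buffer use. A hedged C sketch (editorial, not taken from the Thrift sources) of how code that consumes the generated config header can branch on the macro:

```c
/*
 * Editorial sketch: the GNU strerror_r returns char* and may ignore the
 * caller's buffer, while the POSIX variant returns int and fills the buffer.
 * Code that includes the generated thrift/config.h can branch on
 * STRERROR_R_CHAR_P like this.
 */
#include <stdio.h>
#include <string.h>

static const char *describe_errno(int err, char *buf, size_t len) {
#ifdef STRERROR_R_CHAR_P
    return strerror_r(err, buf, len);                 /* GNU: result may not be buf */
#else
    return strerror_r(err, buf, len) == 0 ? buf : "unknown error";   /* POSIX */
#endif
}

int main(void) {
    char buf[128];
    puts(describe_errno(2, buf, sizeof(buf)));        /* e.g. "No such file or directory" */
    return 0;
}
```
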
diff --git a/vendor/github.com/apache/thrift/build/cmake/DefineCMakeDefaults.cmake b/vendor/github.com/apache/thrift/build/cmake/DefineCMakeDefaults.cmake
new file mode 100644
index 000000000..365c0a434
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/cmake/DefineCMakeDefaults.cmake
@@ -0,0 +1,87 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+# Always include srcdir and builddir in include path
+# This saves typing ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY} in
+# about every subdir
+# since cmake 2.4.0
+set(CMAKE_INCLUDE_CURRENT_DIR ON)
+
+# Put the include dirs which are in the source or build tree
+# before all other include dirs, so the headers in the sources
+# are preferred over the already installed ones
+# since cmake 2.4.1
+set(CMAKE_INCLUDE_DIRECTORIES_PROJECT_BEFORE ON)
+
+# Use colored output
+# since cmake 2.4.0
+set(CMAKE_COLOR_MAKEFILE ON)
+
+# Define the generic version of the libraries here
+set(GENERIC_LIB_VERSION "0.1.0")
+set(GENERIC_LIB_SOVERSION "0")
+
+# Set the default build type to release with debug info
+if (NOT CMAKE_BUILD_TYPE)
+ set(CMAKE_BUILD_TYPE RelWithDebInfo
+ CACHE STRING
+ "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel."
+ )
+endif (NOT CMAKE_BUILD_TYPE)
+
+# Create the compile command database for clang by default
+set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
+
+# Put the libraries and binaries that get built into directories at the
+# top of the build tree rather than in hard-to-find leaf
+# directories. This simplifies manual testing and the use of the build
+# tree rather than installed thrift libraries.
+set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
+
+#
+# "rpath" support.
+# See http://www.itk.org/Wiki/index.php?title=CMake_RPATH_handling
+#
+# On MacOSX, for shared libraries, enable rpath support.
+set(CMAKE_MACOSX_RPATH TRUE)
+#
+# On any OS, for executables, allow linking with shared libraries in non-system
+# locations and running the executables without LD_PRELOAD or similar.
+# This requires the library to be built with rpath support.
+set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
+
+#
+# C++ Language Level Defaults
+#
+if (NOT DEFINED CMAKE_CXX_STANDARD)
+ set(CMAKE_CXX_STANDARD 11) # C++11
+ message(STATUS "Setting C++11 as the default language level.")
+ message(STATUS "To specify a different C++ language level, set CMAKE_CXX_STANDARD")
+endif()
+
+if (NOT DEFINED CMAKE_CXX_STANDARD_REQUIRED)
+ set(CMAKE_CXX_STANDARD_REQUIRED OFF) # can degrade to C++98 if compiler does not support C++11
+endif()
+
+if (NOT DEFINED CMAKE_CXX_EXTENSIONS)
+ set(CMAKE_CXX_EXTENSIONS OFF) # use standards compliant language level for portability
+endif()
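The three `NOT DEFINED` guards above only supply defaults; anything that sets these variables before this file is included takes precedence. A hedged sketch of such an override (the values are purely illustrative, not something this change requires):

    # Illustrative override, e.g. placed in a toolchain file or passed as
    # -DCMAKE_CXX_STANDARD=14 on the command line, evaluated before
    # DefineCMakeDefaults.cmake runs so the guards above are skipped.
    set(CMAKE_CXX_STANDARD 14)
    set(CMAKE_CXX_STANDARD_REQUIRED ON)   # fail instead of degrading to C++98
    set(CMAKE_CXX_EXTENSIONS OFF)         # keep -std=c++14 rather than gnu++14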
diff --git a/vendor/github.com/apache/thrift/build/cmake/DefineInstallationPaths.cmake b/vendor/github.com/apache/thrift/build/cmake/DefineInstallationPaths.cmake
new file mode 100644
index 000000000..122f0f6a9
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/cmake/DefineInstallationPaths.cmake
@@ -0,0 +1,26 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+# Define the default install paths
+set(BIN_INSTALL_DIR "bin" CACHE PATH "The binary install dir (default: bin)")
+set(LIB_INSTALL_DIR "lib${LIB_SUFFIX}" CACHE PATH "The library install dir (default: lib${LIB_SUFFIX})")
+set(INCLUDE_INSTALL_DIR "include" CACHE PATH "The library install dir (default: include)")
+set(CMAKE_INSTALL_DIR "cmake" CACHE PATH "The subdirectory to install cmake config files (default: cmake)")
+set(DOC_INSTALL_DIR "share/doc" CACHE PATH "The subdirectory to install documentation files (default: share/doc)")
diff --git a/vendor/github.com/apache/thrift/build/cmake/DefineOptions.cmake b/vendor/github.com/apache/thrift/build/cmake/DefineOptions.cmake
new file mode 100644
index 000000000..63981e94d
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/cmake/DefineOptions.cmake
@@ -0,0 +1,207 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+include(CMakeDependentOption)
+
+set(THRIFT_COMPILER "" CACHE FILEPATH "External Thrift compiler to use during build")
+
+# Additional components
+option(BUILD_COMPILER "Build Thrift compiler" ON)
+
+if(BUILD_COMPILER OR EXISTS ${THRIFT_COMPILER})
+ set(HAVE_COMPILER ON)
+endif()
+CMAKE_DEPENDENT_OPTION(BUILD_TESTING "Build with unit tests" ON "HAVE_COMPILER" OFF)
+CMAKE_DEPENDENT_OPTION(BUILD_EXAMPLES "Build examples" ON "HAVE_COMPILER" OFF)
+CMAKE_DEPENDENT_OPTION(BUILD_TUTORIALS "Build Thrift tutorials" ON "HAVE_COMPILER" OFF)
+option(BUILD_LIBRARIES "Build Thrift libraries" ON)
+
+# Libraries to build
+
+# Each language library can be enabled or disabled using its WITH_ flag.
+# By default CMake checks if the required dependencies for a language are present
+# and enables the library only if all of them are found. This means the default is
+# to build as much as possible while leaving out libraries whose dependencies are
+# not met.
+
+option(WITH_BOOST_STATIC "Build with Boost static link library" OFF)
+set(Boost_USE_STATIC_LIBS ${WITH_BOOST_STATIC})
+if (NOT WITH_BOOST_STATIC)
+ add_definitions(-DBOOST_ALL_DYN_LINK)
+ add_definitions(-DBOOST_TEST_DYN_LINK)
+endif()
+
+# C++
+option(WITH_CPP "Build C++ Thrift library" ON)
+if(WITH_CPP)
+ find_package(Boost 1.53 QUIET)
+ # NOTE: Currently the following options are C++ specific,
+ # but in future other libraries might reuse them.
+ # So they are not dependent on WITH_CPP but setting them without WITH_CPP currently
+ # has no effect.
+ if(ZLIB_LIBRARY)
+ # FindZLIB.cmake does not normalize path so we need to do it ourselves.
+ file(TO_CMAKE_PATH ${ZLIB_LIBRARY} ZLIB_LIBRARY)
+ endif()
+ find_package(ZLIB QUIET)
+ CMAKE_DEPENDENT_OPTION(WITH_ZLIB "Build with ZLIB support" ON
+ "ZLIB_FOUND" OFF)
+ find_package(Libevent QUIET)
+ CMAKE_DEPENDENT_OPTION(WITH_LIBEVENT "Build with libevent support" ON
+ "Libevent_FOUND" OFF)
+ find_package(Qt4 QUIET COMPONENTS QtCore QtNetwork)
+ CMAKE_DEPENDENT_OPTION(WITH_QT4 "Build with Qt4 support" ON
+ "QT4_FOUND" OFF)
+ find_package(Qt5 QUIET COMPONENTS Core Network)
+ CMAKE_DEPENDENT_OPTION(WITH_QT5 "Build with Qt5 support" ON
+ "Qt5_FOUND" OFF)
+ if(${WITH_QT4} AND ${WITH_QT5} AND ${CMAKE_MAJOR_VERSION} LESS 3)
+ # cmake < 3.0.0 causes conflict when building both Qt4 and Qt5
+ set(WITH_QT4 OFF)
+ endif()
+ find_package(OpenSSL QUIET)
+ CMAKE_DEPENDENT_OPTION(WITH_OPENSSL "Build with OpenSSL support" ON
+ "OPENSSL_FOUND" OFF)
+ option(WITH_STDTHREADS "Build with C++ std::thread support" OFF)
+ CMAKE_DEPENDENT_OPTION(WITH_BOOSTTHREADS "Build with Boost threads support" OFF
+ "NOT WITH_STDTHREADS;Boost_FOUND" OFF)
+endif()
+CMAKE_DEPENDENT_OPTION(BUILD_CPP "Build C++ library" ON
+ "BUILD_LIBRARIES;WITH_CPP;Boost_FOUND" OFF)
+CMAKE_DEPENDENT_OPTION(WITH_PLUGIN "Build compiler plugin support" OFF
+ "BUILD_COMPILER;BUILD_CPP" OFF)
+
+# C GLib
+option(WITH_C_GLIB "Build C (GLib) Thrift library" ON)
+if(WITH_C_GLIB)
+ find_package(GLIB QUIET COMPONENTS gobject)
+endif()
+CMAKE_DEPENDENT_OPTION(BUILD_C_GLIB "Build C (GLib) library" ON
+ "BUILD_LIBRARIES;WITH_C_GLIB;GLIB_FOUND" OFF)
+
+if(BUILD_CPP)
+ set(boost_components)
+ if(WITH_BOOSTTHREADS OR BUILD_TESTING)
+ list(APPEND boost_components system thread)
+ endif()
+ if(BUILD_TESTING)
+ list(APPEND boost_components unit_test_framework filesystem chrono program_options)
+ endif()
+ if(boost_components)
+ find_package(Boost 1.53 REQUIRED COMPONENTS ${boost_components})
+ endif()
+elseif(BUILD_C_GLIB AND BUILD_TESTING)
+ find_package(Boost 1.53 REQUIRED)
+endif()
+
+# Java
+option(WITH_JAVA "Build Java Thrift library" ON)
+if(ANDROID)
+ find_package(Gradle QUIET)
+ CMAKE_DEPENDENT_OPTION(BUILD_JAVA "Build Java library" ON
+ "BUILD_LIBRARIES;WITH_JAVA;GRADLE_FOUND" OFF)
+else()
+ find_package(Java QUIET)
+ find_package(Ant QUIET)
+ CMAKE_DEPENDENT_OPTION(BUILD_JAVA "Build Java library" ON
+ "BUILD_LIBRARIES;WITH_JAVA;JAVA_FOUND;ANT_FOUND" OFF)
+endif()
+
+# Python
+option(WITH_PYTHON "Build Python Thrift library" ON)
+find_package(PythonInterp QUIET) # for Python executable
+find_package(PythonLibs QUIET) # for Python.h
+CMAKE_DEPENDENT_OPTION(BUILD_PYTHON "Build Python library" ON
+ "BUILD_LIBRARIES;WITH_PYTHON;PYTHONLIBS_FOUND" OFF)
+
+# Haskell
+option(WITH_HASKELL "Build Haskell Thrift library" ON)
+find_package(GHC QUIET)
+find_package(Cabal QUIET)
+CMAKE_DEPENDENT_OPTION(BUILD_HASKELL "Build GHC library" ON
+ "BUILD_LIBRARIES;WITH_HASKELL;GHC_FOUND;CABAL_FOUND" OFF)
+
+# Common library options
+option(WITH_SHARED_LIB "Build shared libraries" ON)
+option(WITH_STATIC_LIB "Build static libraries" ON)
+if (NOT WITH_SHARED_LIB AND NOT WITH_STATIC_LIB)
+ message(FATAL_ERROR "Cannot build with both shared and static outputs disabled!")
+endif()
+
+#NOTE: C++ compiler options are defined in the lib/cpp/CMakeLists.txt
+
+# Visual Studio only options
+if(MSVC)
+option(WITH_MT "Build using MT instead of MD (MSVC only)" OFF)
+endif(MSVC)
+
+macro(MESSAGE_DEP flag summary)
+if(NOT ${flag})
+ message(STATUS " - ${summary}")
+endif()
+endmacro(MESSAGE_DEP flag summary)
+
+macro(PRINT_CONFIG_SUMMARY)
+message(STATUS "----------------------------------------------------------")
+message(STATUS "Thrift version: ${thrift_VERSION} (${thrift_VERSION_MAJOR}.${thrift_VERSION_MINOR}.${thrift_VERSION_PATCH})")
+message(STATUS "Thrift package version: ${PACKAGE_VERSION}")
+message(STATUS "Build configuration Summary")
+message(STATUS " Build Thrift compiler: ${BUILD_COMPILER}")
+message(STATUS " Build compiler plugin support: ${WITH_PLUGIN}")
+message(STATUS " Build with unit tests: ${BUILD_TESTING}")
+MESSAGE_DEP(HAVE_COMPILER "Disabled because BUILD_COMPILER=OFF and no valid THRIFT_COMPILER is given")
+message(STATUS " Build examples: ${BUILD_EXAMPLES}")
+MESSAGE_DEP(HAVE_COMPILER "Disabled because BUILD_COMPILER=OFF and no valid THRIFT_COMPILER is given")
+message(STATUS " Build Thrift libraries: ${BUILD_LIBRARIES}")
+message(STATUS " Language libraries:")
+message(STATUS " Build C++ library: ${BUILD_CPP}")
+MESSAGE_DEP(WITH_CPP "Disabled by WITH_CPP=OFF")
+MESSAGE_DEP(Boost_FOUND "Boost headers missing")
+message(STATUS " C++ Language Level: ${CXX_LANGUAGE_LEVEL}")
+message(STATUS " Build C (GLib) library: ${BUILD_C_GLIB}")
+MESSAGE_DEP(WITH_C_GLIB "Disabled by WITH_C_GLIB=OFF")
+MESSAGE_DEP(GLIB_FOUND "GLib missing")
+message(STATUS " Build Java library: ${BUILD_JAVA}")
+MESSAGE_DEP(WITH_JAVA "Disabled by WITH_JAVA=OFF")
+if(ANDROID)
+ MESSAGE_DEP(GRADLE_FOUND "Gradle missing")
+else()
+ MESSAGE_DEP(JAVA_FOUND "Java Runtime missing")
+ MESSAGE_DEP(ANT_FOUND "Ant missing")
+endif()
+message(STATUS " Build Python library: ${BUILD_PYTHON}")
+MESSAGE_DEP(WITH_PYTHON "Disabled by WITH_PYTHON=OFF")
+MESSAGE_DEP(PYTHONLIBS_FOUND "Python libraries missing")
+message(STATUS " Build Haskell library: ${BUILD_HASKELL}")
+MESSAGE_DEP(WITH_HASKELL "Disabled by WITH_HASKELL=OFF")
+MESSAGE_DEP(GHC_FOUND "GHC missing")
+MESSAGE_DEP(CABAL_FOUND "Cabal missing")
+message(STATUS " Library features:")
+message(STATUS " Build shared libraries: ${WITH_SHARED_LIB}")
+message(STATUS " Build static libraries: ${WITH_STATIC_LIB}")
+message(STATUS " Build with Boost static link library: ${WITH_BOOST_STATIC}")
+message(STATUS " Build with Boost thread support: ${WITH_BOOSTTHREADS}")
+message(STATUS " Build with C++ std::thread support: ${WITH_STDTHREADS}")
+message(STATUS " Build with libevent support: ${WITH_LIBEVENT}")
+message(STATUS " Build with OpenSSL support: ${WITH_OPENSSL}")
+message(STATUS " Build with Qt4 support: ${WITH_QT4}")
+message(STATUS " Build with Qt5 support: ${WITH_QT5}")
+message(STATUS " Build with ZLIB support: ${WITH_ZLIB}")
+message(STATUS "----------------------------------------------------------")
+endmacro(PRINT_CONFIG_SUMMARY)
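As the comment near the top of DefineOptions.cmake explains, each WITH_* option only expresses intent; the derived BUILD_* option is what the rest of the build consumes, and `CMAKE_DEPENDENT_OPTION` forces it OFF whenever the probed dependencies are missing. A minimal sketch of the same pattern for a hypothetical `Foo` library (not part of this change):

    include(CMakeDependentOption)

    option(WITH_FOO "Build Foo Thrift library" ON)   # user-facing switch
    find_package(Foo QUIET)                          # sets Foo_FOUND
    # BUILD_FOO is ON only if libraries are being built at all, the user kept
    # WITH_FOO enabled, and the dependency probe succeeded; otherwise it is
    # forced OFF regardless of what the user asked for.
    CMAKE_DEPENDENT_OPTION(BUILD_FOO "Build Foo library" ON
        "BUILD_LIBRARIES;WITH_FOO;Foo_FOUND" OFF)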
diff --git a/vendor/github.com/apache/thrift/build/cmake/DefinePlatformSpecifc.cmake b/vendor/github.com/apache/thrift/build/cmake/DefinePlatformSpecifc.cmake
new file mode 100644
index 000000000..d5d27e2d0
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/cmake/DefinePlatformSpecifc.cmake
@@ -0,0 +1,124 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# Uncomment this to show some basic cmake variables about platforms
+# include (NewPlatformDebug)
+
+# Visual Studio specific options
+if(MSVC)
+ #For visual studio the library naming is as following:
+ # Dynamic libraries:
+ # - thrift.dll for release library
+ # - thriftd.dll for debug library
+ #
+ # Static libraries:
+ # - thriftmd.lib for /MD release build
+ # - thriftmt.lib for /MT release build
+ #
+ # - thriftmdd.lib for /MD debug build
+ # - thriftmtd.lib for /MT debug build
+ #
+ # the same holds for other libraries like libthriftz etc.
+
+ # For Debug build types, append a "d" to the library names.
+ set(CMAKE_DEBUG_POSTFIX "d" CACHE STRING "Set debug library postfix" FORCE)
+ set(CMAKE_RELEASE_POSTFIX "" CACHE STRING "Set release library postfix" FORCE)
+ set(CMAKE_RELWITHDEBINFO_POSTFIX "" CACHE STRING "Set release library postfix" FORCE)
+
+ # Build using /MT option instead of /MD if the WITH_MT options is set
+ if(WITH_MT)
+ set(CompilerFlags
+ CMAKE_CXX_FLAGS
+ CMAKE_CXX_FLAGS_DEBUG
+ CMAKE_CXX_FLAGS_RELEASE
+ CMAKE_CXX_FLAGS_RELWITHDEBINFO
+ CMAKE_C_FLAGS
+ CMAKE_C_FLAGS_DEBUG
+ CMAKE_C_FLAGS_RELEASE
+ CMAKE_C_FLAGS_RELWITHDEBINFO
+ )
+ foreach(CompilerFlag ${CompilerFlags})
+ string(REPLACE "/MD" "/MT" ${CompilerFlag} "${${CompilerFlag}}")
+ endforeach()
+ set(STATIC_POSTFIX "mt" CACHE STRING "Set static library postfix" FORCE)
+ else(WITH_MT)
+ set(STATIC_POSTFIX "md" CACHE STRING "Set static library postfix" FORCE)
+ endif(WITH_MT)
+
+ # Disable Windows.h definition of macros for min and max
+ add_definitions("-DNOMINMAX")
+
+ # Disable boost auto linking pragmas - cmake includes the right files
+ add_definitions("-DBOOST_ALL_NO_LIB")
+
+ # Windows build does not know how to make a shared library yet
+ # as there are no __declspec(dllexport) or exports files in the project.
+ if (WITH_SHARED_LIB)
+ message (FATAL_ERROR "Windows build does not support shared library output yet, please set -DWITH_SHARED_LIB=off")
+ endif()
+
+ add_definitions("/MP") # parallel build
+ add_definitions("/W3") # warning level 3
+
+ # VS2010 does not provide inttypes which we need for "PRId64" used in many places
+ find_package(Inttypes)
+ if (Inttypes_FOUND)
+ include_directories(${INTTYPES_INCLUDE_DIRS})
+ # OpenSSL conflicts with the definition of PRId64 unless it is defined first
+ add_definitions("/FIinttypes.h")
+ endif ()
+elseif(UNIX)
+ find_program( MEMORYCHECK_COMMAND valgrind )
+ set( MEMORYCHECK_COMMAND_OPTIONS "--gen-suppressions=all --leak-check=full" )
+ set( MEMORYCHECK_SUPPRESSIONS_FILE "${PROJECT_SOURCE_DIR}/test/valgrind.suppress" )
+endif()
+
+add_definitions("-D__STDC_FORMAT_MACROS")
+
+# WITH_*THREADS selects which threading library to use
+if(WITH_BOOSTTHREADS)
+ add_definitions("-DUSE_BOOST_THREAD=1")
+elseif(WITH_STDTHREADS)
+ add_definitions("-DUSE_STD_THREAD=1")
+endif()
+
+# C++ Language Level
+set(CXX_LANGUAGE_LEVEL "C++${CMAKE_CXX_STANDARD}")
+if (CMAKE_CXX_STANDARD_REQUIRED)
+ string(CONCAT CXX_LANGUAGE_LEVEL "${CXX_LANGUAGE_LEVEL} [compiler must support it]")
+else()
+ string(CONCAT CXX_LANGUAGE_LEVEL "${CXX_LANGUAGE_LEVEL} [fallback to earlier if compiler does not support it]")
+endif()
+if (CMAKE_CXX_EXTENSIONS)
+ string(CONCAT CXX_LANGUAGE_LEVEL "${CXX_LANGUAGE_LEVEL} [with compiler-specific extensions]")
+else()
+ if ((CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") AND NOT MINGW)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-variadic-macros -Wno-long-long -Wno-c++11-long-long")
+ endif()
+endif()
+
+if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated-register")
+endif()
+
+# If gcc older than 4.8 is detected and plugin support was requested, fail fast
+if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.8" AND WITH_PLUGIN)
+ message(SEND_ERROR "Thrift compiler plug-in support is not possible with older gcc ( < 4.8 ) compiler")
+endif()
+
diff --git a/vendor/github.com/apache/thrift/build/cmake/FindAnt.cmake b/vendor/github.com/apache/thrift/build/cmake/FindAnt.cmake
new file mode 100644
index 000000000..8b0371d91
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/cmake/FindAnt.cmake
@@ -0,0 +1,30 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+# ANT_FOUND - system has Ant
+# Ant_EXECUTABLE - the Ant executable
+#
+# It will search the environment variable ANT_HOME if it is set
+
+include(FindPackageHandleStandardArgs)
+
+find_program(Ant_EXECUTABLE NAMES ant PATHS $ENV{ANT_HOME}/bin)
+find_package_handle_standard_args(Ant DEFAULT_MSG Ant_EXECUTABLE)
+mark_as_advanced(Ant_EXECUTABLE)
diff --git a/vendor/github.com/apache/thrift/build/cmake/FindCabal.cmake b/vendor/github.com/apache/thrift/build/cmake/FindCabal.cmake
new file mode 100644
index 000000000..fed337bd4
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/cmake/FindCabal.cmake
@@ -0,0 +1,30 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+# Cabal_FOUND - system has Cabal
+# Cabal - the Cabal executable
+#
+# It will search the environment variable CABAL_HOME if it is set
+
+include(FindPackageHandleStandardArgs)
+
+find_program(CABAL NAMES cabal PATHS $ENV{HOME}/.cabal/bin $ENV{CABAL_HOME}/bin)
+find_package_handle_standard_args(CABAL DEFAULT_MSG CABAL)
+mark_as_advanced(CABAL)
diff --git a/vendor/github.com/apache/thrift/build/cmake/FindGHC.cmake b/vendor/github.com/apache/thrift/build/cmake/FindGHC.cmake
new file mode 100644
index 000000000..48738472c
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/cmake/FindGHC.cmake
@@ -0,0 +1,36 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+# GHC_FOUND - system has GHC
+# GHC - the GHC executable
+# RUN_HASKELL_FOUND - system has runhaskell
+# RUN_HASKELL - the runhaskell executable
+#
+# It will search the environment variable GHC_HOME if it is set
+
+include(FindPackageHandleStandardArgs)
+
+find_program(GHC NAMES ghc PATHS $ENV{GHC_HOME}/bin)
+find_package_handle_standard_args(GHC DEFAULT_MSG GHC)
+mark_as_advanced(GHC)
+
+find_program(RUN_HASKELL NAMES runhaskell PATHS $ENV{GHC_HOME}/bin)
+find_package_handle_standard_args(RUN_HASKELL DEFAULT_MSG RUN_HASKELL)
+mark_as_advanced(RUN_HASKELL)
diff --git a/vendor/github.com/apache/thrift/build/cmake/FindGLIB.cmake b/vendor/github.com/apache/thrift/build/cmake/FindGLIB.cmake
new file mode 100644
index 000000000..acbe433e3
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/cmake/FindGLIB.cmake
@@ -0,0 +1,122 @@
+# - Try to find Glib and its components (gio, gobject etc)
+# Once done, this will define
+#
+# GLIB_FOUND - system has Glib
+# GLIB_INCLUDE_DIRS - the Glib include directories
+# GLIB_LIBRARIES - link these to use Glib
+#
+# Optionally, the COMPONENTS keyword can be passed to find_package()
+# and Glib components can be looked for. Currently, the following
+# components can be used, and they define the following variables if
+# found:
+#
+# gio: GLIB_GIO_LIBRARIES
+# gobject: GLIB_GOBJECT_LIBRARIES
+# gmodule: GLIB_GMODULE_LIBRARIES
+# gthread: GLIB_GTHREAD_LIBRARIES
+#
+# Note that the respective _INCLUDE_DIR variables are not set, since
+# all headers are in the same directory as GLIB_INCLUDE_DIRS.
+#
+# Copyright (C) 2012 Raphael Kubo da Costa
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND ITS CONTRIBUTORS ``AS
+# IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR ITS
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+find_package(PkgConfig)
+pkg_check_modules(PC_GLIB QUIET glib-2.0)
+
+find_library(GLIB_LIBRARIES
+ NAMES glib-2.0
+ HINTS ${PC_GLIB_LIBDIR}
+ ${PC_GLIB_LIBRARY_DIRS}
+)
+
+# Files in glib's main include path may include glibconfig.h, which,
+# for some odd reason, is normally in $LIBDIR/glib-2.0/include.
+get_filename_component(_GLIB_LIBRARY_DIR ${GLIB_LIBRARIES} PATH)
+find_path(GLIBCONFIG_INCLUDE_DIR
+ NAMES glibconfig.h
+ HINTS ${PC_LIBDIR} ${PC_LIBRARY_DIRS} ${_GLIB_LIBRARY_DIR}
+ ${PC_GLIB_INCLUDEDIR} ${PC_GLIB_INCLUDE_DIRS}
+ PATH_SUFFIXES glib-2.0/include
+)
+
+find_path(GLIB_INCLUDE_DIR
+ NAMES glib.h
+ HINTS ${PC_GLIB_INCLUDEDIR}
+ ${PC_GLIB_INCLUDE_DIRS}
+ PATH_SUFFIXES glib-2.0
+)
+
+set(GLIB_INCLUDE_DIRS ${GLIB_INCLUDE_DIR} ${GLIBCONFIG_INCLUDE_DIR})
+
+if(GLIBCONFIG_INCLUDE_DIR)
+ # Version detection
+ file(READ "${GLIBCONFIG_INCLUDE_DIR}/glibconfig.h" GLIBCONFIG_H_CONTENTS)
+ string(REGEX MATCH "#define GLIB_MAJOR_VERSION ([0-9]+)" _dummy "${GLIBCONFIG_H_CONTENTS}")
+ set(GLIB_VERSION_MAJOR "${CMAKE_MATCH_1}")
+ string(REGEX MATCH "#define GLIB_MINOR_VERSION ([0-9]+)" _dummy "${GLIBCONFIG_H_CONTENTS}")
+ set(GLIB_VERSION_MINOR "${CMAKE_MATCH_1}")
+ string(REGEX MATCH "#define GLIB_MICRO_VERSION ([0-9]+)" _dummy "${GLIBCONFIG_H_CONTENTS}")
+ set(GLIB_VERSION_MICRO "${CMAKE_MATCH_1}")
+ set(GLIB_VERSION "${GLIB_VERSION_MAJOR}.${GLIB_VERSION_MINOR}.${GLIB_VERSION_MICRO}")
+endif()
+
+# Additional Glib components. We only look for libraries, as not all of them
+# have corresponding headers and all headers are installed alongside the main
+# glib ones.
+foreach (_component ${GLIB_FIND_COMPONENTS})
+ if (${_component} STREQUAL "gio")
+ find_library(GLIB_GIO_LIBRARIES NAMES gio-2.0 HINTS ${_GLIB_LIBRARY_DIR})
+ set(ADDITIONAL_REQUIRED_VARS ${ADDITIONAL_REQUIRED_VARS} GLIB_GIO_LIBRARIES)
+ elseif (${_component} STREQUAL "gobject")
+ find_library(GLIB_GOBJECT_LIBRARIES NAMES gobject-2.0 HINTS ${_GLIB_LIBRARY_DIR})
+ set(ADDITIONAL_REQUIRED_VARS ${ADDITIONAL_REQUIRED_VARS} GLIB_GOBJECT_LIBRARIES)
+ elseif (${_component} STREQUAL "gmodule")
+ find_library(GLIB_GMODULE_LIBRARIES NAMES gmodule-2.0 HINTS ${_GLIB_LIBRARY_DIR})
+ set(ADDITIONAL_REQUIRED_VARS ${ADDITIONAL_REQUIRED_VARS} GLIB_GMODULE_LIBRARIES)
+ elseif (${_component} STREQUAL "gthread")
+ find_library(GLIB_GTHREAD_LIBRARIES NAMES gthread-2.0 HINTS ${_GLIB_LIBRARY_DIR})
+ set(ADDITIONAL_REQUIRED_VARS ${ADDITIONAL_REQUIRED_VARS} GLIB_GTHREAD_LIBRARIES)
+ elseif (${_component} STREQUAL "gio-unix")
+ # gio-unix is compiled as part of the gio library, but the include paths
+ # are separate from the shared glib ones. Since this is currently only used
+ # by WebKitGTK+ we don't go to extraordinary measures beyond pkg-config.
+ pkg_check_modules(GIO_UNIX QUIET gio-unix-2.0)
+ endif ()
+endforeach ()
+
+include(FindPackageHandleStandardArgs)
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(GLIB REQUIRED_VARS GLIB_INCLUDE_DIRS GLIB_LIBRARIES ${ADDITIONAL_REQUIRED_VARS}
+ VERSION_VAR GLIB_VERSION)
+
+mark_as_advanced(
+ GLIBCONFIG_INCLUDE_DIR
+ GLIB_GIO_LIBRARIES
+ GLIB_GIO_UNIX_LIBRARIES
+ GLIB_GMODULE_LIBRARIES
+ GLIB_GOBJECT_LIBRARIES
+ GLIB_GTHREAD_LIBRARIES
+ GLIB_INCLUDE_DIR
+ GLIB_INCLUDE_DIRS
+ GLIB_LIBRARIES
+)
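As the header comment of this find module documents, consumers request extra GLib pieces through the `COMPONENTS` keyword and then link the per-component variables. A short consumption sketch, assuming a hypothetical target named `demo`:

    # Illustrative consumer of FindGLIB.cmake; "demo" is a made-up target.
    find_package(GLIB REQUIRED COMPONENTS gobject)
    include_directories(${GLIB_INCLUDE_DIRS})
    target_link_libraries(demo ${GLIB_LIBRARIES} ${GLIB_GOBJECT_LIBRARIES})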
diff --git a/vendor/github.com/apache/thrift/build/cmake/FindGradle.cmake b/vendor/github.com/apache/thrift/build/cmake/FindGradle.cmake
new file mode 100644
index 000000000..8845d697e
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/cmake/FindGradle.cmake
@@ -0,0 +1,30 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+# GRADLE_FOUND - system has Gradle
+# GRADLE_EXECUTABLE - the Gradle executable
+#
+# It will search the environment variable GRADLE_HOME if it is set
+
+include(FindPackageHandleStandardArgs)
+
+find_program(GRADLE_EXECUTABLE NAMES gradle PATHS $ENV{GRADLE_HOME}/bin NO_CMAKE_FIND_ROOT_PATH)
+find_package_handle_standard_args(Gradle DEFAULT_MSG GRADLE_EXECUTABLE)
+mark_as_advanced(GRADLE_EXECUTABLE)
diff --git a/vendor/github.com/apache/thrift/build/cmake/FindInttypes.cmake b/vendor/github.com/apache/thrift/build/cmake/FindInttypes.cmake
new file mode 100644
index 000000000..e661f7887
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/cmake/FindInttypes.cmake
@@ -0,0 +1,41 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# find msinttypes on compilers that don't provide it, for example
+# VS2010
+
+# Usage:
+# Provide INTTYPES_ROOT if you need it
+# Result: INTTYPES_INCLUDE_DIRS, where to find inttypes.h
+# Result: Inttypes_FOUND, If false, inttypes.h was not found
+
+find_path(INTTYPES_INCLUDE_DIRS inttypes.h HINTS ${INTTYPES_ROOT})
+if (INTTYPES_INCLUDE_DIRS)
+ set(Inttypes_FOUND TRUE)
+else ()
+ set(Inttypes_FOUND FALSE)
+ if (Inttypes_FIND_REQUIRED)
+ message(FATAL_ERROR "Could NOT find inttypes.h")
+ endif ()
+ message(STATUS "inttypes.h NOT found")
+endif ()
+
+mark_as_advanced(
+ INTTYPES_INCLUDE_DIRS
+)
diff --git a/vendor/github.com/apache/thrift/build/cmake/FindLibevent.cmake b/vendor/github.com/apache/thrift/build/cmake/FindLibevent.cmake
new file mode 100644
index 000000000..ac6a078a1
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/cmake/FindLibevent.cmake
@@ -0,0 +1,45 @@
+# find LibEvent
+# an event notification library (http://libevent.org/)
+#
+# Usage:
+# LIBEVENT_INCLUDE_DIRS, where to find LibEvent headers
+# LIBEVENT_LIBRARIES, LibEvent libraries
+# Libevent_FOUND, If false, do not try to use libevent
+
+set(LIBEVENT_ROOT CACHE PATH "Root directory of libevent installation")
+set(LibEvent_EXTRA_PREFIXES /usr/local /opt/local "$ENV{HOME}" ${LIBEVENT_ROOT})
+foreach(prefix ${LibEvent_EXTRA_PREFIXES})
+ list(APPEND LibEvent_INCLUDE_PATHS "${prefix}/include")
+ list(APPEND LibEvent_LIBRARIES_PATHS "${prefix}/lib")
+endforeach()
+
+# Looking for "event.h" will find the Platform SDK include dir on windows
+# so we also look for a peer header like evhttp.h to get the right path
+find_path(LIBEVENT_INCLUDE_DIRS evhttp.h event.h PATHS ${LibEvent_INCLUDE_PATHS})
+
+# "lib" prefix is needed on Windows in some cases
+# newer versions of libevent use three libraries
+find_library(LIBEVENT_LIBRARIES NAMES event event_core event_extra libevent PATHS ${LibEvent_LIBRARIES_PATHS})
+
+if (LIBEVENT_LIBRARIES AND LIBEVENT_INCLUDE_DIRS)
+ set(Libevent_FOUND TRUE)
+ set(LIBEVENT_LIBRARIES ${LIBEVENT_LIBRARIES})
+else ()
+ set(Libevent_FOUND FALSE)
+endif ()
+
+if (Libevent_FOUND)
+ if (NOT Libevent_FIND_QUIETLY)
+ message(STATUS "Found libevent: ${LIBEVENT_LIBRARIES}")
+ endif ()
+else ()
+ if (LibEvent_FIND_REQUIRED)
+ message(FATAL_ERROR "Could NOT find libevent.")
+ endif ()
+ message(STATUS "libevent NOT found.")
+endif ()
+
+mark_as_advanced(
+ LIBEVENT_LIBRARIES
+ LIBEVENT_INCLUDE_DIRS
+ )
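The variables exported here follow the usual find-module contract, so a consumer guards optional code on `Libevent_FOUND`. A hedged sketch (the target and source names are invented for illustration; Thrift itself reaches this module through WITH_LIBEVENT in DefineOptions.cmake):

    # Illustrative use of FindLibevent.cmake.
    find_package(Libevent)   # sets Libevent_FOUND, LIBEVENT_INCLUDE_DIRS, LIBEVENT_LIBRARIES
    if(Libevent_FOUND)
      include_directories(${LIBEVENT_INCLUDE_DIRS})
      add_executable(nonblocking_demo nonblocking_demo.cpp)   # hypothetical target
      target_link_libraries(nonblocking_demo ${LIBEVENT_LIBRARIES})
    endif()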
diff --git a/vendor/github.com/apache/thrift/build/cmake/NewPlatformDebug.cmake b/vendor/github.com/apache/thrift/build/cmake/NewPlatformDebug.cmake
new file mode 100644
index 000000000..76cac15c2
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/cmake/NewPlatformDebug.cmake
@@ -0,0 +1,43 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+#
+# For debugging new platforms, just to see what some environment flags are...
+#
+macro(SHOWFLAG flag)
+ message(STATUS "${flag} = ${${flag}}")
+endmacro(SHOWFLAG)
+
+set(NEWPLATFORMDEBUG ON)
+
+if(NEWPLATFORMDEBUG)
+ SHOWFLAG("APPLE")
+ SHOWFLAG("BORLAND")
+ SHOWFLAG("CMAKE_C_COMPILER_ID")
+ SHOWFLAG("CMAKE_CXX_COMPILER_ID")
+ SHOWFLAG("CMAKE_COMPILER_IS_GNUCC")
+ SHOWFLAG("CMAKE_COMPILER_IS_GNUCXX")
+ SHOWFLAG("CYGWIN")
+ SHOWFLAG("MINGW")
+ SHOWFLAG("MSVC")
+ SHOWFLAG("MSYS")
+ SHOWFLAG("UNIX")
+ SHOWFLAG("WATCOM")
+ SHOWFLAG("WIN32")
+endif(NEWPLATFORMDEBUG)
diff --git a/vendor/github.com/apache/thrift/build/cmake/README-MSYS2.md b/vendor/github.com/apache/thrift/build/cmake/README-MSYS2.md
new file mode 100644
index 000000000..02679e615
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/cmake/README-MSYS2.md
@@ -0,0 +1,63 @@
+
+
+# Building thrift on Windows (MinGW64/MSYS2)
+
+Thrift uses CMake to make it easier to build the project on multiple platforms; however, building a fully functional and production-ready Thrift on Windows requires a number of third-party libraries to be obtained. Once the third-party libraries are ready, the right combination of options must be passed to cmake in order to generate the correct environment.
+
+> Note: libevent and libevent-devel do not work with this toolchain as they do not properly detect mingw64 and expect some headers to exist that do not, so the non-blocking server is not currently built into this solution.
+
+## MSYS2
+
+Download and fully upgrade msys2 following the instructions at:
+
+ https://msys2.github.io/
+
+Install the necessary toolchain items for C++:
+
+ $ pacman --needed -S bison flex make mingw-w64-x86_64-openssl \
+ mingw-w64-x86_64-boost mingw-w64-x86_64-cmake \
+ mingw-w64-x86_64-toolchain mingw-w64-x86_64-zlib
+
+Update your msys2 bash path to include /mingw64/bin by adding a line to your ~/.bash_profile using this command:
+
+ echo "export PATH=/mingw64/bin:\$PATH" >> ~/.bash_profile
+
+After that, close your shell and open a new one.
+
+Use cmake to create a MinGW makefile, out of tree (assumes you are in the top level of the thrift source tree):
+
+ mkdir ../thrift-build
+ cd ../thrift-build
+ cmake -G"MinGW Makefiles" -DCMAKE_MAKE_PROGRAM=/mingw64/bin/mingw32-make \
+ -DCMAKE_C_COMPILER=x86_64-w64-mingw32-gcc.exe \
+ -DCMAKE_CXX_COMPILER=x86_64-w64-mingw32-g++.exe \
+ -DWITH_BOOSTTHREADS=ON -DWITH_LIBEVENT=OFF \
+ -DWITH_SHARED_LIB=OFF -DWITH_STATIC_LIB=ON \
+ -DWITH_JAVA=OFF -DWITH_PYTHON=OFF -DWITH_PERL=OFF \
+ ../thrift
+
+Build thrift (inside thrift-build):
+
+ cmake --build .
+
+Run the tests (inside thrift-build):
+
+ ctest
+
+> If you run into issues, check Apache Jira THRIFT-4046 for patches relating to MinGW64/MSYS2 builds.
+
+## Tested With
+
+msys2 64-bit 2016-10-26 distribution
diff --git a/vendor/github.com/apache/thrift/build/cmake/README.md b/vendor/github.com/apache/thrift/build/cmake/README.md
new file mode 100644
index 000000000..ebc4f7da1
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/cmake/README.md
@@ -0,0 +1,60 @@
+# Apache Thrift - CMake build
+
+## Goal
+Extend Apache Thrift's *make cross* approach to the build system.
+
+As the range of supported operating systems grows, a proper executable
+and library detection mechanism that runs on as many platforms as possible
+becomes necessary. Another goal is to simplify the release and
+package generation process.
+
+A nice side benefit of CMake is the generation of development-environment-specific
+solution files. => No solution files within the source tree.
+
+
+## Usage
+just do this:
+
+ mkdir cmake-build && cd cmake-build
+ cmake ..
+
+if you use a specific toolchain, pass it to cmake; the same goes for options:
+
+ cmake -DCMAKE_TOOLCHAIN_FILE=../build/cmake/mingw32-toolchain.cmake ..
+ cmake -DCMAKE_C_COMPILER=clang-3.5 -DCMAKE_CXX_COMPILER=clang++-3.5 ..
+ cmake -DTHRIFT_COMPILER_HS=OFF ..
+ cmake -DWITH_ZLIB=ON ..
+
+or on Windows
+
+ cmake -G "Visual Studio 12 2013 Win64" \
+ -DBOOST_ROOT=C:/3rdparty/boost_1_58_0 \
+ -DZLIB_ROOT=C:/3rdparty/zlib128-dll \
+ -DWITH_SHARED_LIB=off -DWITH_BOOSTTHREADS=ON ..
+
+and open the development environment you like with the solution or do this:
+
+ make
+ make check
+ make cross
+ make dist
+
+to generate an installer and distribution package do this:
+
+ cpack
+
+## TODO
+* git hash or tag based versioning depending on source state
+* build tutorial
+* build test
+* with/without language lib/<lang>/
+* enable/disable
+* make cross
+* make dist (create an alias to make package_source)
+* make doc
+* cpack (C++ and make dist only ?)
+ * thrift-compiler
+ * libthrift
+ * tutorial
+ * test
+* merge into /README.md
diff --git a/vendor/github.com/apache/thrift/build/cmake/ThriftMacros.cmake b/vendor/github.com/apache/thrift/build/cmake/ThriftMacros.cmake
new file mode 100644
index 000000000..f837f9482
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/cmake/ThriftMacros.cmake
@@ -0,0 +1,105 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+set(CMAKE_DEBUG_POSTFIX "d" CACHE STRING "Set debug library postfix" FORCE)
+
+
+macro(ADD_LIBRARY_THRIFT name)
+
+if(WITH_SHARED_LIB)
+ add_library(${name} SHARED ${ARGN})
+ set_target_properties(${name} PROPERTIES
+ OUTPUT_NAME ${name}
+ VERSION ${thrift_VERSION}
+ SOVERSION ${thrift_VERSION} )
+ #set_target_properties(${name} PROPERTIES PUBLIC_HEADER "${thriftcpp_HEADERS}")
+ install(TARGETS ${name}
+ RUNTIME DESTINATION "${BIN_INSTALL_DIR}"
+ LIBRARY DESTINATION "${LIB_INSTALL_DIR}"
+ ARCHIVE DESTINATION "${LIB_INSTALL_DIR}"
+ PUBLIC_HEADER DESTINATION "${INCLUDE_INSTALL_DIR}")
+endif()
+
+if(WITH_STATIC_LIB)
+ add_library(${name}_static STATIC ${ARGN})
+ set_target_properties(${name}_static PROPERTIES
+ OUTPUT_NAME ${name}${STATIC_POSTFIX}
+ VERSION ${thrift_VERSION}
+ SOVERSION ${thrift_VERSION} )
+ install(TARGETS ${name}_static
+ RUNTIME DESTINATION "${BIN_INSTALL_DIR}"
+ LIBRARY DESTINATION "${LIB_INSTALL_DIR}"
+ ARCHIVE DESTINATION "${LIB_INSTALL_DIR}"
+ PUBLIC_HEADER DESTINATION "${INCLUDE_INSTALL_DIR}")
+endif()
+
+endmacro(ADD_LIBRARY_THRIFT)
+
+
+macro(TARGET_INCLUDE_DIRECTORIES_THRIFT name)
+
+if(WITH_SHARED_LIB)
+ target_include_directories(${name} ${ARGN})
+endif()
+
+if(WITH_STATIC_LIB)
+ target_include_directories(${name}_static ${ARGN})
+endif()
+
+endmacro(TARGET_INCLUDE_DIRECTORIES_THRIFT)
+
+
+macro(TARGET_LINK_LIBRARIES_THRIFT name)
+
+if(WITH_SHARED_LIB)
+ target_link_libraries(${name} ${ARGN})
+endif()
+
+if(WITH_STATIC_LIB)
+ target_link_libraries(${name}_static ${ARGN})
+endif()
+
+endmacro(TARGET_LINK_LIBRARIES_THRIFT)
+
+
+macro(LINK_AGAINST_THRIFT_LIBRARY target libname)
+
+if (WITH_SHARED_LIB)
+ target_link_libraries(${target} ${libname})
+elseif (WITH_STATIC_LIB)
+ target_link_libraries(${target} ${libname}_static)
+else()
+ message(FATAL_ERROR "Not linking with shared or static libraries?")
+endif()
+
+endmacro(LINK_AGAINST_THRIFT_LIBRARY)
+
+
+macro(TARGET_LINK_LIBRARIES_THRIFT_AGAINST_THRIFT_LIBRARY target libname)
+
+if(WITH_SHARED_LIB)
+ target_link_libraries(${target} ${ARGN} ${libname})
+endif()
+
+if(WITH_STATIC_LIB)
+ target_link_libraries(${target}_static ${ARGN} ${libname}_static)
+endif()
+
+endmacro(TARGET_LINK_LIBRARIES_THRIFT_AGAINST_THRIFT_LIBRARY)
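These macros exist so each library CMakeLists.txt can declare one logical library and get the shared and static variants (the `_static` suffix plus `STATIC_POSTFIX`) for free. A sketch of how a consumer might call them; the target and file names are invented, and the real call sites live in library CMakeLists.txt files that are not part of this change:

    include(ThriftMacros)

    ADD_LIBRARY_THRIFT(thriftdemo demo_a.cpp demo_b.cpp)   # thriftdemo and/or thriftdemo_static
    TARGET_INCLUDE_DIRECTORIES_THRIFT(thriftdemo PUBLIC src)
    TARGET_LINK_LIBRARIES_THRIFT(thriftdemo ${Boost_LIBRARIES})
    LINK_AGAINST_THRIFT_LIBRARY(some_test thriftdemo)      # picks the shared or static variant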
diff --git a/vendor/github.com/apache/thrift/build/cmake/android-toolchain.cmake b/vendor/github.com/apache/thrift/build/cmake/android-toolchain.cmake
new file mode 100644
index 000000000..15f3d002a
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/cmake/android-toolchain.cmake
@@ -0,0 +1,26 @@
+set(ANDROID_NDK "/opt/android-ndk" CACHE PATH "Path to the Android NDK")
+set(ANDROID_PLATFORM "android-15" CACHE STRING "Android platform level")
+set(ANDROID_ARCH "arch-arm" CACHE STRING "Android architecture")
+set(ANDROID_TOOL_ARCH "android-arm" CACHE STRING "Android toolchain architecture")
+set(ANDROID_CPU "armeabi-v7a" CACHE STRING "Android CPU/ABI")
+set(ANDROID_GCC_VERSION 4.9 CACHE STRING "Android GCC version")
+set(HOST_ARCH linux-x86_64 CACHE STRING "Host architecture of the NDK prebuilt tools")
+
+set(CMAKE_SYSTEM_NAME Android)
+set(ANDROID_SYSROOT "${ANDROID_NDK}/platforms/${ANDROID_PLATFORM}/${ANDROID_ARCH}")
+set(ANDROID_TRIPLET arm-linux-androideabi)
+set(ANDROID_STL "${ANDROID_NDK}/sources/cxx-stl/gnu-libstdc++/${ANDROID_GCC_VERSION}")
+
+set(_COMPILER_ROOT ${ANDROID_NDK}/prebuilt/${ANDROID_TRIPLET}-${ANDROID_GCC_VERSION}/prebuilt/${HOST_ARCH})
+set(CMAKE_C_COMPILER ${_COMPILER_ROOT}/bin/${ANDROID_TRIPLET}-gcc)
+set(CMAKE_CXX_COMPILER ${_COMPILER_ROOT}/bin/${ANDROID_TRIPLET}-g++)
+
+include_directories(
+ ${ANDROID_STL}/include
+ ${ANDROID_STL}/libs/${ANDROID_CPU}/include)
+
+set(CMAKE_FIND_ROOT_PATH ${ANDROID_SYSROOT})
+
+set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
+set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
+set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
diff --git a/vendor/github.com/apache/thrift/build/cmake/config.h.in b/vendor/github.com/apache/thrift/build/cmake/config.h.in
new file mode 100644
index 000000000..083bc55ec
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/cmake/config.h.in
@@ -0,0 +1,154 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* config.h generated by CMake from config.h.in */
+
+#ifndef CONFIG_H
+#define CONFIG_H
+
+
+/* Name of package */
+#cmakedefine PACKAGE "${PACKAGE}"
+
+/* Define to the address where bug reports for this package should be sent. */
+#cmakedefine PACKAGE_BUGREPORT "${PACKAGE_BUGREPORT}"
+
+/* Define to the full name of this package. */
+#cmakedefine PACKAGE_NAME "${PACKAGE_NAME}"
+
+/* Define to the one symbol short name of this package. */
+#cmakedefine PACKAGE_TARNAME "${PACKAGE_TARNAME}"
+
+/* Define to the home page for this package. */
+#cmakedefine PACKAGE_URL "${PACKAGE_URL}"
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "${PACKAGE_VERSION}"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "${PACKAGE_STRING}"
+
+/************************** DEFINES *************************/
+
+/* Define if the AI_ADDRCONFIG symbol is unavailable */
+#cmakedefine AI_ADDRCONFIG 0
+
+/* Possible value for SIGNED_RIGHT_SHIFT_IS */
+/* TODO: This is just set to 1 for the moment
+ port the macro aclocal/ax_signed_right_shift.m4 to CMake to make this work */
+#define ARITHMETIC_RIGHT_SHIFT 1
+
+/* Indicates the effect of the right shift operator on negative signed
+ integers */
+/* TODO: This is just set to 1 for the moment */
+#define SIGNED_RIGHT_SHIFT_IS 1
+
+/* Use *.h extension for parser header file */
+/* TODO: This might not be necessary anymore as it is set only for automake < 1.11
+ see: aclocal/ac_prog_bison.m4 */
+#cmakedefine BISON_USE_PARSER_H_EXTENSION 1
+
+/* replaces POSIX pthread by boost::thread */
+#cmakedefine USE_BOOST_THREAD 1
+
+/* replaces POSIX pthread by std::thread */
+#cmakedefine USE_STD_THREAD 1
+
+/* Define to 1 if strerror_r returns char *. */
+#cmakedefine STRERROR_R_CHAR_P 1
+
+
+/************************** HEADER FILES *************************/
+
+/* Define to 1 if you have the <arpa/inet.h> header file. */
+#cmakedefine HAVE_ARPA_INET_H 1
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#cmakedefine HAVE_FCNTL_H 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#cmakedefine HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the <netdb.h> header file. */
+#cmakedefine HAVE_NETDB_H 1
+
+/* Define to 1 if you have the <netinet/in.h> header file. */
+#cmakedefine HAVE_NETINET_IN_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#cmakedefine HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#cmakedefine HAVE_UNISTD_H 1
+
+/* Define to 1 if you have the <pthread.h> header file. */
+#cmakedefine HAVE_PTHREAD_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#cmakedefine HAVE_SYS_TIME_H 1
+
+/* Define to 1 if you have the <sys/param.h> header file. */
+#cmakedefine HAVE_SYS_PARAM_H 1
+
+/* Define to 1 if you have the <sys/resource.h> header file. */
+#cmakedefine HAVE_SYS_RESOURCE_H 1
+
+/* Define to 1 if you have the <sys/socket.h> header file. */
+#cmakedefine HAVE_SYS_SOCKET_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#cmakedefine HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/un.h> header file. */
+#cmakedefine HAVE_SYS_UN_H 1
+
+/* Define to 1 if you have the <sys/poll.h> header file. */
+#cmakedefine HAVE_SYS_POLL_H 1
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+#cmakedefine HAVE_SYS_SELECT_H 1
+
+/* Define to 1 if you have the <sched.h> header file. */
+#cmakedefine HAVE_SCHED_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/*************************** FUNCTIONS ***************************/
+
+/* Define to 1 if you have the `gethostbyname' function. */
+#cmakedefine HAVE_GETHOSTBYNAME 1
+
+/* Define to 1 if you have the `gethostbyname_r' function. */
+#cmakedefine HAVE_GETHOSTBYNAME_R 1
+
+/* Define to 1 if you have the `strerror_r' function. */
+#cmakedefine HAVE_STRERROR_R 1
+
+/* Define to 1 if you have the `sched_get_priority_max' function. */
+#cmakedefine HAVE_SCHED_GET_PRIORITY_MAX 1
+
+/* Define to 1 if you have the `sched_get_priority_min' function. */
+#cmakedefine HAVE_SCHED_GET_PRIORITY_MIN 1
+
+
+/* Define to 1 if strerror_r returns char *. */
+#cmakedefine STRERROR_R_CHAR_P 1
+
+#endif
diff --git a/vendor/github.com/apache/thrift/build/cmake/mingw32-toolchain.cmake b/vendor/github.com/apache/thrift/build/cmake/mingw32-toolchain.cmake
new file mode 100644
index 000000000..864c0ebe4
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/cmake/mingw32-toolchain.cmake
@@ -0,0 +1,24 @@
+# CMake mingw32 cross compile toolchain file
+
+# the name of the target operating system
+SET(CMAKE_SYSTEM_NAME Windows)
+
+# which compilers to use for C and C++
+SET(CMAKE_C_COMPILER i586-mingw32msvc-gcc)
+SET(CMAKE_CXX_COMPILER i586-mingw32msvc-g++)
+SET(CMAKE_RC_COMPILER i586-mingw32msvc-windres)
+
+# here is the target environment located
+SET(CMAKE_FIND_ROOT_PATH /usr/i586-mingw32msvc)
+
+# adjust the default behaviour of the FIND_XXX() commands:
+# search headers and libraries in the target environment, search
+# programs in the host environment
+set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
+set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
+set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
+
+set(BUILD_SHARED_LIBS OFF)
+SET(CMAKE_EXE_LINKER_FLAGS "-static")
+set(CMAKE_C_FLAGS ${CMAKE_C_FLAGS} "-static-libgcc")
+set(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS} "-static-libstdc++")
diff --git a/vendor/github.com/apache/thrift/build/docker/README.md b/vendor/github.com/apache/thrift/build/docker/README.md
new file mode 100644
index 000000000..85cb3b2ae
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/docker/README.md
@@ -0,0 +1,27 @@
+# Apache Thrift Docker containers
+A set of docker containers used to build and test Apache Thrift
+
+### Available Containers
+
+* Ubuntu - based on ubuntu:trusty (14.04)
+* Centos - based on centos:6.6
+
+## Dependencies
+
+* A working Docker environment. A Vagrantfile is provided which will set up an Ubuntu host and a working Docker environment, as well as build the Apache Thrift Docker container for testing and development.
+
+## Usage
+From the Apache Thrift code base root
+
+* Build
+
+ docker build -t thrift build/docker/ubuntu
+
+ or
+
+ docker build -t thrift build/docker/centos
+
+* Run
+
+ docker run -v $(pwd):/thrift/src -it thrift /bin/bash
+
diff --git a/vendor/github.com/apache/thrift/build/docker/Vagrantfile b/vendor/github.com/apache/thrift/build/docker/Vagrantfile
new file mode 100644
index 000000000..5eac6e686
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/docker/Vagrantfile
@@ -0,0 +1,59 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Base system bootstrap script
+$bootstrap_script = <<__BOOTSTRAP__
+echo "Provisioning defaults"
+
+sudo apt-get update -y
+sudo apt-get upgrade -y
+
+# Install default packages
+sudo apt-get install -y build-essential curl git
+
+# Install latest Docker version
+sudo curl -sSL https://get.docker.io/gpg | sudo apt-key add -
+sudo echo "deb http://get.docker.io/ubuntu docker main" > /etc/apt/sources.list.d/docker.list
+sudo apt-get update -y
+sudo apt-get install -y linux-image-extra-`uname -r` aufs-tools
+sudo apt-get install -y lxc-docker
+
+echo "Finished provisioning defaults"
+__BOOTSTRAP__
+
+Vagrant.configure("2") do |config|
+ config.vm.box = "trusty64"
+ config.vm.box_url = "https://cloud-images.ubuntu.com/vagrant/trusty/current/trusty-server-cloudimg-amd64-vagrant-disk1.box"
+ config.ssh.forward_agent = true
+
+ config.vm.provider :virtualbox do |vbox|
+ vbox.customize ["modifyvm", :id, "--memory", "1024"]
+ vbox.customize ["modifyvm", :id, "--cpus", "2"]
+ end
+
+ # Setup the default bootstrap script for our ubuntu base box image
+ config.vm.provision "shell", inline: $bootstrap_script
+
+ # Setup the custom docker image from our Ubuntu Dockerfile
+ config.vm.provision "docker" do |d|
+ d.build_image "/vagrant/ubuntu", args: "-t thrift"
+ end
+
+ # Setup the custom docker image from our Centos Dockerfile
+ #config.vm.provision "docker" do |d|
+ # d.build_image "/vagrant/centos", args: "-t thrift-centos"
+ #end
+
+end
diff --git a/vendor/github.com/apache/thrift/build/docker/centos/Dockerfile b/vendor/github.com/apache/thrift/build/docker/centos/Dockerfile
new file mode 100644
index 000000000..974823bfd
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/docker/centos/Dockerfile
@@ -0,0 +1,146 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Apache Thrift Docker build environment for Centos
+#
+# Known missing client libraries:
+# - D
+# - Haxe
+# - Lua
+#
+
+FROM centos:7
+MAINTAINER Apache Thrift
+
+RUN yum install -y epel-release
+
+# General dependencies
+RUN yum install -y \
+ tar \
+ m4 \
+ perl \
+ clang \
+ gcc \
+ gcc-c++ \
+ git \
+ libtool \
+ autoconf \
+ make \
+ bison \
+ bison-devel \
+ flex
+
+# C++ dependencies
+RUN yum install -y \
+ boost-devel-static \
+ zlib-devel \
+ openssl-devel \
+ libevent-devel
+
+# Java Dependencies
+RUN yum install -y \
+ ant \
+ junit \
+ ant-junit \
+ java-1.7.0-openjdk-devel
+
+# Python Dependencies
+RUN yum install -y \
+ python-devel \
+ python-pip \
+ python-setuptools \
+ python-six \
+ python-twisted-web && \
+ pip install -U backports.ssl_match_hostname ipaddress tornado
+
+# Ruby Dependencies
+RUN yum install -y \
+ ruby \
+ ruby-devel \
+ rubygems && \
+ gem install bundler rake
+
+# Perl Dependencies
+RUN yum install -y \
+ perl-Bit-Vector \
+ perl-Class-Accessor \
+ perl-ExtUtils-MakeMaker \
+ perl-Test-Simple \
+ perl-IO-Socket-SSL \
+ perl-Net-SSLeay \
+ perl-Crypt-SSLeay
+
+# PHP Dependencies
+RUN yum install -y \
+ php \
+ php-devel \
+ php-pear \
+ re2c \
+ php-phpunit-PHPUnit \
+ bzip2
+
+# GLibC Dependencies
+RUN yum install -y glib2-devel
+
+# Erlang Dependencies
+RUN curl -sSL http://packages.erlang-solutions.com/rpm/centos/erlang_solutions.repo -o /etc/yum.repos.d/erlang_solutions.repo && \
+ yum install -y \
+ erlang-kernel \
+ erlang-erts \
+ erlang-stdlib \
+ erlang-eunit \
+ erlang-rebar \
+ erlang-tools
+
+# Go Dependencies
+RUN curl -sSL https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz | tar -C /usr/local/ -xz
+ENV PATH /usr/local/go/bin:$PATH
+
+# Haskell Dependencies
+RUN yum -y install haskell-platform
+
+# Node.js Dependencies
+RUN yum install -y \
+ nodejs \
+ nodejs-devel \
+ npm
+
+# C# Dependencies
+RUN yum install -y \
+ mono-core \
+ mono-devel \
+ mono-web-devel \
+ mono-extras
+
+# Rust
+RUN curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain 1.17.0
+ENV PATH /root/.cargo/bin:$PATH
+
+# MinGW Dependencies
+RUN yum install -y \
+ mingw32-binutils \
+ mingw32-crt \
+ mingw32-nsis
+
+# CMake
+RUN curl -sSL https://cmake.org/files/v3.4/cmake-3.4.0.tar.gz | tar -xz && \
+ cd cmake-3.4.0 && ./bootstrap && make -j4 && make install && \
+ cd .. && rm -rf cmake-3.4.0
+
+# Clean up
+RUN rm -rf /tmp/* && \
+ yum clean all
+
+ENV THRIFT_ROOT /thrift
+RUN mkdir -p $THRIFT_ROOT/src
+COPY Dockerfile $THRIFT_ROOT/
+WORKDIR $THRIFT_ROOT/src
diff --git a/vendor/github.com/apache/thrift/build/docker/centos6/Dockerfile b/vendor/github.com/apache/thrift/build/docker/centos6/Dockerfile
new file mode 100644
index 000000000..5567ab7a4
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/docker/centos6/Dockerfile
@@ -0,0 +1,56 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Apache Thrift Docker build environment for CentOS 6
+#
+# This file is intended for testing old packages that are not available for
+# the latest Ubuntu LTS/Debian/CentOS. Currently, it is only used for Python 2.6.
+#
+
+FROM centos:6
+MAINTAINER Apache Thrift
+
+RUN yum install -y epel-release && \
+ yum install -y \
+ autoconf \
+ bison \
+ bison-devel \
+ clang \
+ flex \
+ gcc \
+ gcc-c++ \
+ git \
+ libtool \
+ m4 \
+ make \
+ perl \
+ tar \
+ python-devel \
+ python-setuptools \
+ python-twisted-web \
+ python-pip \
+ && yum clean all
+
+# optional dependencies
+# skipping ipaddress and backports.ssl_match_hostname to test legacy callback
+# RUN pip install ipaddress backports.ssl_match_hostname tornado
+RUN pip install tornado
+
+# CMake
+RUN curl -sSL https://cmake.org/files/v3.4/cmake-3.4.1.tar.gz | tar -xz && \
+ cd cmake-3.4.1 && ./bootstrap && make -j4 && make install && \
+ cd .. && rm -rf cmake-3.4.1
+
+ENV THRIFT_ROOT /thrift
+RUN mkdir -p $THRIFT_ROOT/src
+COPY Dockerfile $THRIFT_ROOT/
+WORKDIR $THRIFT_ROOT/src
diff --git a/vendor/github.com/apache/thrift/build/docker/check_unmodified.sh b/vendor/github.com/apache/thrift/build/docker/check_unmodified.sh
new file mode 100755
index 000000000..9d5fa2672
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/docker/check_unmodified.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# Download prebuilt docker image and compare Dockerfile hash values
+
+set -ex
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+DISTRO=$1
+SRC_IMG=thrift/thrift-build:$DISTRO
+
+function try_pull {
+ docker pull $SRC_IMG
+ cd ${SCRIPT_DIR}/$DISTRO
+ docker run $SRC_IMG bash -c 'cd .. && sha512sum Dockerfile' > .Dockerfile.sha512
+ sha512sum -c .Dockerfile.sha512
+}
+
+if try_pull; then
+ echo Dockerfile seems identical. No need to rebuild from scratch.
+ docker tag thrift/thrift-build:$DISTRO thrift-build:$DISTRO
+else
+ echo Either the Dockerfile has changed or the pull failed. Need to build a brand new one.
+ exit 1
+fi
diff --git a/vendor/github.com/apache/thrift/build/docker/debian/Dockerfile b/vendor/github.com/apache/thrift/build/docker/debian/Dockerfile
new file mode 100644
index 000000000..8aa0902c3
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/docker/debian/Dockerfile
@@ -0,0 +1,204 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Apache Thrift Docker build environment for Debian
+#
+# Known missing client libraries:
+# - dotnetcore
+# - rust
+
+FROM buildpack-deps:jessie-scm
+MAINTAINER Apache Thrift
+
+ENV DEBIAN_FRONTEND noninteractive
+
+# Add apt sources
+# jessie-backports for cmake and some ruby bits
+RUN echo "deb http://ftp.debian.org/debian jessie-backports main" > /etc/apt/sources.list.d/jessie-backports.list
+
+# Dart
+RUN curl https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - && \
+ curl https://storage.googleapis.com/download.dartlang.org/linux/debian/dart_stable.list > /etc/apt/sources.list.d/dart_stable.list && \
+ sed -i /etc/apt/sources.list.d/dart_stable.list -e 's/https:/http:/g'
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+`# General dependencies` \
+ bison \
+ build-essential \
+ clang \
+ debhelper \
+ flex \
+ pkg-config && \
+ apt-get -t jessie-backports install -y --no-install-recommends cmake
+
+RUN apt-get install -y --no-install-recommends \
+`# C++ dependencies` \
+ libboost-dev \
+ libboost-filesystem-dev \
+ libboost-program-options-dev \
+ libboost-system-dev \
+ libboost-test-dev \
+ libboost-thread-dev \
+ libevent-dev \
+ libssl-dev \
+ qt5-default \
+ qtbase5-dev \
+ qtbase5-dev-tools
+
+RUN apt-get install -y --no-install-recommends \
+`# Java dependencies` \
+ ant \
+ ant-optional \
+ openjdk-7-jdk \
+ maven
+
+RUN apt-get install -y --no-install-recommends \
+`# Python dependencies` \
+ python-all \
+ python-all-dbg \
+ python-all-dev \
+ python-pip \
+ python-setuptools \
+ python-twisted \
+ python-zope.interface \
+ python3-all \
+ python3-all-dbg \
+ python3-all-dev \
+ python3-setuptools \
+ python3-pip
+
+RUN apt-get install -y --no-install-recommends \
+`# Ruby dependencies` \
+ ruby \
+ ruby-dev \
+`# Perl dependencies` \
+ libbit-vector-perl \
+ libclass-accessor-class-perl \
+ libcrypt-ssleay-perl \
+ libio-socket-ssl-perl \
+ libnet-ssleay-perl
+
+RUN apt-get -t jessie-backports install -y ruby-bundler
+RUN apt-get install -y --no-install-recommends \
+`# Php dependencies` \
+ php5 \
+ php5-dev \
+ php5-cli \
+ php-pear \
+ re2c \
+ phpunit \
+`# GlibC dependencies` \
+ libglib2.0-dev
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+`# Erlang dependencies` \
+ erlang-base \
+ erlang-eunit \
+ erlang-dev \
+ erlang-tools \
+ rebar
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+`# Haskell dependencies` \
+ ghc \
+ cabal-install \
+`# Haxe dependencies` \
+ neko \
+ neko-dev \
+ libneko0
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+`# Node.js dependencies` \
+ nodejs \
+ nodejs-dev \
+ nodejs-legacy \
+ npm
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+`# CSharp dependencies` \
+ libmono-system-web2.0-cil \
+ mono-devel
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+`# D dependencies` \
+ xdg-utils \
+`# Dart dependencies` \
+ dart \
+`# Lua dependencies` \
+ lua5.2 \
+ lua5.2-dev \
+`# MinGW dependencies` \
+ mingw32 \
+ mingw32-binutils \
+`# mingw32-runtime` \
+ nsis \
+`# Clean up` \
+ && rm -rf /var/cache/apt/* && \
+ rm -rf /var/lib/apt/lists/* && \
+ rm -rf /tmp/* && \
+ rm -rf /var/tmp/*
+
+# Ruby
+RUN gem install bundler --no-ri --no-rdoc
+
+# Python optional dependencies
+RUN pip2 install -U ipaddress backports.ssl_match_hostname tornado
+RUN pip3 install -U backports.ssl_match_hostname tornado
+
+# Go
+RUN curl -sSL https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz | tar -C /usr/local/ -xz
+ENV PATH /usr/local/go/bin:$PATH
+
+# Haxe
+RUN mkdir -p /usr/lib/haxe && \
+ wget -O - https://github.com/HaxeFoundation/haxe/releases/download/3.2.1/haxe-3.2.1-linux64.tar.gz | \
+ tar -C /usr/lib/haxe --strip-components=1 -xz && \
+ ln -s /usr/lib/haxe/haxe /usr/bin/haxe && \
+ ln -s /usr/lib/haxe/haxelib /usr/bin/haxelib && \
+ mkdir -p /usr/lib/haxe/lib && \
+ chmod -R 777 /usr/lib/haxe/lib && \
+ haxelib setup /usr/lib/haxe/lib && \
+ haxelib install hxcpp
+
+# D
+RUN curl -sSL http://downloads.dlang.org/releases/2.x/2.070.0/dmd_2.070.0-0_amd64.deb -o /tmp/dmd_2.070.0-0_amd64.deb && \
+ dpkg -i /tmp/dmd_2.070.0-0_amd64.deb && \
+ rm /tmp/dmd_2.070.0-0_amd64.deb && \
+ curl -sSL https://github.com/D-Programming-Deimos/openssl/archive/master.tar.gz| tar xz && \
+ curl -sSL https://github.com/D-Programming-Deimos/libevent/archive/master.tar.gz| tar xz && \
+ mkdir -p /usr/include/dmd/druntime/import/deimos /usr/include/dmd/druntime/import/C && \
+ mv libevent-master/deimos/* openssl-master/deimos/* /usr/include/dmd/druntime/import/deimos/ && \
+ mv libevent-master/C/* openssl-master/C/* /usr/include/dmd/druntime/import/C/ && \
+ rm -rf libevent-master openssl-master && \
+ echo 'gcc -Wl,--no-as-needed $*' > /usr/local/bin/gcc-dmd && \
+ chmod 755 /usr/local/bin/gcc-dmd && \
+ echo 'CC=/usr/local/bin/gcc-dmd' >> /etc/dmd.conf
+
+# Dart
+ENV PATH /usr/lib/dart/bin:$PATH
+
+# OCaml
+RUN echo 'deb http://ppa.launchpad.net/avsm/ppa/ubuntu trusty main' > /etc/apt/sources.list.d/avsm-official-ocaml.list && \
+ gpg --keyserver keyserver.ubuntu.com --recv 61707B09 && \
+ gpg --export --armor 61707B09 | apt-key add - && \
+ apt-get update && \
+ apt-get install -y ocaml opam && \
+ opam init && \
+ opam install oasis
+
+# Force utf8 locale to successfully build Haskell tf-random
+ENV LC_ALL C.UTF-8
+
+ENV THRIFT_ROOT /thrift
+RUN mkdir -p $THRIFT_ROOT/src
+COPY Dockerfile $THRIFT_ROOT/
+WORKDIR $THRIFT_ROOT/src
diff --git a/vendor/github.com/apache/thrift/build/docker/scripts/autotools.sh b/vendor/github.com/apache/thrift/build/docker/scripts/autotools.sh
new file mode 100755
index 000000000..8388f728c
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/docker/scripts/autotools.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+set -ev
+
+./bootstrap.sh
+./configure $*
+make check -j3
diff --git a/vendor/github.com/apache/thrift/build/docker/scripts/cmake.sh b/vendor/github.com/apache/thrift/build/docker/scripts/cmake.sh
new file mode 100755
index 000000000..6508e7108
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/docker/scripts/cmake.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+set -ev
+
+CMAKE_FLAGS=$*
+MAKEPROG=make
+
+if ninja --version >/dev/null 2>&1; then
+ MAKEPROG=ninja
+ CMAKE_FLAGS="-GNinja $CMAKE_FLAGS"
+fi
+
+mkdir -p cmake_build && cd cmake_build
+cmake $CMAKE_FLAGS ..
+for LIB in $BUILD_LIBS; do
+ if ! grep "^BUILD_${LIB}:BOOL=ON$" CMakeCache.txt ; then
+ echo "failed to configure $LIB"
+ exit 1
+ fi
+done
+$MAKEPROG -j3
+cpack
+ctest -VV
+# was: -E "(concurrency_test|processor_test)"
diff --git a/vendor/github.com/apache/thrift/build/docker/scripts/cross-test.sh b/vendor/github.com/apache/thrift/build/docker/scripts/cross-test.sh
new file mode 100755
index 000000000..43581a5f3
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/docker/scripts/cross-test.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
+set -ev
+
+./bootstrap.sh
+./configure --enable-tutorial=no
+make -j3 precross
+
+set +e
+make cross$1
+
+RET=$?
+if [ $RET -ne 0 ]; then
+ cat test/log/unexpected_failures.log
+fi
+
+exit $RET
diff --git a/vendor/github.com/apache/thrift/build/docker/scripts/dpkg.sh b/vendor/github.com/apache/thrift/build/docker/scripts/dpkg.sh
new file mode 100755
index 000000000..3ba0cd482
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/docker/scripts/dpkg.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+set -ev
+
+dpkg-buildpackage -tc -us -uc
+ls -al ..
diff --git a/vendor/github.com/apache/thrift/build/docker/scripts/make-dist.sh b/vendor/github.com/apache/thrift/build/docker/scripts/make-dist.sh
new file mode 100755
index 000000000..5a3681e18
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/docker/scripts/make-dist.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+set -ev
+
+./bootstrap.sh
+./configure $*
+make dist
+tar xvf thrift-*.tar.gz
+cd thrift-*
+./build/docker/scripts/cmake.sh
diff --git a/vendor/github.com/apache/thrift/build/docker/scripts/ubsan.sh b/vendor/github.com/apache/thrift/build/docker/scripts/ubsan.sh
new file mode 100755
index 000000000..d39cc8361
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/docker/scripts/ubsan.sh
@@ -0,0 +1,35 @@
+#!/bin/sh
+
+set -ex
+
+# Wraps autotools.sh, but each binary crashes if it exhibits undefined behavior. See
+# http://releases.llvm.org/3.8.0/tools/clang/docs/UndefinedBehaviorSanitizer.html
+
+# Install a more recent clang than default:
+sudo apt-get update
+sudo apt-get install -y --no-install-recommends clang-3.8 llvm-3.8-dev
+export CC=clang-3.8
+export CXX=clang++-3.8
+
+# Set the undefined behavior flags. This crashes on all undefined behavior except for
+# undefined casting, aka "vptr".
+#
+# TODO: fix undefined vptr behavior and turn this option back on.
+export CFLAGS="-fsanitize=undefined -fno-sanitize-recover=undefined"
+# Builds without optimization and with debugging symbols for making crash reports more
+# readable.
+export CFLAGS="${CFLAGS} -O0 -ggdb3"
+export CXXFLAGS="${CFLAGS}"
+export UBSAN_OPTIONS=print_stacktrace=1
+
+# llvm-symbolizer must be on PATH, but the above installation installs a binary called
+# "llvm-symbolizer-3.8", not "llvm-symbolizer". This fixes that with a symlink in a new
+# directory.
+CLANG_PATH="$(mktemp -d)"
+trap "rm -rf ${CLANG_PATH}" EXIT
+ln -s "$(whereis llvm-symbolizer-3.8 | rev | cut -d ' ' -f 1 | rev)" \
+ "${CLANG_PATH}/llvm-symbolizer"
+export PATH="${CLANG_PATH}:${PATH}"
+llvm-symbolizer -version
+
+build/docker/scripts/autotools.sh $*
diff --git a/vendor/github.com/apache/thrift/build/docker/ubuntu/Dockerfile b/vendor/github.com/apache/thrift/build/docker/ubuntu/Dockerfile
new file mode 100644
index 000000000..25089eb36
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/docker/ubuntu/Dockerfile
@@ -0,0 +1,231 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Apache Thrift Docker build environment for Ubuntu
+#
+# Known missing client libraries:
+# - dotnetcore
+
+FROM buildpack-deps:trusty-scm
+MAINTAINER Apache Thrift
+
+ENV DEBIAN_FRONTEND noninteractive
+
+# Add apt sources
+# CMAKE
+RUN apt-get update && \
+ apt-get install -y --no-install-recommends software-properties-common && \
+ add-apt-repository -y ppa:george-edison55/cmake-3.x
+
+# Erlang
+RUN echo 'deb http://packages.erlang-solutions.com/debian trusty contrib' > /etc/apt/sources.list.d/erlang_solutions.list && \
+ curl -sSL https://packages.erlang-solutions.com/debian/erlang_solutions.asc | apt-key add -
+
+# Dart
+RUN curl https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - && \
+ curl https://storage.googleapis.com/download.dartlang.org/linux/debian/dart_stable.list > /etc/apt/sources.list.d/dart_stable.list && \
+ sed -i /etc/apt/sources.list.d/dart_stable.list -e 's/https:/http:/g'
+
+# Consider using a nearby mirror when building locally
+# TODO: Provide option via --build-arg=...
+# RUN sed -i /etc/apt/sources.list -e 's!http://archive.ubuntu.com/ubuntu/!http://your/mirror/!g'
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+`# General dependencies` \
+ bison \
+ build-essential \
+ clang \
+ cmake \
+ debhelper \
+ flex \
+ ninja-build \
+ pkg-config \
+`# Included in buildpack-deps` \
+`# autoconf` \
+`# automake` \
+`# g++` \
+`# git` \
+`# libtool` \
+`# make`
+
+RUN apt-get install -y --no-install-recommends \
+`# C++ dependencies` \
+`# libevent and OpenSSL are needed by D too` \
+ libboost-dev \
+ libboost-filesystem-dev \
+ libboost-program-options-dev \
+ libboost-system-dev \
+ libboost-test-dev \
+ libboost-thread-dev \
+ libevent-dev \
+ libssl-dev \
+ qt5-default \
+ qtbase5-dev \
+ qtbase5-dev-tools
+
+RUN apt-get install -y --no-install-recommends \
+`# Java dependencies` \
+ ant \
+ ant-optional \
+ openjdk-7-jdk \
+ maven
+
+RUN apt-get install -y --no-install-recommends \
+`# Python dependencies` \
+`# TODO:` \
+`# Install twisted and zope.interface via pip. we need twisted at ./configure time, otherwise` \
+`# py.twisted tests are skipped.` \
+ python-all \
+ python-all-dbg \
+ python-all-dev \
+ python-pip \
+ python-setuptools \
+ python-twisted \
+ python-zope.interface \
+ python3-all \
+ python3-all-dbg \
+ python3-all-dev \
+ python3-setuptools \
+ python3-pip
+
+RUN apt-get install -y --no-install-recommends \
+`# Ruby dependencies` \
+ ruby \
+ ruby-dev \
+ ruby-bundler \
+`# Perl dependencies` \
+ libbit-vector-perl \
+ libclass-accessor-class-perl \
+ libcrypt-ssleay-perl \
+ libio-socket-ssl-perl \
+ libnet-ssleay-perl
+
+RUN apt-get install -y --no-install-recommends \
+`# Php dependencies` \
+ php5 \
+ php5-dev \
+ php5-cli \
+ php-pear \
+ re2c \
+ phpunit \
+`# GlibC dependencies` \
+ libglib2.0-dev
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+`# Erlang dependencies` \
+ erlang-base \
+ erlang-eunit \
+ erlang-dev \
+ erlang-tools \
+ rebar
+
+RUN apt-get install -y --no-install-recommends \
+`# Haskell dependencies` \
+ ghc \
+ cabal-install \
+`# Haxe dependencies` \
+ neko \
+ neko-dev \
+ libneko0
+
+# Newer release of nodejs
+RUN curl -sL https://deb.nodesource.com/setup_6.x | bash
+RUN apt-get install -y --no-install-recommends \
+`# Node.js dependencies` \
+ nodejs
+
+# Add mono package repository url to get latest version of mono
+RUN echo "deb http://download.mono-project.com/repo/debian trusty main" | tee /etc/apt/sources.list.d/mono.list
+RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A6A19B38D3D831EF
+RUN apt-get update && apt-get install -y --no-install-recommends \
+`# CSharp dependencies` \
+ mono-devel
+
+RUN apt-get install -y --no-install-recommends \
+`# D dependencies` \
+ xdg-utils \
+`# Dart dependencies` \
+ dart \
+`# Lua dependencies` \
+ lua5.2 \
+ lua5.2-dev \
+`# MinGW dependencies` \
+ mingw32 \
+ mingw32-binutils \
+ mingw32-runtime \
+ nsis \
+`# Clean up` \
+ && rm -rf /var/cache/apt/* && \
+ rm -rf /var/lib/apt/lists/* && \
+ rm -rf /tmp/* && \
+ rm -rf /var/tmp/*
+
+# Ruby
+RUN gem install bundler --no-ri --no-rdoc
+
+# Python optional dependencies
+RUN pip2 install -U ipaddress backports.ssl_match_hostname tornado
+RUN pip3 install -U backports.ssl_match_hostname tornado
+
+# Go
+RUN curl -sSL https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz | tar -C /usr/local/ -xz
+ENV PATH /usr/local/go/bin:$PATH
+
+# Haxe
+RUN mkdir -p /usr/lib/haxe && \
+ wget -O - https://github.com/HaxeFoundation/haxe/releases/download/3.2.1/haxe-3.2.1-linux64.tar.gz | \
+ tar -C /usr/lib/haxe --strip-components=1 -xz && \
+ ln -s /usr/lib/haxe/haxe /usr/bin/haxe && \
+ ln -s /usr/lib/haxe/haxelib /usr/bin/haxelib && \
+ mkdir -p /usr/lib/haxe/lib && \
+ chmod -R 777 /usr/lib/haxe/lib && \
+ haxelib setup /usr/lib/haxe/lib && \
+ haxelib install hxcpp
+
+# Node.js
+# temporarily removed since this breaks the build (and is not needed to test C# code)
+# RUN curl -sSL https://www.npmjs.com/install.sh | sh
+
+# D
+RUN curl -sSL http://downloads.dlang.org/releases/2.x/2.070.0/dmd_2.070.0-0_amd64.deb -o /tmp/dmd_2.070.0-0_amd64.deb && \
+ dpkg -i /tmp/dmd_2.070.0-0_amd64.deb && \
+ rm /tmp/dmd_2.070.0-0_amd64.deb && \
+ curl -sSL https://github.com/D-Programming-Deimos/openssl/archive/master.tar.gz| tar xz && \
+ curl -sSL https://github.com/D-Programming-Deimos/libevent/archive/master.tar.gz| tar xz && \
+ mkdir -p /usr/include/dmd/druntime/import/deimos /usr/include/dmd/druntime/import/C && \
+ mv libevent-master/deimos/* openssl-master/deimos/* /usr/include/dmd/druntime/import/deimos/ && \
+ mv libevent-master/C/* openssl-master/C/* /usr/include/dmd/druntime/import/C/ && \
+ rm -rf libevent-master openssl-master && \
+ echo 'gcc -Wl,--no-as-needed $*' > /usr/local/bin/gcc-dmd && \
+ chmod 755 /usr/local/bin/gcc-dmd && \
+ echo 'CC=/usr/local/bin/gcc-dmd' >> /etc/dmd.conf
+
+# Dart
+ENV PATH /usr/lib/dart/bin:$PATH
+
+# OCaml
+RUN echo 'deb http://ppa.launchpad.net/avsm/ppa/ubuntu trusty main' > /etc/apt/sources.list.d/avsm-official-ocaml.list && \
+ gpg --keyserver keyserver.ubuntu.com --recv 61707B09 && \
+ gpg --export --armor 61707B09 | apt-key add - && \
+ apt-get update && \
+ apt-get install -y ocaml opam && \
+ opam init && \
+ opam install oasis
+
+# Rust
+RUN curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain 1.17.0
+ENV PATH /root/.cargo/bin:$PATH
+
+ENV THRIFT_ROOT /thrift
+RUN mkdir -p $THRIFT_ROOT/src
+COPY Dockerfile $THRIFT_ROOT/
+WORKDIR $THRIFT_ROOT/src
diff --git a/vendor/github.com/apache/thrift/build/travis/installCXXDependencies.sh b/vendor/github.com/apache/thrift/build/travis/installCXXDependencies.sh
new file mode 100755
index 000000000..ac3edf381
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/travis/installCXXDependencies.sh
@@ -0,0 +1,29 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+# Mainly aimed at Travis CI's Ubuntu machines for now
+# see what we need: http://thrift.apache.org/docs/install/ubuntu
+
+# General dependencies
+sudo apt-add-repository "deb http://archive.ubuntu.com/ubuntu/ trusty main restricted" -y
+sudo apt-get update -qq
+
+sudo apt-get install -qq libpango-1.0-0 libqt4-dev qtbase5-dev qtbase5-dev-tools qt5-default libboost-dev libboost-test-dev libboost-program-options-dev libboost-system-dev libboost-filesystem-dev libboost-thread-dev libevent-dev automake libtool flex bison pkg-config g++ libssl-dev make cmake git debhelper bc nsis ninja-build
+dpkg -S /usr/include/boost/version.hpp
diff --git a/vendor/github.com/apache/thrift/build/travis/installDependencies.sh b/vendor/github.com/apache/thrift/build/travis/installDependencies.sh
new file mode 100755
index 000000000..eab8c6b6b
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/travis/installDependencies.sh
@@ -0,0 +1,66 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+SCRIPTPATH=$( cd $(dirname $0) ; pwd -P )
+
+# Mainly aimed at Travis CI's Ubuntu machines for now
+# see what we need: http://thrift.apache.org/docs/install/ubuntu
+
+# Java dependencies
+sudo apt-get install -qq ant openjdk-7-jdk
+sudo update-java-alternatives -s java-1.7.0-openjdk-amd64
+
+# Python dependencies
+sudo apt-get install -qq python-all python-all-dev python-all-dbg python-setuptools python-support python-twisted python-six python3-six
+
+# Ruby dependencies
+sudo apt-get install -qq ruby ruby-dev
+sudo gem install bundler rake
+
+# Perl dependencies
+sudo apt-get install -qq libbit-vector-perl libclass-accessor-class-perl libio-socket-ssl-perl libnet-ssleay-perl libcrypt-ssleay-perl
+
+# Php dependencies
+sudo apt-get install -qq php5 php5-dev php5-cli php-pear re2c
+
+# GlibC dependencies
+sudo apt-get install -qq libglib2.0-dev
+
+# Erlang dependencies
+sudo apt-get install -qq erlang-base erlang-eunit erlang-dev erlang-tools rebar
+
+# GO dependencies
+echo "golang-go golang-go/dashboard boolean false" | debconf-set-selections
+sudo apt-get -y install -qq golang golang-go
+
+# Haskell dependencies
+sudo add-apt-repository -y ppa:hvr/ghc
+sudo apt-get update
+sudo apt-get install cabal-install-1.20 ghc-$GHCVER
+
+# Lua dependencies
+sudo apt-get install -qq lua5.2 lua5.2-dev
+
+# Node.js dependencies
+sudo apt-get install -qq nodejs nodejs-dev npm
+sudo update-alternatives --install /usr/bin/node node /usr/bin/nodejs 10
+
+# CSharp
+sudo apt-get install -qq mono-gmcs mono-devel libmono-system-web2.0-cil
+sudo apt-get install -qq mingw32 mingw32-binutils mingw32-runtime nsis
diff --git a/vendor/github.com/apache/thrift/build/wincpp/README.md b/vendor/github.com/apache/thrift/build/wincpp/README.md
new file mode 100644
index 000000000..a23178040
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/wincpp/README.md
@@ -0,0 +1,219 @@
+
+
+# Building thrift on Windows (Native)
+
+Thrift uses cmake to make it easier to build the project on multiple platforms; however, building a fully functional and production-ready thrift on Windows requires a number of third party libraries to be obtained or built. Once the third party libraries are ready, the right combination of options must be passed to cmake in order to generate the correct environment.
+
+## Summary
+
+These instructions will help you build thrift for windows using Visual
+Studio 2010 or later. The contributed batch files will help you build
+the third party libraries needed for complete thrift functionality as
+well as thrift itself.
+
+These instructions follow a directory layout that looks like the following:
+
+ workspace\
+ build\ - this is where the out-of-tree thrift cmake builds are generated
+ dist\ - this is where the thrift build results end up
+ thirdparty\ - this is where all third party binaries and libraries live
+ build\ - this is where all third party out-of-tree builds are generated
+ (except for openssl, which only builds in-tree)
+ dist\ - this is where all third party distributions end up
+ src\ - this is where all third party source projects live
+ scripts\ - batch files used to set environment variables for builds
+ thrift\ - this is where the thrift source project lives
+
+Create a "workspace" directory somewhere on your system, copy the contents of this
+directory into it, and then clone or unpack thrift into `workspace\thrift`.
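+
+As a rough sketch (the paths below are illustrative only), that setup might look like:
+
+    C:\> MKDIR C:\workspace
+    C:\> XCOPY /E /I path\to\thrift\build\wincpp C:\workspace
+    C:\> git clone https://github.com/apache/thrift.git C:\workspace\thrift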
+
+## Third Party Libraries
+
+Batch scripts are provided to build some third party libraries. You must download them and place them into the directory noted for each. You can use different versions if you prefer; these instructions were made with the versions listed.
+
+> TIP: To modify the versions used in the batch scripts, look in scripts\tpversions.bat.
+
+Build them in the order listed to satisfy their dependencies.
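+
+For example, because `scripts\tpversions.bat` only sets each third party version when the corresponding variable is not already defined, you can override a single version from the same command prompt before running its build script (the version value below is purely illustrative):
+
+    C:\workspace\thirdparty\src> SET TP_LIBEVENT_VERSION=2.1.8
+    C:\workspace\thirdparty\src> build-libevent.bat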
+
+### winflexbison
+
+ source: web site
+ location: https://sourceforge.net/projects/winflexbison/files/win_flex_bison-latest.zip/download
+ version: "latest"
+ directory: workspace\thirdparty\dist\winflexbison
+
+This package is required to build the compiler. This third party package does not need to be built as it is a binary distribution of the "bison" and "flex" tools normally found on Unix boxes.
+
+> TIP: If you are only interested in building the compiler, you can skip the remaining third party libraries.
+
+### zlib
+
+ source: web site
+ location: http://zlib.net/
+ version: 1.2.9
+ directory: workspace\thirdparty\src\zlib-1.2.9
+
+To build, open the appropriate Visual Studio command prompt and then run
+the build-zlib.bat script in thirdparty\src.
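+
+For instance, one possible invocation (the `/yes` flag accepted by these build scripts skips the confirmation prompt):
+
+    C:\workspace\thirdparty\src> build-zlib.bat /yes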
+
+### openssl
+
+ source: web site
+ location: https://www.openssl.org/
+ version: 1.1.0c
+ directory: workspace\thirdparty\src\openssl-1.1.0c
+ depends-on: zlib
+
+If you are using openssl-1.1.0 or later, note that its static builds now use the Microsoft static RTL for release builds, while zlib and libevent use a dynamic runtime by default. Edit the file Configurations/10-main.conf and replace the section contents for "VC-noCE-common" with what appears below to make openssl build with the dynamic runtime instead:
+
+ "VC-noCE-common" => {
+ inherit_from => [ "VC-common" ],
+ template => 1,
+ cflags => add(picker(default => "-DUNICODE -D_UNICODE",
+ debug => "/MDd /Od -DDEBUG -D_DEBUG",
+ release => "/MD /O2"
+ )),
+ bin_cflags => add(picker(debug => "/MDd",
+ release => "/MD",
+ )),
+ bin_lflags => add("/subsystem:console /opt:ref"),
+ ex_libs => add(sub {
+ my @ex_libs = ();
+ push @ex_libs, 'ws2_32.lib' unless $disabled{sock};
+ push @ex_libs, 'gdi32.lib advapi32.lib crypt32.lib user32.lib';
+ return join(" ", @ex_libs);
+ }),
+ },
+
+To build, open the appropriate Visual Studio command prompt and then run
+the build-openssl.bat script in thirdparty\src.
+
+### libevent
+
+ source: git
+ location: https://github.com/nmathewson/Libevent.git
+ use: commit 3821cca1a637f4da4099c9343e7326da00f6981c or later
+ date: Fri Dec 23 16:19:35 2016 +0800 or later
+ version: corresponds to 2.1.7rc + patches
+ directory: workspace\thirdparty\src\libevent-2.1.7rc2
+ depends-on: openssl, zlib
+
+To build, open the appropriate Visual Studio command prompt and then run
+the build-libevent.bat script in thirdparty\src.
+
+### msinttypes
+
+ source: web site
+ location: https://code.google.com/archive/p/msinttypes/downloads
+ version: 26
+ directory: workspace\thirdparty\dist\msinttypes
+
+> TIP: This is only necessary for Visual Studio 2010, which did not include an `inttypes.h` header.
+
+This third party package does not need to be built as it is a distribution of header files.
+
+### boost
+
+ source: web site
+ location: http://boost.teeks99.com/
+ version: 1_62_0
+ directory: workspace\thirdparty\dist\boost_1_62_0
+
+The pre-built binary versions of boost come in self-unpacking executables. Run each of the ones you are interested in and point them at the same thirdparty dist directory.
+
+## Building a Production thrift Compiler
+
+### Prerequisites
+
+* CMake-2.8.12.2 or later
+* Visual Studio 2010 or later
+* thrift source placed into workspace\thrift
+* winflexbison placed into workspace\thirdparty\dist
+
+### Instructions
+
+By following these instructions you will end up with a release mode thrift compiler that is suitable for distribution as it has no external dependencies.
+
+1. Open the appropriate Visual Studio Command Prompt.
+2. `cd workspace`
+3. `build-thrift-compiler.bat`
+
+The batch file uses CMake to generate an out-of-tree build directory in `workspace\build` and then builds the compiler. The resulting `thrift.exe` program is placed into `workspace\dist` in a path that depends on your compiler version and platform. For example, if you use a Visual Studio 2010 x64 Command Prompt, the compiler will be placed into `workspace\dist\thrift-compiler-dev\vc100\x64\Release\thrift.exe`.
+
+#### Details
+
+This section is for those who are curious about the CMake options used in the build process.
+
+CMake takes the source tree as the first argument and uses the remaining arguments for configuration. The batch file `build-thrift-compiler` essentially performs the following commands:
+
+ C:\> CD workspace\build
+ C:\workspace\build> "C:\Program Files\CMake\bin\cmake.exe" ..\thrift
+ -DBISON_EXECUTABLE=..\thirdparty\dist\winflexbison\win_bison.exe
+ -DCMAKE_BUILD_TYPE=Release
+ -DFLEX_EXECUTABLE=..\thirdparty\dist\winflexbison\win_flex.exe
+ -DWITH_MT=ON
+ -DWITH_SHARED_LIB=OFF
+ -G"NMake Makefiles"
+ C:\workspace\build> NMAKE /FMakefile thrift-compiler
+
+WITH_MT controls the dynamic or static runtime library selection. To build a production compiler, the thrift project recommends using the static runtime library to make the executable portable. The batch file sets this.
+
+You can build a Visual Studio project file by following the example but substituting a different generator for the "-G" option. Run `cmake.exe --help` for a list of generators. Typically, this is one of the following on Windows (omit "Win64" to build 32-bit instead):
+
+* "Visual Studio 10 2010 Win64"
+* "Visual Studio 11 2012 Win64"
+* "Visual Studio 12 2013 Win64"
+* "Visual Studio 14 2015 Win64"
+* "Visual Studio 15 2017 Win64"
+
+For example, you can build using a Visual Studio solution file on the command line by doing:
+
+ C:\> CD workspace\build
+ C:\workspace\build> "C:\Program Files\CMake\bin\cmake.exe" ..\thrift
+ -DBISON_EXECUTABLE=..\thirdparty\dist\winflexbison\win_bison.exe
+ -DCMAKE_BUILD_TYPE=Release
+ -DFLEX_EXECUTABLE=..\thirdparty\dist\winflexbison\win_flex.exe
+ -DWITH_MT=ON
+ -DWITH_SHARED_LIB=OFF
+ -G"Visual Studio 14 2015 Win64"
+ C:\workspace\build> MSBUILD "Apache Thrift.sln" /p:Configuration=Release /p:Platform=x64 /t:thrift-compiler
+
+You can also double-click on the solution file to bring it up in Visual Studio and build or debug interactively from there.
+
+## Building the thrift C++ Run-Time Library
+
+These instructions are similar to the compiler build; however, there are additional dependencies on third party libraries needed to build a feature-complete runtime. The resulting static link library for thrift uses a dynamic Microsoft runtime.
+
+1. Open the desired Visual Studio Command Prompt.
+2. `cd workspace`
+3. `build-thrift.bat`
+
+Thrift depends on boost, libevent, openssl, and zlib in order to build with all server and transport types. To use later versions of boost such as 1.62, you will need a recent version of cmake (at least 3.7).
+
+The build-thrift script has options to build debug or release and to optionally disable any of the generation (cmake), build, or test phases. By default, the batch file will generate an out-of-tree build directory inside `workspace\build`, then perform a release build, then run the unit tests. The batch file accepts some option flags to control its behavior:
+
+ :: Flags you can use to change this behavior:
+ ::
+ :: /DEBUG - if building, perform a debug build instead
+ :: /NOGENERATE - skip cmake generation - useful if you
+ :: have already generated a solution and just
+ :: want to build
+ :: /NOBUILD - skip cmake build - useful if you just
+ :: want to generate a solution
+ :: /NOTEST - skip ctest execution
+
+For example if you want to generate the cmake environment without building or running tests:
+
+ C:\workspace> build-thrift.bat /NOBUILD /NOTEST
diff --git a/vendor/github.com/apache/thrift/build/wincpp/build-thrift-compiler.bat b/vendor/github.com/apache/thrift/build/wincpp/build-thrift-compiler.bat
new file mode 100644
index 000000000..b6b42a8d7
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/wincpp/build-thrift-compiler.bat
@@ -0,0 +1,79 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+::
+:: Produces a production thrift compiler suitable for redistribution.
+:: The compiler is linked to runtime statically for maximum portability.
+:: Assumes the thirdparty files for "winflexbison" have been placed
+:: according to the README.md instructions.
+::
+:: Open a Visual Studio Command Prompt of your choosing and then
+:: run this script.
+
+@ECHO OFF
+SETLOCAL EnableDelayedExpansion
+
+IF NOT DEFINED PACKAGE_NAME SET PACKAGE_NAME=thrift
+IF NOT DEFINED PACKAGE_VERSION SET PACKAGE_VERSION=dev
+IF NOT DEFINED SOURCE_DIR SET SOURCEDIR=%~dp0%PACKAGE_NAME%
+IF NOT DEFINED WIN3P_ROOT SET WIN3P_ROOT=%~dp0thirdparty
+
+:: Set COMPILER to (vc100 - vc140) depending on the current environment
+CALL scripts\cl_setcompiler.bat || EXIT /B
+
+:: Set ARCH to either win32 or x64 depending on the current environment
+CALL scripts\cl_setarch.bat || EXIT /B
+
+:: Set GENERATOR for CMake depending on the current environment
+CALL scripts\cl_setgenerator.bat || EXIT /B
+
+IF NOT DEFINED BUILDTYPE (
+ SET BUILDTYPE=Release
+)
+
+ SET BUILDDIR=%~dp0build\%PACKAGE_NAME%-compiler\%PACKAGE_VERSION%\%COMPILER%\
+ SET OUTDIR=%~dp0dist\%PACKAGE_NAME%-compiler-%PACKAGE_VERSION%\%COMPILER%\%ARCH%\%BUILDTYPE%\
+ SET BOOST_LIBDIR=lib%ARCH:~-2,2%-msvc-%COMPILER:~-3,2%.0
+ IF "%BUILDTYPE%" == "Debug" (SET ZLIB_STATIC_SUFFIX=d)
+
+ ECHO/
+ ECHO =========================================================================
+ ECHO Configuration: %PACKAGE_NAME% %PACKAGE_VERSION% %COMPILER%:%ARCH%:%BUILDTYPE% "%GENERATOR%"
+IF DEFINED COMPILERONLY (
+ ECHO COMPILER ONLY
+)
+ ECHO Build Directory: %BUILDDIR%
+ ECHO Install Directory: %OUTDIR%
+ ECHO Source Directory: %SOURCEDIR%
+ ECHO =========================================================================
+ ECHO/
+
+ MKDIR "%BUILDDIR%"
+ CD "%BUILDDIR%" || EXIT /B
+
+ CMAKE.EXE %~dp0thrift ^
+ -G"%GENERATOR%" ^
+ -DBISON_EXECUTABLE=%WIN3P_ROOT%\dist\winflexbison\win_bison.exe ^
+ -DCMAKE_BUILD_TYPE=%BUILDTYPE% ^
+ -DFLEX_EXECUTABLE=%WIN3P_ROOT%\dist\winflexbison\win_flex.exe ^
+ -DWITH_MT=ON ^
+ -DWITH_SHARED_LIB=OFF || EXIT /B
+
+ CD %BUILDDIR%
+
+ CMAKE.EXE --build . --config %BUILDTYPE% --target thrift-compiler || EXIT /B
+ XCOPY /F /Y %BUILDDIR%\bin\%BUILDTYPE%\thrift.exe %OUTDIR%
+
+ENDLOCAL
+EXIT /B
diff --git a/vendor/github.com/apache/thrift/build/wincpp/build-thrift.bat b/vendor/github.com/apache/thrift/build/wincpp/build-thrift.bat
new file mode 100644
index 000000000..ba3e47675
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/wincpp/build-thrift.bat
@@ -0,0 +1,164 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+::
+:: Generates a Visual Studio solution for thrift and then builds it.
+:: Assumes third party libraries have been built or placed already.
+::
+:: Open a Visual Studio Command Prompt of your choosing and then
+:: run this script.
+::
+:: Normally the script will run cmake to generate a solution, then
+:: perform a build, then run tests on the complete thrift library
+:: in release mode.
+::
+:: Flags you can use to change this behavior:
+::
+:: /DEBUG - debug instead of release
+:: /IDE - launch Visual Studio with a path set
+:: up correctly to run tests instead of
+:: performing any other actions, i.e.
+:: implies setting the next three flags
+:: /NOGENERATE - skip cmake generation - useful if you
+:: have already generated a solution and just
+:: want to build
+:: /NOBUILD - skip cmake build - useful if you just
+:: want to generate a solution
+:: /NOTEST - skip ctest execution
+::
+
+@ECHO OFF
+SETLOCAL EnableDelayedExpansion
+
+:: Sets variables for third party versions used in build
+CALL scripts\tpversions.bat || EXIT /B
+
+IF NOT DEFINED PACKAGE_NAME SET PACKAGE_NAME=thrift
+IF NOT DEFINED PACKAGE_VERSION SET PACKAGE_VERSION=dev
+IF NOT DEFINED SOURCE_DIR SET SOURCEDIR=%~dp0%PACKAGE_NAME%
+IF NOT DEFINED WIN3P_ROOT SET WIN3P_ROOT=%~dp0thirdparty
+
+:: Set COMPILER to (vc100 - vc140) depending on the current environment
+CALL scripts\cl_setcompiler.bat || EXIT /B
+
+:: Set ARCH to either win32 or x64 depending on the current environment
+CALL scripts\cl_setarch.bat || EXIT /B
+
+:: Set GENERATOR for CMake depending on the current environment
+CALL scripts\cl_setgenerator.bat || EXIT /B
+
+:: Defaults
+
+IF NOT DEFINED BUILDTYPE SET BUILDTYPE=Release
+SET OPT_IDE=0
+SET OPT_BUILD=1
+SET OPT_GENERATE=1
+SET OPT_TEST=1
+
+:: Apply Flags
+
+IF /I "%1" == "/DEBUG" SET BUILDTYPE=Debug
+IF /I "%2" == "/DEBUG" SET BUILDTYPE=Debug
+IF /I "%3" == "/DEBUG" SET BUILDTYPE=Debug
+IF /I "%1" == "/IDE" SET OPT_IDE=1
+IF /I "%2" == "/IDE" SET OPT_IDE=1
+IF /I "%3" == "/IDE" SET OPT_IDE=1
+IF /I "%1" == "/NOBUILD" SET OPT_BUILD=0
+IF /I "%2" == "/NOBUILD" SET OPT_BUILD=0
+IF /I "%3" == "/NOBUILD" SET OPT_BUILD=0
+IF /I "%1" == "/NOGENERATE" SET OPT_GENERATE=0
+IF /I "%2" == "/NOGENERATE" SET OPT_GENERATE=0
+IF /I "%3" == "/NOGENERATE" SET OPT_GENERATE=0
+IF /I "%1" == "/NOTEST" SET OPT_TEST=0
+IF /I "%2" == "/NOTEST" SET OPT_TEST=0
+IF /I "%3" == "/NOTEST" SET OPT_TEST=0
+
+IF %OPT_IDE% == 1 (
+ SET OPT_GENERATE=0
+ SET OPT_BUILD=0
+ SET OPT_TEST=0
+)
+
+ SET BUILDDIR=%~dp0build\%PACKAGE_NAME%\%PACKAGE_VERSION%\%COMPILER%\%ARCH%\
+ SET OUTDIR=%~dp0dist\%PACKAGE_NAME%-%PACKAGE_VERSION%\%COMPILER%\%ARCH%\%BUILDTYPE%\
+ SET BOOST_LIBDIR=lib%ARCH:~-2,2%-msvc-%COMPILER:~-3,2%.0
+ IF "%BUILDTYPE%" == "Debug" (SET ZLIB_STATIC_SUFFIX=d)
+
+ ECHO/
+ ECHO =========================================================================
+ ECHO Configuration: %PACKAGE_NAME% %PACKAGE_VERSION% %COMPILER%:%ARCH%:%BUILDTYPE% "%GENERATOR%"
+IF DEFINED COMPILERONLY (
+ ECHO COMPILER ONLY
+)
+ ECHO Build Directory: %BUILDDIR%
+ ECHO Install Directory: %OUTDIR%
+ ECHO Source Directory: %SOURCEDIR%
+ ECHO =========================================================================
+ ECHO/
+
+IF %OPT_IDE% == 1 (
+
+ CALL :SETRUNPATH || EXIT /B
+ CALL DEVENV "!BUILDDIR!Apache Thrift.sln" || EXIT /B
+ EXIT /B
+
+)
+
+ MKDIR "%BUILDDIR%"
+ CD "%BUILDDIR%" || EXIT /B
+
+IF %OPT_GENERATE% == 1 (
+
+ CMAKE.EXE %~dp0thrift ^
+ -G"%GENERATOR%" ^
+ -DBISON_EXECUTABLE=%WIN3P_ROOT%\dist\winflexbison\win_bison.exe ^
+ -DBOOST_ROOT=%WIN3P_ROOT%\dist\boost_%TP_BOOST_VERSION% ^
+ -DBOOST_LIBRARYDIR=%WIN3P_ROOT%\dist\boost_%TP_BOOST_VERSION%\%BOOST_LIBDIR% ^
+ -DCMAKE_INSTALL_PREFIX=%OUTDIR% ^
+ -DCMAKE_BUILD_TYPE=%BUILDTYPE% ^
+ -DFLEX_EXECUTABLE=%WIN3P_ROOT%\dist\winflexbison\win_flex.exe ^
+ -DINTTYPES_ROOT=%WIN3P_ROOT%\dist\msinttypes ^
+ -DLIBEVENT_ROOT=%WIN3P_ROOT%\dist\libevent-%TP_LIBEVENT_VERSION%\%COMPILER%\%ARCH%\%BUILDTYPE% ^
+ -DOPENSSL_ROOT_DIR=%WIN3P_ROOT%\dist\openssl-%TP_OPENSSL_VERSION%\%COMPILER%\%ARCH%\%BUILDTYPE%\dynamic ^
+ -DOPENSSL_USE_STATIC_LIBS=OFF ^
+ -DZLIB_LIBRARY=%WIN3P_ROOT%\dist\zlib-%TP_ZLIB_VERSION%\%COMPILER%\%ARCH%\lib\zlib%ZLIB_LIB_SUFFIX%.lib ^
+ -DZLIB_ROOT=%WIN3P_ROOT%\dist\zlib-%TP_ZLIB_VERSION%\%COMPILER%\%ARCH% ^
+ -DWITH_BOOSTTHREADS=ON ^
+ -DWITH_SHARED_LIB=OFF ^
+ -DWITH_STATIC_LIB=ON || EXIT /B
+
+)
+
+IF %OPT_BUILD% == 1 (
+
+ CD %BUILDDIR%
+ CMAKE.EXE --build . --config %BUILDTYPE% --target INSTALL || EXIT /B
+
+)
+
+IF %OPT_TEST% == 1 (
+
+ CALL :SETRUNPATH || EXIT /B
+ CMAKE.EXE --build . --config %BUILDTYPE% --target RUN_TESTS || EXIT /B
+
+)
+
+:SETRUNPATH
+ SET PATH=!PATH!;%WIN3P_ROOT%\dist\boost_%TP_BOOST_VERSION%\%BOOST_LIBDIR%
+ SET PATH=!PATH!;%WIN3P_ROOT%\dist\openssl-%TP_OPENSSL_VERSION%\%COMPILER%\%ARCH%\%BUILDTYPE%\dynamic\bin
+ SET PATH=!PATH!;%WIN3P_ROOT%\dist\zlib-%TP_ZLIB_VERSION%\%COMPILER%\%ARCH%\bin
+ EXIT /B
+
+ENDLOCAL
+EXIT /B
diff --git a/vendor/github.com/apache/thrift/build/wincpp/scripts/cl_setarch.bat b/vendor/github.com/apache/thrift/build/wincpp/scripts/cl_setarch.bat
new file mode 100644
index 000000000..9570a1e85
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/wincpp/scripts/cl_setarch.bat
@@ -0,0 +1,47 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+::
+:: Detect the architecture we're building for.
+:: Set the ARCH environment variable to one of:
+:: win32
+:: x64
+::
+:: Honors any existing ARCH environment variable
+:: setting instead of overwriting it, to allow it
+:: to be forced if needed.
+::
+:: Sets ERRORLEVEL to 0 if ARCH can be determined,
+:: to 1 if it cannot.
+::
+
+IF DEFINED ARCH (
+ ECHO [warn ] using existing environment variable ARCH
+ EXIT /B 0
+)
+
+CALL :CHECK x64
+IF %ERRORLEVEL% == 0 (SET ARCH=x64) ELSE (SET ARCH=win32)
+
+IF NOT DEFINED ARCH (
+ ECHO [error] unable to determine the target architecture
+ EXIT /B 1
+)
+
+ECHO [info ] detected target architecture %ARCH%
+EXIT /B 0
+
+:CHECK
+cl /? 2>&1 | findstr /C:" for %1%" > nul
+EXIT /B %ERRORLEVEL%
diff --git a/vendor/github.com/apache/thrift/build/wincpp/scripts/cl_setcompiler.bat b/vendor/github.com/apache/thrift/build/wincpp/scripts/cl_setcompiler.bat
new file mode 100644
index 000000000..8405d7616
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/wincpp/scripts/cl_setcompiler.bat
@@ -0,0 +1,58 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+::
+:: Detect the compiler edition we're building in.
+:: Set the COMPILER environment variable to one of:
+:: vc100 = Visual Studio 2010
+:: vc110 = Visual Studio 2012
+:: vc120 = Visual Studio 2013
+:: vc140 = Visual Studio 2015
+:: vc150 = Visual Studio 2017
+::
+:: Honors any existing COMPILER environment variable
+:: setting instead of overwriting it, to allow it
+:: to be forced if needed.
+::
+:: Sets ERRORLEVEL to 0 if COMPILER can be determined,
+:: to 1 if it cannot.
+::
+
+IF DEFINED COMPILER (
+ ECHO [warn ] using existing environment variable COMPILER
+ EXIT /B 0
+)
+
+CALL :CHECK 16
+IF %ERRORLEVEL% == 0 (IF NOT DEFINED COMPILER SET COMPILER=vc100)
+CALL :CHECK 17
+IF %ERRORLEVEL% == 0 (IF NOT DEFINED COMPILER SET COMPILER=vc110)
+CALL :CHECK 18
+IF %ERRORLEVEL% == 0 (IF NOT DEFINED COMPILER SET COMPILER=vc120)
+CALL :CHECK 19.00
+IF %ERRORLEVEL% == 0 (IF NOT DEFINED COMPILER SET COMPILER=vc140)
+CALL :CHECK 19.10
+IF %ERRORLEVEL% == 0 (IF NOT DEFINED COMPILER SET COMPILER=vc150)
+
+IF NOT DEFINED COMPILER (
+ ECHO [error] unable to determine the compiler edition
+ EXIT /B 1
+)
+
+ECHO [info ] detected compiler edition %COMPILER%
+EXIT /B 0
+
+:CHECK
+cl /? 2>&1 | findstr /C:"Version %1%." > nul
+EXIT /B %ERRORLEVEL%
diff --git a/vendor/github.com/apache/thrift/build/wincpp/scripts/cl_setgenerator.bat b/vendor/github.com/apache/thrift/build/wincpp/scripts/cl_setgenerator.bat
new file mode 100644
index 000000000..bae2742f7
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/wincpp/scripts/cl_setgenerator.bat
@@ -0,0 +1,69 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+::
+:: Detect the compiler edition we're building in and then
+:: set the GENERATOR environment variable to one of:
+::
+:: Visual Studio 15 2017 [arch] = Generates Visual Studio 2017 project files.
+:: Optional [arch] can be "Win64" or "ARM".
+:: Visual Studio 14 2015 [arch] = Generates Visual Studio 2015 project files.
+:: Optional [arch] can be "Win64" or "ARM".
+:: Visual Studio 12 2013 [arch] = Generates Visual Studio 2013 project files.
+:: Optional [arch] can be "Win64" or "ARM".
+:: Visual Studio 11 2012 [arch] = Generates Visual Studio 2012 project files.
+:: Optional [arch] can be "Win64" or "ARM".
+:: Visual Studio 10 2010 [arch] = Generates Visual Studio 2010 project files.
+:: Optional [arch] can be "Win64" or "IA64".
+::
+:: Honors any existing GENERATOR environment variable
+:: setting instead of overwriting it, to allow it
+:: to be forced if needed.
+::
+:: Sets ERRORLEVEL to 0 if GENERATOR can be determined,
+:: to 1 if it cannot.
+::
+:: Requires cl_setarch.bat to have been executed or the ARCH environment
+:: variable to be set.
+::
+
+IF "%ARCH%" == "x64" (SET GENARCH= Win64)
+
+IF DEFINED GENERATOR (
+ ECHO [warn ] using existing environment variable GENERATOR
+ EXIT /B 0
+)
+
+CALL :CHECK 16
+IF %ERRORLEVEL% == 0 (IF NOT DEFINED GENERATOR SET GENERATOR=Visual Studio 10 2010%GENARCH%)
+CALL :CHECK 17
+IF %ERRORLEVEL% == 0 (IF NOT DEFINED GENERATOR SET GENERATOR=Visual Studio 11 2012%GENARCH%)
+CALL :CHECK 18
+IF %ERRORLEVEL% == 0 (IF NOT DEFINED GENERATOR SET GENERATOR=Visual Studio 12 2013%GENARCH%)
+CALL :CHECK 19.00
+IF %ERRORLEVEL% == 0 (IF NOT DEFINED GENERATOR SET GENERATOR=Visual Studio 14 2015%GENARCH%)
+CALL :CHECK 19.10
+IF %ERRORLEVEL% == 0 (IF NOT DEFINED GENERATOR SET GENERATOR=Visual Studio 15 2017%GENARCH%)
+
+IF NOT DEFINED GENERATOR (
+ ECHO [error] unable to determine the CMake generator to use
+ EXIT /B 1
+)
+
+ECHO [info ] using CMake generator %GENERATOR%
+EXIT /B 0
+
+:CHECK
+cl /? 2>&1 | findstr /C:"Version %1%." > nul
+EXIT /B %ERRORLEVEL%
diff --git a/vendor/github.com/apache/thrift/build/wincpp/scripts/tpversions.bat b/vendor/github.com/apache/thrift/build/wincpp/scripts/tpversions.bat
new file mode 100644
index 000000000..d80c86878
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/wincpp/scripts/tpversions.bat
@@ -0,0 +1,24 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+::
+:: Set the versions of third party libraries to use.
+::
+
+IF NOT DEFINED TP_BOOST_VERSION SET TP_BOOST_VERSION=1_62_0
+IF NOT DEFINED TP_LIBEVENT_VERSION SET TP_LIBEVENT_VERSION=2.1.7rc2
+IF NOT DEFINED TP_OPENSSL_VERSION SET TP_OPENSSL_VERSION=1.1.0c
+IF NOT DEFINED TP_ZLIB_VERSION SET TP_ZLIB_VERSION=1.2.9
+
+EXIT /B 0
diff --git a/vendor/github.com/apache/thrift/build/wincpp/thirdparty/src/build-libevent.bat b/vendor/github.com/apache/thrift/build/wincpp/thirdparty/src/build-libevent.bat
new file mode 100644
index 000000000..4af505c61
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/wincpp/thirdparty/src/build-libevent.bat
@@ -0,0 +1,86 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+::
+:: Build script for libevent on windows
+:: Use libevent master from github which has cmake integration
+:: Uses the environment set up by a Visual Studio Command Prompt shortcut
+:: to target a specific architecture and compiler
+::
+:: Creates a static link library.
+:: Links against OpenSSL and zlib statically.
+::
+
+@ECHO OFF
+SETLOCAL EnableDelayedExpansion
+
+:: Sets variables for third party versions used in build
+CALL ..\..\scripts\tpversions.bat || EXIT /B
+
+:: use "build-libevent.bat /yes" to skip the question part
+IF /I "%1" == "/YES" SET NOASK=1
+
+:: Set COMPILER to (vc100 - vc140) depending on the current environment
+CALL ..\..\scripts\cl_setcompiler.bat || EXIT /B
+
+:: Set ARCH to either win32 or x64 depending on the current environment
+CALL ..\..\scripts\cl_setarch.bat || EXIT /B
+
+IF NOT DEFINED GENERATOR SET GENERATOR=NMake Makefiles
+IF NOT DEFINED PACKAGE_NAME SET PACKAGE_NAME=libevent
+IF NOT DEFINED PACKAGE_VERSION SET PACKAGE_VERSION=%TP_LIBEVENT_VERSION%
+IF NOT DEFINED SOURCEDIR SET SOURCEDIR=%~dp0%PACKAGE_NAME%-%PACKAGE_VERSION%
+IF NOT DEFINED WIN3P_ROOT SET WIN3P_ROOT=%~dp0..
+
+FOR %%X IN (
+ Debug
+ Release
+) DO (
+ SET BUILDTYPE=%%X
+ SET BUILDDIR=%WIN3P_ROOT%\build\%PACKAGE_NAME%\%PACKAGE_VERSION%\%COMPILER%\%ARCH%\!BUILDTYPE!
+ SET OUTDIR=%WIN3P_ROOT%\dist\%PACKAGE_NAME%-%PACKAGE_VERSION%\%COMPILER%\%ARCH%\!BUILDTYPE!
+
+ IF "!BUILDTYPE!" == "Debug" (SET ZLIB_LIB_SUFFIX=d)
+
+ SET CMAKE_DEFS=^
+ -DEVENT__DISABLE_SAMPLES=ON ^
+ -DEVENT__DISABLE_TESTS=ON ^
+ -DOPENSSL_USE_STATIC_LIBS=OFF ^
+ -DOPENSSL_ROOT_DIR=%WIN3P_ROOT%\dist\openssl-%TP_OPENSSL_VERSION%\%COMPILER%\%ARCH%\!BUILDTYPE!\dynamic ^
+ -DZLIB_LIBRARY=%WIN3P_ROOT%\dist\zlib-%TP_ZLIB_VERSION%\%COMPILER%\%ARCH%\lib\zlib!ZLIB_LIB_SUFFIX!.lib ^
+ -DZLIB_ROOT=%WIN3P_ROOT%\dist\zlib-%TP_ZLIB_VERSION%\%COMPILER%\%ARCH%
+
+ ECHO/
+ ECHO =========================================================================
+ ECHO Building: %PACKAGE_NAME% v%PACKAGE_VERSION% %COMPILER%:%ARCH%:!BUILDTYPE! "%GENERATOR%"
+ ECHO CMake Definitions: !CMAKE_DEFS!
+ ECHO Build Directory: !BUILDDIR!
+ ECHO Install Directory: !OUTDIR!
+ ECHO Source Directory: %SOURCEDIR%
+ ECHO =========================================================================
+ ECHO/
+
+ IF NOT DEFINED NOASK (
+ CHOICE /M "Do you want to build this configuration? " /c YN
+ IF !ERRORLEVEL! NEQ 1 (EXIT /B !ERRORLEVEL!)
+ )
+
+ MKDIR "!BUILDDIR!"
+ CD "!BUILDDIR!" || EXIT /B
+
+ CMAKE.EXE -G"%GENERATOR%" -DCMAKE_INSTALL_PREFIX=!OUTDIR! -DCMAKE_BUILD_TYPE=!BUILDTYPE! !CMAKE_DEFS! "%SOURCEDIR%" || EXIT /B
+ NMAKE /fMakefile install || EXIT /B
+)
+
+ENDLOCAL
diff --git a/vendor/github.com/apache/thrift/build/wincpp/thirdparty/src/build-openssl.bat b/vendor/github.com/apache/thrift/build/wincpp/thirdparty/src/build-openssl.bat
new file mode 100644
index 000000000..cf270f05b
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/wincpp/thirdparty/src/build-openssl.bat
@@ -0,0 +1,106 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+::
+:: Build script for openssl on windows
+:: openssl uses an in-tree build so you have to clean between each one
+::
+:: Uses the environment set up by a Visual Studio Command Prompt shortcut
+:: to target a specific architecture and compiler
+::
+:: If you use Lavasoft Ad-Aware, disable it for this build. It blocks the creation
+:: of any file named "clienthellotest.exe" for whatever reason, which breaks the build.
+::
+
+@ECHO OFF
+SETLOCAL EnableDelayedExpansion
+
+:: Sets variables for third party versions used in build
+CALL ..\..\scripts\tpversions.bat || EXIT /B
+
+:: use "build-openssl.bat /yes" to skip the question part
+IF /I "%1" == "/YES" SET NOASK=1
+
+IF NOT DEFINED PACKAGE_NAME SET PACKAGE_NAME=openssl
+IF NOT DEFINED PACKAGE_VERSION SET PACKAGE_VERSION=%TP_OPENSSL_VERSION%
+IF NOT DEFINED SOURCEDIR SET SOURCEDIR=%~dp0%PACKAGE_NAME%-%PACKAGE_VERSION%
+IF NOT DEFINED WIN3P_ROOT SET WIN3P_ROOT=%~dp0..
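+
+:: Example (illustrative): the defaults above can be overridden before calling this
+:: script, e.g. to build a different openssl release without the confirmation prompt:
+::   SET PACKAGE_VERSION=1.0.2l
+::   CALL build-openssl.bat /YES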
+
+:: Set COMPILER to (vc100 - vc140) depending on the current environment
+CALL ..\..\scripts\cl_setcompiler.bat || EXIT /B
+
+:: Set ARCH to either win32 or x64 depending on the current environment
+CALL ..\..\scripts\cl_setarch.bat || EXIT /B
+
+IF "%ARCH%" == "x64" (
+ SET TODO=debug-VC-WIN64A VC-WIN64A
+) ELSE (
+ SET TODO=debug-VC-WIN32 VC-WIN32
+)
+
+FOR %%X IN ( !TODO! ) DO (
+ SET BUILDTYPE=%%X
+ FOR %%Y IN (
+ nt
+ ntdll
+ ) DO (
+ SET LIBTYPE=%%Y
+
+ IF "!BUILDTYPE:~0,6!" == "debug-" (
+ SET OUTBUILDTYPE=debug
+ SET ZLIBLIBSUFFIX=d
+ ) ELSE (
+ SET OUTBUILDTYPE=release
+ SET ZLIBLIBSUFFIX=
+ )
+
+ IF "!LIBTYPE!" == "ntdll" (
+ SET BUILD_OPTIONS=shared
+ SET OUTLIBTYPE=dynamic
+ SET ZLIBLIB=zlib!ZLIBLIBSUFFIX!
+ SET ZLIBOPT=zlib-dynamic
+ ) ELSE (
+ SET BUILD_OPTIONS=no-shared
+ SET OUTLIBTYPE=static
+ SET ZLIBLIB=zlibstatic!ZLIBLIBSUFFIX!.lib
+ SET ZLIBOPT=zlib
+ )
+
+ SET LIB=%~dp0..\dist\zlib-%TP_ZLIB_VERSION%\!COMPILER!\!ARCH!\lib;!LIB!
+ SET BUILD_OPTIONS=!BUILD_OPTIONS! no-asm no-unit-test !ZLIBOPT! --openssldir=ssl --with-zlib-include=%~dp0..\dist\zlib-%TP_ZLIB_VERSION%\!COMPILER!\!ARCH!\include --with-zlib-lib=!ZLIBLIB!
+ SET OUTDIR=%WIN3P_ROOT%\dist\%PACKAGE_NAME%-%PACKAGE_VERSION%\%COMPILER%\%ARCH%\!OUTBUILDTYPE!\!OUTLIBTYPE!
+
+ ECHO/
+ ECHO =========================================================================
+ ECHO Building: %PACKAGE_NAME% %PACKAGE_VERSION% %COMPILER%:%ARCH%:!OUTBUILDTYPE!:!OUTLIBTYPE! [!BUILDTYPE!]
+ ECHO Configure Options: !BUILD_OPTIONS!
+ ECHO Install Directory: !OUTDIR!
+ ECHO Source Directory: %SOURCEDIR%
+ ECHO =========================================================================
+ ECHO/
+
+ IF NOT DEFINED NOASK (
+ CHOICE /M "Do you want to build this configuration? " /c YN
+ IF !ERRORLEVEL! NEQ 1 (EXIT /B !ERRORLEVEL!)
+ )
+
+ CD %SOURCEDIR% || EXIT /B
+ perl Configure !BUILDTYPE! --prefix="!OUTDIR!" !BUILD_OPTIONS! || EXIT /B
+ NMAKE /FMakefile install_sw || EXIT /B
+ NMAKE /FMakefile clean || EXIT /B
+ )
+)
+
+ENDLOCAL
+EXIT /B
diff --git a/vendor/github.com/apache/thrift/build/wincpp/thirdparty/src/build-zlib.bat b/vendor/github.com/apache/thrift/build/wincpp/thirdparty/src/build-zlib.bat
new file mode 100644
index 000000000..2427230d0
--- /dev/null
+++ b/vendor/github.com/apache/thrift/build/wincpp/thirdparty/src/build-zlib.bat
@@ -0,0 +1,75 @@
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+::
+:: Build script for zlib on windows.
+:: Uses the environment set up by a Visual Studio Command Prompt shortcut
+:: to target a specific architecture and compiler.
+::
+
+@ECHO OFF
+SETLOCAL EnableDelayedExpansion
+
+:: Sets variables for third party versions used in build
+CALL ..\..\scripts\tpversions.bat || EXIT /B
+
+:: use "build-zlib.bat /yes" to skip the question part
+IF /I "%1" == "/YES" SET NOASK=1
+
+IF NOT DEFINED GENERATOR SET GENERATOR=NMake Makefiles
+IF NOT DEFINED PACKAGE_NAME SET PACKAGE_NAME=zlib
+IF NOT DEFINED PACKAGE_VERSION SET PACKAGE_VERSION=%TP_ZLIB_VERSION%
+IF NOT DEFINED SOURCEDIR SET SOURCEDIR=%~dp0%PACKAGE_NAME%-%PACKAGE_VERSION%
+IF NOT DEFINED WIN3P_ROOT SET WIN3P_ROOT=%~dp0..
+
+:: Set COMPILER to (vc100 - vc140) depending on the current environment
+CALL ..\..\scripts\cl_setcompiler.bat || EXIT /B
+
+:: Set ARCH to either win32 or x64 depending on the current environment
+CALL ..\..\scripts\cl_setarch.bat || EXIT /B
+
+FOR %%X IN (
+ Debug
+ Release
+) DO (
+ SET BUILDTYPE=%%X
+ SET BUILDDIR=%WIN3P_ROOT%\build\%PACKAGE_NAME%\%PACKAGE_VERSION%\%COMPILER%\%ARCH%\!BUILDTYPE!
+ SET OUTDIR=%WIN3P_ROOT%\dist\%PACKAGE_NAME%-%PACKAGE_VERSION%\%COMPILER%\%ARCH%
+
+ ECHO/
+ ECHO =========================================================================
+ ECHO Building: %PACKAGE_NAME% v%PACKAGE_VERSION% %COMPILER%:%ARCH%:!BUILDTYPE! "%GENERATOR%"
+ ECHO Build Directory: !BUILDDIR!
+ ECHO Install Directory: !OUTDIR!
+ ECHO Source Directory: %SOURCEDIR%
+ ECHO =========================================================================
+ ECHO/
+
+ IF NOT DEFINED NOASK (
+ CHOICE /M "Do you want to build this configuration? " /c YN
+ IF !ERRORLEVEL! NEQ 1 (EXIT /B !ERRORLEVEL!)
+ )
+
+ MKDIR "!BUILDDIR!"
+ CD "!BUILDDIR!" || EXIT /B
+
+ CMAKE.EXE -G"%GENERATOR%" -DCMAKE_INSTALL_PREFIX=!OUTDIR! -DCMAKE_BUILD_TYPE=!BUILDTYPE! "%SOURCEDIR%" || EXIT /B
+ NMAKE /fMakefile install || EXIT /B
+
+ IF "!BUILDTYPE!" == "Debug" (
+ COPY "!BUILDDIR!\zlibd.pdb" "!OUTDIR!\bin\" || EXIT /B
+ )
+)
+
+ENDLOCAL
diff --git a/vendor/github.com/apache/thrift/cleanup.sh b/vendor/github.com/apache/thrift/cleanup.sh
new file mode 100755
index 000000000..f110721ac
--- /dev/null
+++ b/vendor/github.com/apache/thrift/cleanup.sh
@@ -0,0 +1,89 @@
+#!/bin/sh
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+topsrcdir="`dirname $0`"
+cd "$topsrcdir"
+
+make -k clean >/dev/null 2>&1
+make -k distclean >/dev/null 2>&1
+find . -name Makefile.in -exec rm -f {} \;
+rm -rf \
+AUTHORS \
+ChangeLog \
+INSTALL \
+Makefile \
+Makefile.in \
+Makefile.orig \
+aclocal/libtool.m4 \
+aclocal/ltoptions.m4 \
+aclocal/ltsugar.m4 \
+aclocal/ltversion.m4 \
+aclocal/lt~obsolete.m4 \
+aclocal.m4 \
+autom4te.cache \
+autoscan.log \
+config.guess \
+config.h \
+config.hin \
+config.hin~ \
+config.log \
+config.status \
+config.status.lineno \
+config.sub \
+configure \
+configure.lineno \
+configure.scan \
+depcomp \
+.deps \
+install-sh \
+.libs \
+libtool \
+ltmain.sh \
+missing \
+ylwrap \
+if/gen-* \
+test/gen-* \
+lib/php/src/ext/thrift_protocol/.deps \
+lib/php/src/ext/thrift_protocol/Makefile \
+lib/php/src/ext/thrift_protocol/Makefile.fragments \
+lib/php/src/ext/thrift_protocol/Makefile.global \
+lib/php/src/ext/thrift_protocol/Makefile.objects \
+lib/php/src/ext/thrift_protocol/acinclude.m4 \
+lib/php/src/ext/thrift_protocol/aclocal.m4 \
+lib/php/src/ext/thrift_protocol/autom4te.cache \
+lib/php/src/ext/thrift_protocol/build \
+lib/php/src/ext/thrift_protocol/config.guess \
+lib/php/src/ext/thrift_protocol/config.h \
+lib/php/src/ext/thrift_protocol/config.h.in \
+lib/php/src/ext/thrift_protocol/config.log \
+lib/php/src/ext/thrift_protocol/config.nice \
+lib/php/src/ext/thrift_protocol/config.status \
+lib/php/src/ext/thrift_protocol/config.sub \
+lib/php/src/ext/thrift_protocol/configure \
+lib/php/src/ext/thrift_protocol/configure.in \
+lib/php/src/ext/thrift_protocol/include \
+lib/php/src/ext/thrift_protocol/install-sh \
+lib/php/src/ext/thrift_protocol/libtool \
+lib/php/src/ext/thrift_protocol/ltmain.sh \
+lib/php/src/ext/thrift_protocol/missing \
+lib/php/src/ext/thrift_protocol/mkinstalldirs \
+lib/php/src/ext/thrift_protocol/modules \
+lib/php/src/ext/thrift_protocol/run-tests.php
diff --git a/vendor/github.com/apache/thrift/compiler/cpp/CMakeLists.txt b/vendor/github.com/apache/thrift/compiler/cpp/CMakeLists.txt
new file mode 100644
index 000000000..8e861e41c
--- /dev/null
+++ b/vendor/github.com/apache/thrift/compiler/cpp/CMakeLists.txt
@@ -0,0 +1,216 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+cmake_minimum_required(VERSION 2.8.12)
+
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/src/thrift/version.h.in ${CMAKE_CURRENT_BINARY_DIR}/thrift/version.h)
+if(MSVC)
+ # The winflexbison generator outputs some macros that conflict with the Visual Studio 2010 copy of stdint.h
+ # This might be fixed in later versions of Visual Studio, but an easy solution is to include stdint.h first
+ if(HAVE_STDINT_H)
+ add_definitions(-D__STDC_LIMIT_MACROS)
+ add_definitions(/FI"stdint.h")
+ endif(HAVE_STDINT_H)
+endif()
+
+find_package(FLEX REQUIRED)
+find_package(BISON REQUIRED)
+
+# create directory for thrifty and thriftl
+file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/thrift/)
+
+# Create flex and bison files and build the lib parse static library
+BISON_TARGET(thrifty ${CMAKE_CURRENT_SOURCE_DIR}/src/thrift/thrifty.yy ${CMAKE_CURRENT_BINARY_DIR}/thrift/thrifty.cc)
+FLEX_TARGET(thriftl ${CMAKE_CURRENT_SOURCE_DIR}/src/thrift/thriftl.ll ${CMAKE_CURRENT_BINARY_DIR}/thrift/thriftl.cc)
+ADD_FLEX_BISON_DEPENDENCY(thriftl thrifty)
+
+set(parse_SOURCES
+ ${CMAKE_CURRENT_BINARY_DIR}/thrift/thrifty.cc
+ ${CMAKE_CURRENT_BINARY_DIR}/thrift/thriftl.cc
+ ${CMAKE_CURRENT_BINARY_DIR}/thrift/thrifty.hh
+)
+
+add_library(parse STATIC ${parse_SOURCES})
+
+# Create the thrift compiler
+set(compiler_core
+ src/thrift/common.cc
+ src/thrift/generate/t_generator.cc
+ src/thrift/parse/t_typedef.cc
+ src/thrift/parse/parse.cc
+ ${CMAKE_CURRENT_BINARY_DIR}/thrift/version.h
+)
+
+set(thrift-compiler_SOURCES
+ src/thrift/main.cc
+ src/thrift/audit/t_audit.cpp
+)
+
+# This macro adds an option THRIFT_COMPILER_${NAME}
+# that allows enabling or disabling certain languages
+macro(THRIFT_ADD_COMPILER name description initial)
+ string(TOUPPER "THRIFT_COMPILER_${name}" enabler)
+ set(src "src/thrift/generate/t_${name}_generator.cc")
+ option(${enabler} ${description} ${initial})
+ if(${enabler})
+ list(APPEND thrift-compiler_SOURCES ${src})
+ endif()
+endmacro()
+
+# The following compilers can be enabled or disabled
+THRIFT_ADD_COMPILER(c_glib "Enable compiler for C with Glib" ON)
+THRIFT_ADD_COMPILER(cpp "Enable compiler for C++" ON)
+THRIFT_ADD_COMPILER(java "Enable compiler for Java" ON)
+THRIFT_ADD_COMPILER(as3 "Enable compiler for ActionScript 3" ON)
+THRIFT_ADD_COMPILER(dart "Enable compiler for Dart" ON)
+THRIFT_ADD_COMPILER(haxe "Enable compiler for Haxe" ON)
+THRIFT_ADD_COMPILER(csharp "Enable compiler for C#" ON)
+THRIFT_ADD_COMPILER(netcore "Enable compiler for .NET Core" ON)
+THRIFT_ADD_COMPILER(py "Enable compiler for Python 2.0" ON)
+THRIFT_ADD_COMPILER(rb "Enable compiler for Ruby" ON)
+THRIFT_ADD_COMPILER(perl "Enable compiler for Perl" ON)
+THRIFT_ADD_COMPILER(php "Enable compiler for PHP" ON)
+THRIFT_ADD_COMPILER(erl "Enable compiler for Erlang" ON)
+THRIFT_ADD_COMPILER(cocoa "Enable compiler for Cocoa Objective-C" ON)
+THRIFT_ADD_COMPILER(swift "Enable compiler for Cocoa Swift" ON)
+THRIFT_ADD_COMPILER(st "Enable compiler for Smalltalk" ON)
+THRIFT_ADD_COMPILER(ocaml "Enable compiler for OCaml" ON)
+THRIFT_ADD_COMPILER(hs "Enable compiler for Haskell" ON)
+THRIFT_ADD_COMPILER(xsd "Enable compiler for XSD" ON)
+THRIFT_ADD_COMPILER(html "Enable compiler for HTML Documentation" ON)
+THRIFT_ADD_COMPILER(js "Enable compiler for JavaScript" ON)
+THRIFT_ADD_COMPILER(json "Enable compiler for JSON" ON)
+THRIFT_ADD_COMPILER(javame "Enable compiler for Java ME" ON)
+THRIFT_ADD_COMPILER(delphi "Enable compiler for Delphi" ON)
+THRIFT_ADD_COMPILER(go "Enable compiler for Go" ON)
+THRIFT_ADD_COMPILER(d "Enable compiler for D" ON)
+THRIFT_ADD_COMPILER(lua "Enable compiler for Lua" ON)
+THRIFT_ADD_COMPILER(gv "Enable compiler for GraphViz" ON)
+THRIFT_ADD_COMPILER(rs "Enable compiler for Rust" ON)
+THRIFT_ADD_COMPILER(xml "Enable compiler for XML" ON)
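+
+# Example: THRIFT_ADD_COMPILER defines a THRIFT_COMPILER_<NAME> option for each of the
+# generators above, so an individual language generator can be switched off at
+# configure time, e.g.:
+#   cmake -DTHRIFT_COMPILER_HS=OFF -DTHRIFT_COMPILER_ST=OFF ..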
+
+# Thrift looks for include files in the src directory;
+# we also add the current binary directory for generated files
+include_directories(${CMAKE_CURRENT_BINARY_DIR} src)
+
+if(NOT ${WITH_PLUGIN})
+ list(APPEND thrift-compiler_SOURCES ${compiler_core})
+endif()
+
+add_executable(thrift-compiler ${thrift-compiler_SOURCES})
+
+if(${WITH_PLUGIN})
+ add_executable(thrift-bootstrap ${compiler_core}
+ src/thrift/main.cc
+ src/thrift/audit/t_audit.cpp
+ src/thrift/generate/t_cpp_generator.cc
+ )
+ target_link_libraries(thrift-bootstrap parse)
+
+ set(PLUGIN_GEN_SOURCES
+ ${CMAKE_CURRENT_BINARY_DIR}/thrift/plugin/plugin_types.h
+ ${CMAKE_CURRENT_BINARY_DIR}/thrift/plugin/plugin_types.cpp
+ ${CMAKE_CURRENT_BINARY_DIR}/thrift/plugin/plugin_constants.h
+ ${CMAKE_CURRENT_BINARY_DIR}/thrift/plugin/plugin_constants.cpp
+ )
+
+ file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/thrift/plugin)
+ add_custom_command(OUTPUT ${PLUGIN_GEN_SOURCES}
+ DEPENDS thrift-bootstrap src/thrift/plugin/plugin.thrift
+ COMMAND thrift-bootstrap -gen cpp
+ -out ${CMAKE_CURRENT_BINARY_DIR}/thrift/plugin
+ ${CMAKE_CURRENT_SOURCE_DIR}/src/thrift/plugin/plugin.thrift
+ )
+
+ include_directories(../../lib/cpp/src)
+
+ include(ThriftMacros)
+ ADD_LIBRARY_THRIFT(thriftc
+ ${compiler_core}
+ ${PLUGIN_GEN_SOURCES}
+ src/thrift/logging.cc
+ src/thrift/plugin/plugin_output.cc
+ src/thrift/plugin/plugin.cc
+ )
+ TARGET_INCLUDE_DIRECTORIES_THRIFT(thriftc PUBLIC ${Boost_INCLUDE_DIRS})
+ TARGET_LINK_LIBRARIES_THRIFT_AGAINST_THRIFT_LIBRARY(thriftc thrift PUBLIC)
+ target_compile_definitions(thrift-compiler PUBLIC THRIFT_ENABLE_PLUGIN)
+ LINK_AGAINST_THRIFT_LIBRARY(thrift-compiler thriftc)
+endif()
+
+set_target_properties(thrift-compiler PROPERTIES RUNTIME_OUTPUT_DIRECTORY bin/)
+set_target_properties(thrift-compiler PROPERTIES OUTPUT_NAME thrift)
+
+target_link_libraries(thrift-compiler parse)
+
+install(TARGETS thrift-compiler DESTINATION bin)
+
+if(${WITH_PLUGIN})
+ # Install the headers
+ install(FILES
+ "src/thrift/common.h"
+ "src/thrift/globals.h"
+ "src/thrift/logging.h"
+ "src/thrift/main.h"
+ "src/thrift/platform.h"
+ "${CMAKE_BINARY_DIR}/compiler/cpp/thrift/version.h"
+ DESTINATION "${INCLUDE_INSTALL_DIR}/thrift")
+ install(FILES
+ "src/thrift/audit/t_audit.h"
+ DESTINATION "${INCLUDE_INSTALL_DIR}/thrift/audit")
+ install(FILES
+ "src/thrift/generate/t_generator.h"
+ "src/thrift/generate/t_generator_registry.h"
+ "src/thrift/generate/t_html_generator.h"
+ "src/thrift/generate/t_oop_generator.h"
+ DESTINATION "${INCLUDE_INSTALL_DIR}/thrift/generate")
+ install(FILES
+ "src/thrift/parse/t_base_type.h"
+ "src/thrift/parse/t_const.h"
+ "src/thrift/parse/t_const_value.h"
+ "src/thrift/parse/t_container.h"
+ "src/thrift/parse/t_doc.h"
+ "src/thrift/parse/t_enum.h"
+ "src/thrift/parse/t_enum_value.h"
+ "src/thrift/parse/t_field.h"
+ "src/thrift/parse/t_function.h"
+ "src/thrift/parse/t_list.h"
+ "src/thrift/parse/t_map.h"
+ "src/thrift/parse/t_program.h"
+ "src/thrift/parse/t_scope.h"
+ "src/thrift/parse/t_service.h"
+ "src/thrift/parse/t_set.h"
+ "src/thrift/parse/t_struct.h"
+ "src/thrift/parse/t_typedef.h"
+ "src/thrift/parse/t_type.h"
+ DESTINATION "${INCLUDE_INSTALL_DIR}/thrift/parse")
+ install(FILES
+ "src/thrift/plugin/plugin.h"
+ "src/thrift/plugin/plugin_output.h"
+ "src/thrift/plugin/type_util.h"
+ DESTINATION "${INCLUDE_INSTALL_DIR}/thrift/plugin")
+if(MSVC)
+ install(FILES
+ "src/thrift/windows/config.h"
+ DESTINATION "${INCLUDE_INSTALL_DIR}/thrift/windows")
+endif()
+endif()
+
+if(BUILD_TESTING)
+ add_subdirectory(test)
+endif()
diff --git a/vendor/github.com/apache/thrift/compiler/cpp/Makefile.am b/vendor/github.com/apache/thrift/compiler/cpp/Makefile.am
new file mode 100644
index 000000000..50820334d
--- /dev/null
+++ b/vendor/github.com/apache/thrift/compiler/cpp/Makefile.am
@@ -0,0 +1,205 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+# Contains some contributions under the Thrift Software License.
+# Please see doc/old-thrift-license.txt in the Thrift distribution for
+# details.
+
+AUTOMAKE_OPTIONS = subdir-objects
+
+# Note on why we have src/thrift and src/thrift/plugin directories:
+# Since Automake supports only one set of BUILT_SOURCES per file and does not allow
+# SUBDIRS to be built before BUILT_SOURCES, we end up with a separate Makefile.am for each
+# source code generation step, i.e. lex-yacc and Thrift, to achieve a stable parallel make.
+
+SUBDIRS = src src/thrift/plugin .
+if WITH_TESTS
+SUBDIRS += test
+endif
+
+bin_PROGRAMS = thrift
+
+thrift_OBJDIR = obj
+
+plugin_gen = src/thrift/plugin/plugin_types.h \
+ src/thrift/plugin/plugin_types.cpp \
+ src/thrift/plugin/plugin_constants.h \
+ src/thrift/plugin/plugin_constants.cpp
+
+compiler_core = src/thrift/common.h \
+ src/thrift/common.cc \
+ src/thrift/generate/t_generator.cc \
+ src/thrift/generate/t_generator_registry.h \
+ src/thrift/globals.h \
+ src/thrift/platform.h \
+ src/thrift/logging.h \
+ src/thrift/parse/t_doc.h \
+ src/thrift/parse/t_type.h \
+ src/thrift/parse/t_base_type.h \
+ src/thrift/parse/t_enum.h \
+ src/thrift/parse/t_enum_value.h \
+ src/thrift/parse/t_typedef.h \
+ src/thrift/parse/t_typedef.cc \
+ src/thrift/parse/t_container.h \
+ src/thrift/parse/t_list.h \
+ src/thrift/parse/t_set.h \
+ src/thrift/parse/t_map.h \
+ src/thrift/parse/t_struct.h \
+ src/thrift/parse/t_field.h \
+ src/thrift/parse/t_service.h \
+ src/thrift/parse/t_function.h \
+ src/thrift/parse/t_program.h \
+ src/thrift/parse/t_scope.h \
+ src/thrift/parse/t_const.h \
+ src/thrift/parse/t_const_value.h \
+ src/thrift/parse/parse.cc \
+ src/thrift/generate/t_generator.h \
+ src/thrift/generate/t_oop_generator.h \
+ src/thrift/generate/t_html_generator.h
+
+thrift_SOURCES = src/thrift/main.h \
+ src/thrift/main.cc \
+ src/thrift/audit/t_audit.cpp \
+ src/thrift/audit/t_audit.h
+
+# Specific client generator source
+thrift_SOURCES += src/thrift/generate/t_c_glib_generator.cc \
+ src/thrift/generate/t_cpp_generator.cc \
+ src/thrift/generate/t_java_generator.cc \
+ src/thrift/generate/t_json_generator.cc \
+ src/thrift/generate/t_as3_generator.cc \
+ src/thrift/generate/t_dart_generator.cc \
+ src/thrift/generate/t_haxe_generator.cc \
+ src/thrift/generate/t_csharp_generator.cc \
+ src/thrift/generate/t_netcore_generator.cc \
+ src/thrift/generate/t_py_generator.cc \
+ src/thrift/generate/t_rb_generator.cc \
+ src/thrift/generate/t_perl_generator.cc \
+ src/thrift/generate/t_php_generator.cc \
+ src/thrift/generate/t_erl_generator.cc \
+ src/thrift/generate/t_cocoa_generator.cc \
+ src/thrift/generate/t_swift_generator.cc \
+ src/thrift/generate/t_st_generator.cc \
+ src/thrift/generate/t_ocaml_generator.cc \
+ src/thrift/generate/t_hs_generator.cc \
+ src/thrift/generate/t_xsd_generator.cc \
+ src/thrift/generate/t_xml_generator.cc \
+ src/thrift/generate/t_html_generator.cc \
+ src/thrift/generate/t_js_generator.cc \
+ src/thrift/generate/t_javame_generator.cc \
+ src/thrift/generate/t_delphi_generator.cc \
+ src/thrift/generate/t_go_generator.cc \
+ src/thrift/generate/t_gv_generator.cc \
+ src/thrift/generate/t_d_generator.cc \
+ src/thrift/generate/t_lua_generator.cc \
+ src/thrift/generate/t_rs_generator.cc
+
+thrift_CPPFLAGS = -I$(srcdir)/src
+thrift_CXXFLAGS = -Wall -Wextra -pedantic
+thrift_LDADD = @LEXLIB@ src/thrift/libparse.a
+
+if !WITH_PLUGIN
+thrift_SOURCES += $(compiler_core)
+else
+
+lib_LTLIBRARIES = libthriftc.la
+
+thrift_CPPFLAGS += -DTHRIFT_ENABLE_PLUGIN=1
+thrift_LDADD += libthriftc.la
+
+nodist_libthriftc_la_SOURCES = $(plugin_gen)
+libthriftc_la_SOURCES = $(compiler_core) \
+ src/thrift/plugin/type_util.h \
+ src/thrift/plugin/plugin.h \
+ src/thrift/plugin/plugin.cc \
+ src/thrift/plugin/plugin_output.h \
+ src/thrift/plugin/plugin_output.cc \
+ src/thrift/plugin/plugin.thrift \
+ src/thrift/logging.cc
+
+
+libthriftc_la_CPPFLAGS = -I$(srcdir)/src -Isrc -I$(top_builddir)/lib/cpp/src -DTHRIFT_ENABLE_PLUGIN=1
+libthriftc_la_CXXFLAGS = -Wall -Wextra -pedantic
+libthriftc_la_LIBADD = $(top_builddir)/lib/cpp/libthrift.la
+
+include_thriftdir = $(includedir)/thrift
+include_thrift_HEADERS = src/thrift/common.h \
+ src/thrift/globals.h \
+ src/thrift/logging.h \
+ src/thrift/main.h \
+ src/thrift/platform.h \
+ src/thrift/version.h
+
+include_auditdir = $(include_thriftdir)/audit
+include_audit_HEADERS = src/thrift/audit/t_audit.h
+
+include_generatedir = $(include_thriftdir)/generate
+include_generate_HEADERS = src/thrift/generate/t_generator.h \
+ src/thrift/generate/t_generator_registry.h \
+ src/thrift/generate/t_oop_generator.h \
+ src/thrift/generate/t_html_generator.h
+
+include_parsedir = $(include_thriftdir)/parse
+include_parse_HEADERS = src/thrift/parse/t_service.h \
+ src/thrift/parse/t_program.h \
+ src/thrift/parse/t_field.h \
+ src/thrift/parse/t_scope.h \
+ src/thrift/parse/t_typedef.h \
+ src/thrift/parse/t_set.h \
+ src/thrift/parse/t_const_value.h \
+ src/thrift/parse/t_enum_value.h \
+ src/thrift/parse/t_const.h \
+ src/thrift/parse/t_list.h \
+ src/thrift/parse/t_map.h \
+ src/thrift/parse/t_container.h \
+ src/thrift/parse/t_base_type.h \
+ src/thrift/parse/t_enum.h \
+ src/thrift/parse/t_function.h \
+ src/thrift/parse/t_type.h \
+ src/thrift/parse/t_doc.h \
+ src/thrift/parse/t_struct.h
+
+include_plugindir = $(include_thriftdir)/plugin
+include_plugin_HEADERS = src/thrift/plugin/plugin.h \
+ src/thrift/plugin/type_util.h \
+ src/thrift/plugin/plugin_output.h
+
+include_windowsdir = $(include_thriftdir)/windows
+include_windows_HEADERS = src/thrift/windows/config.h
+endif
+
+WINDOWS_DIST = \
+ compiler.sln \
+ compiler.vcxproj \
+ compiler.vcxproj.filters
+
+EXTRA_DIST = \
+ coding_standards.md \
+ README.md \
+ CMakeLists.txt \
+ test \
+ $(WINDOWS_DIST)
+
+clean-local:
+ $(RM) version.h $(plugin_gen)
+
+src/thrift/main.cc: src/thrift/version.h
+
+style-local:
+ $(CPPSTYLE_CMD)
diff --git a/vendor/github.com/apache/thrift/compiler/cpp/README.md b/vendor/github.com/apache/thrift/compiler/cpp/README.md
new file mode 100644
index 000000000..77cb23421
--- /dev/null
+++ b/vendor/github.com/apache/thrift/compiler/cpp/README.md
@@ -0,0 +1,101 @@
+# Build compiler using CMake
+
+## Build on Unix-like systems
+
+### Build using cmake
+
+Use the following steps to build using cmake:
+
+```
+mkdir cmake-build
+cd cmake-build
+cmake ..
+make
+```
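+
+The compiler's CMakeLists.txt also registers an install rule for the resulting `thrift` binary, so it can be installed after building; a minimal sketch, assuming a standard prefix:
+
+```
+cmake -DCMAKE_INSTALL_PREFIX=/usr/local ..
+make
+sudo make install   # installs the compiler as /usr/local/bin/thrift
+```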
+
+### Create an Eclipse project
+
+```
+mkdir cmake-ec && cd cmake-ec
+cmake -G "Eclipse CDT4 - Unix Makefiles" ..
+make
+```
+
+Now open the folder cmake-ec using Eclipse.
+
+
+## Cross compile using mingw32 and generate a Windows Installer with CPack
+
+```
+mkdir cmake-mingw32 && cd cmake-mingw32
+cmake -DCMAKE_TOOLCHAIN_FILE=../build/cmake/mingw32-toolchain.cmake -DBUILD_COMPILER=ON -DBUILD_LIBRARIES=OFF -DBUILD_TESTING=OFF -DBUILD_EXAMPLES=OFF ..
+cpack
+```
+
+# Build on Windows
+
+### using Git Bash
+
+Git Bash provides flex and bison, so you just need to do this:
+
+```
+mkdir cmake-vs && cd cmake-vs
+cmake -DWITH_SHARED_LIB=off ..
+```
+
+### using Win flex-bison
+
+In order to build on Windows with winflexbison, a few additional steps are necessary:
+
+1. Download winflexbison from http://sourceforge.net/projects/winflexbison/
+2. Extract the winflexbison files to e.g. C:\winflexbison
+3. Make the CMake variables point to the correct binaries (or pass them on the cmake command line, as shown below).
+  * FLEX_EXECUTABLE = C:/winflexbison/win_flex.exe
+  * BISON_EXECUTABLE = C:/winflexbison/win_bison.exe
+4. Generate a Visual Studio project:
+```
+mkdir cmake-vs && cd cmake-vs
+cmake -G "Visual Studio 12" -DWITH_SHARED_LIB=off ..
+```
+5. Now open the folder cmake-vs using Visual Studio 2013.
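+
+Alternatively, the two variables can be passed directly on the cmake command line instead of being edited in the CMake cache (an illustrative sketch, assuming winflexbison was extracted to C:\winflexbison as in step 2):
+
+```
+mkdir cmake-vs && cd cmake-vs
+cmake -G "Visual Studio 12" -DWITH_SHARED_LIB=off -DFLEX_EXECUTABLE=C:/winflexbison/win_flex.exe -DBISON_EXECUTABLE=C:/winflexbison/win_bison.exe ..
+```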
+
+# Building the Thrift IDL compiler in Windows
+
+If you don't want to use CMake you can use the already available Visual Studio
+2010 solution.
+The Visual Studio project contains pre-build commands to generate the
+thriftl.cc, thrifty.cc and thrifty.hh files which are necessary to build
+the compiler. These depend on bison, flex and their dependencies to
+work properly.
+Download flex & bison as described above.
+Place these binaries somewhere in the path and
+rename win_flex.exe and win_bison.exe to flex.exe and bison.exe respectively.
+
+If this doesn't work on a system, try these manual pre-build steps.
+
+Open compiler.sln and remove the Pre-build commands under the project's
+ Properties -> Build Events -> Pre-Build Events.
+
+From a command prompt:
+```
+cd thrift/compiler/cpp
+flex -osrc\thrift\thriftl.cc src\thrift\thriftl.ll
+```
+In the generated thriftl.cc, comment out the line "#include <unistd.h>" (it is not available on Windows).
+
+Place a copy of bison.simple in thrift/compiler/cpp
+```
+bison -y -o "src/thrift/thrifty.cc" --defines src/thrift/thrifty.yy
+move src\thrift\thrifty.cc.hh src\thrift\thrifty.hh
+```
+
+Bison might generate the yacc header file "thrifty.cc.h" with just a single "h" in the extension; in this case you'll have to rename it to "thrifty.hh".
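+
+For that case the rename might look like this (illustrative; the exact generated file name depends on the bison version in use):
+
+```
+move src\thrift\thrifty.cc.h src\thrift\thrifty.hh
+```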
+
+```
+move src\thrift\version.h.in src\thrift\version.h
+```
+
+Download inttypes.h from the interwebs and place it in an include path
+location (e.g. thrift/compiler/cpp/src).
+
+Build the compiler in Visual Studio.
diff --git a/vendor/github.com/apache/thrift/compiler/cpp/coding_standards.md b/vendor/github.com/apache/thrift/compiler/cpp/coding_standards.md
new file mode 100644
index 000000000..ea089467e
--- /dev/null
+++ b/vendor/github.com/apache/thrift/compiler/cpp/coding_standards.md
@@ -0,0 +1,4 @@
+## Compiler Coding Standards
+
+ * When making a small change or bugfix, follow the style of the nearby code.
+ * When making a major refactor and/or adding a new feature, follow the coding standards of the C++ library.
\ No newline at end of file
diff --git a/vendor/github.com/apache/thrift/compiler/cpp/compiler.sln b/vendor/github.com/apache/thrift/compiler/cpp/compiler.sln
new file mode 100644
index 000000000..94961aaef
--- /dev/null
+++ b/vendor/github.com/apache/thrift/compiler/cpp/compiler.sln
@@ -0,0 +1,20 @@
+
+Microsoft Visual Studio Solution File, Format Version 11.00
+# Visual Studio 2010
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "compiler", "compiler.vcxproj", "{89975A1A-F799-4556-98B8-64E30AB39A90}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Win32 = Debug|Win32
+ Release|Win32 = Release|Win32
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {89975A1A-F799-4556-98B8-64E30AB39A90}.Debug|Win32.ActiveCfg = Debug|Win32
+ {89975A1A-F799-4556-98B8-64E30AB39A90}.Debug|Win32.Build.0 = Debug|Win32
+ {89975A1A-F799-4556-98B8-64E30AB39A90}.Release|Win32.ActiveCfg = Release|Win32
+ {89975A1A-F799-4556-98B8-64E30AB39A90}.Release|Win32.Build.0 = Release|Win32
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+EndGlobal
diff --git a/vendor/github.com/apache/thrift/compiler/cpp/compiler.vcxproj b/vendor/github.com/apache/thrift/compiler/cpp/compiler.vcxproj
new file mode 100644
index 000000000..4b03253e2
--- /dev/null
+++ b/vendor/github.com/apache/thrift/compiler/cpp/compiler.vcxproj
@@ -0,0 +1,251 @@
+
+
+
+
+ Debug
+ Win32
+
+
+ Debug
+ x64
+
+
+ Release
+ Win32
+
+
+ Release
+ x64
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {89975A1A-F799-4556-98B8-64E30AB39A90}
+ Win32Proj
+ compiler
+
+
+
+ Application
+ true
+ MultiByte
+
+
+ Application
+ true
+ MultiByte
+
+
+ Application
+ false
+ true
+ MultiByte
+
+
+ Application
+ false
+ true
+ MultiByte
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ true
+ $(ProjectDir)\src\;$(ProjectDir)\src\windows\;$(IncludePath)
+ thrift
+ $(ExecutablePath);C:\Program Files (x86)\Git\bin
+
+
+ true
+ $(ProjectDir)\src\;$(ProjectDir)\src\windows\;$(IncludePath)
+ thrift
+ $(ExecutablePath);C:\Program Files (x86)\Git\bin
+
+
+ false
+ $(ProjectDir)\src\;$(ProjectDir)\src\windows\;$(IncludePath)
+ thrift
+ $(ExecutablePath);C:\Program Files (x86)\Git\bin
+
+
+ false
+ $(ProjectDir)\src\;$(ProjectDir)\src\windows\;$(IncludePath)
+ thrift
+ $(ExecutablePath);C:\Program Files (x86)\Git\bin
+
+
+
+
+
+ Level3
+ Disabled
+ WIN32;MINGW;YY_NO_UNISTD_H;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)
+ thrift\windows\config.h
+ CompileAsCpp
+
+
+ Console
+ true
+
+
+ flex -o "src\\thrift\\thriftl.cc" src/thrift/thriftl.ll && bison -y -o "src\\thrift\\thrifty.cc" --defines="src\\thrift\\thrifty.hh" src/thrift/thrifty.yy
+
+
+
+
+
+
+ Level3
+ Disabled
+ WIN32;MINGW;YY_NO_UNISTD_H;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)
+ thrift\windows\config.h
+ CompileAsCpp
+
+
+ Console
+ true
+
+
+ flex -o "src\\thrift\\thriftl.cc" src/thrift/thriftl.ll && bison -y -o "src\\thrift\\thrifty.cc" --defines="src\\thrift\\thrifty.hh" src/thrift/thrifty.yy
+
+
+
+
+
+ Level3
+
+
+ MaxSpeed
+ true
+ true
+ WIN32;MINGW;YY_NO_UNISTD_H;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)
+ thrift\windows\config.h
+ CompileAsCpp
+
+
+ Console
+ true
+ true
+ true
+
+
+ flex -o "src\\thrift\\thriftl.cc" src/thrift/thriftl.ll && bison -y -o "src\\thrift\\thrifty.cc" --defines="src\\thrift\\thrifty.hh" src/thrift/thrifty.yy
+
+
+
+
+
+ Level3
+
+
+ MaxSpeed
+ true
+ true
+ WIN32;MINGW;YY_NO_UNISTD_H;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)
+ thrift\windows\config.h
+ CompileAsCpp
+
+
+ Console
+ true
+ true
+ true
+
+
+ flex -o "src\\thrift\\thriftl.cc" src/thrift/thriftl.ll && bison -y -o "src\\thrift\\thrifty.cc" --defines="src\\thrift\\thrifty.hh" src/thrift/thrifty.yy
+
+
+
+
+
+
+
diff --git a/vendor/github.com/apache/thrift/compiler/cpp/compiler.vcxproj.filters b/vendor/github.com/apache/thrift/compiler/cpp/compiler.vcxproj.filters
new file mode 100644
index 000000000..b96865b51
--- /dev/null
+++ b/vendor/github.com/apache/thrift/compiler/cpp/compiler.vcxproj.filters
@@ -0,0 +1,199 @@
+
+
+
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+
+
+ parse
+
+
+ parse
+
+
+ parse
+
+
+ parse
+
+
+ parse
+
+
+ parse
+
+
+ parse
+
+
+ parse
+
+
+ parse
+
+
+ parse
+
+
+ parse
+
+
+ parse
+
+
+ parse
+
+
+ parse
+
+
+ parse
+
+
+ parse
+
+
+ parse
+
+
+ parse
+
+
+
+
+ windows
+
+
+ windows
+
+
+
+
+ {ae9d0a15-57ae-4f01-87a4-81f790249b83}
+
+
+ {5df016bb-591b-420a-a535-4330d9187fbf}
+
+
+ {b5c626af-afa5-433c-8e10-ee734533cb68}
+
+
+
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+ generate
+
+
+
+ parse
+
+
+
+
+ parse
+
+
+ generate
+
+
+ generate
+
+
+
+
+
+
+
diff --git a/vendor/github.com/apache/thrift/compiler/cpp/src/Makefile.am b/vendor/github.com/apache/thrift/compiler/cpp/src/Makefile.am
new file mode 100644
index 000000000..bc2c5cbac
--- /dev/null
+++ b/vendor/github.com/apache/thrift/compiler/cpp/src/Makefile.am
@@ -0,0 +1,87 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+# Contains some contributions under the Thrift Software License.
+# Please see doc/old-thrift-license.txt in the Thrift distribution for
+# details.
+
+AUTOMAKE_OPTIONS = subdir-objects
+
+AM_YFLAGS = -d
+
+BUILT_SOURCES = thrift/thrifty.cc
+
+noinst_LIBRARIES = thrift/libparse.a
+
+thrift_libparse_a_CPPFLAGS = -I$(srcdir)
+thrift_libparse_a_CXXFLAGS = -Wall -Wno-sign-compare -Wno-unused
+
+thrift_libparse_a_SOURCES = thrift/thrifty.yy \
+ thrift/thriftl.ll
+
+clean-local:
+ $(RM) thrift/thriftl.cc thrift/thrifty.cc thrift/thrifty.h thrift/thrifty.hh
+
+if WITH_PLUGIN
+noinst_PROGRAMS = thrift/thrift-bootstrap
+
+thrift_thrift_bootstrap_SOURCES = \
+ thrift/common.h \
+ thrift/common.cc \
+ thrift/audit/t_audit.h \
+ thrift/audit/t_audit.cpp \
+ thrift/generate/t_generator.cc \
+ thrift/generate/t_generator_registry.h \
+ thrift/globals.h \
+ thrift/platform.h \
+ thrift/logging.h \
+ thrift/parse/t_doc.h \
+ thrift/parse/t_type.h \
+ thrift/parse/t_base_type.h \
+ thrift/parse/t_enum.h \
+ thrift/parse/t_enum_value.h \
+ thrift/parse/t_typedef.h \
+ thrift/parse/t_typedef.cc \
+ thrift/parse/t_container.h \
+ thrift/parse/t_list.h \
+ thrift/parse/t_set.h \
+ thrift/parse/t_map.h \
+ thrift/parse/t_struct.h \
+ thrift/parse/t_field.h \
+ thrift/parse/t_service.h \
+ thrift/parse/t_function.h \
+ thrift/parse/t_program.h \
+ thrift/parse/t_scope.h \
+ thrift/parse/t_const.h \
+ thrift/parse/t_const_value.h \
+ thrift/parse/parse.cc \
+ thrift/generate/t_generator.h \
+ thrift/generate/t_oop_generator.h \
+ thrift/generate/t_html_generator.h \
+ thrift/windows/config.h \
+ thrift/version.h \
+ thrift/generate/t_cpp_generator.cc \
+ thrift/main.h \
+ thrift/main.cc
+
+main.cc: version.h
+
+thrift_thrift_bootstrap_CXXFLAGS = -Wall -Wextra -pedantic
+thrift_thrift_bootstrap_LDADD = @LEXLIB@ thrift/libparse.a
+endif
diff --git a/vendor/github.com/apache/thrift/compiler/cpp/src/thrift/audit/t_audit.cpp b/vendor/github.com/apache/thrift/compiler/cpp/src/thrift/audit/t_audit.cpp
new file mode 100644
index 000000000..1386f3bd1
--- /dev/null
+++ b/vendor/github.com/apache/thrift/compiler/cpp/src/thrift/audit/t_audit.cpp
@@ -0,0 +1,464 @@
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+// Careful: must include globals first for extern definitions
+#include "thrift/globals.h"
+
+#include "thrift/parse/t_program.h"
+#include "thrift/parse/t_scope.h"
+#include "thrift/parse/t_const.h"
+#include "thrift/parse/t_field.h"
+
+#include "thrift/version.h"
+
+#include "thrift/audit/t_audit.h"
+
+extern int g_warn;
+extern std::string g_curpath;
+extern bool g_return_failure;
+
+void thrift_audit_warning(int level, const char* fmt, ...) {
+ if (g_warn < level) {
+ return;
+ }
+ va_list args;
+ printf("[Thrift Audit Warning:%s] ", g_curpath.c_str());
+ va_start(args, fmt);
+ vprintf(fmt, args);
+ va_end(args);
+ printf("\n");
+}
+
+void thrift_audit_failure(const char* fmt, ...) {
+ va_list args;
+ fprintf(stderr, "[Thrift Audit Failure:%s] ", g_curpath.c_str());
+ va_start(args, fmt);
+ vfprintf(stderr, fmt, args);
+ va_end(args);
+ fprintf(stderr, "\n");
+ g_return_failure = true;
+}
+
+void compare_namespace(t_program* newProgram, t_program* oldProgram)
+{
+  const std::map<std::string, std::string>& newNamespaceMap = newProgram->get_all_namespaces();
+  const std::map<std::string, std::string>& oldNamespaceMap = oldProgram->get_all_namespaces();
+
+  for(std::map<std::string, std::string>::const_iterator oldNamespaceMapIt = oldNamespaceMap.begin();
+      oldNamespaceMapIt != oldNamespaceMap.end();
+      oldNamespaceMapIt++)
+  {
+    std::map<std::string, std::string>::const_iterator newNamespaceMapIt = newNamespaceMap.find(oldNamespaceMapIt->first);
+ if(newNamespaceMapIt == newNamespaceMap.end())
+ {
+ thrift_audit_warning(1, "Language %s not found in new thrift file\n", (oldNamespaceMapIt->first).c_str());
+ }
+ else if((newNamespaceMapIt->second) != oldNamespaceMapIt->second)
+ {
+ thrift_audit_warning(1, "Namespace %s changed in new thrift file\n", (oldNamespaceMapIt->second).c_str());
+ }
+ }
+}
+
+void compare_enum_values(t_enum* newEnum,t_enum* oldEnum)
+{
+  const std::vector<t_enum_value*>& oldEnumValues = oldEnum->get_constants();
+  for(std::vector<t_enum_value*>::const_iterator oldEnumValuesIt = oldEnumValues.begin();
+ oldEnumValuesIt != oldEnumValues.end();
+ oldEnumValuesIt++)
+ {
+ int enumValue = (*oldEnumValuesIt)->get_value();
+ t_enum_value* newEnumValue = newEnum->get_constant_by_value(enumValue);
+ if(newEnumValue != NULL)
+ {
+ std::string enumName = (*oldEnumValuesIt)->get_name();
+ if(enumName != newEnumValue->get_name())
+ {
+ thrift_audit_warning(1, "Name of the value %d changed in enum %s\n", enumValue, oldEnum->get_name().c_str());
+ }
+ }
+ else
+ {
+ thrift_audit_failure("Enum value %d missing in %s\n", enumValue, oldEnum->get_name().c_str());
+ }
+
+ }
+}
+
+void compare_enums(const std::vector<t_enum*>& newEnumList, const std::vector<t_enum*>& oldEnumList)
+{
+  std::map<std::string, t_enum*> newEnumMap;
+  std::vector<t_enum*>::const_iterator newEnumIt;
+ for(newEnumIt = newEnumList.begin(); newEnumIt != newEnumList.end(); newEnumIt++)
+ {
+ newEnumMap[(*newEnumIt)->get_name()] = *newEnumIt;
+ }
+  std::vector<t_enum*>::const_iterator oldEnumIt;
+ for(oldEnumIt = oldEnumList.begin(); oldEnumIt != oldEnumList.end(); oldEnumIt++)
+ {
+    std::map<std::string, t_enum*>::iterator newEnumMapIt;
+ newEnumMapIt = newEnumMap.find((*oldEnumIt)->get_name());
+
+ if(newEnumMapIt == newEnumMap.end())
+ {
+ thrift_audit_warning(1, "Enum %s not found in new thrift file\n",(*oldEnumIt)->get_name().c_str());
+ }
+ else
+ {
+ compare_enum_values(newEnumMapIt->second, *oldEnumIt);
+ }
+ }
+}
+
+//This function returns 'true' if the two arguments are of the same type.
+//Returns 'false' if they are of different types.
+bool compare_type(t_type* newType, t_type* oldType)
+{
+  //Comparing the names of the two types works when newType and oldType are basic types, structs or enums.
+  //However, when they are containers, get_name() returns an empty string, in which case we also have to
+  //compare the types of their elements.
+ if((newType->get_name()).empty() && (oldType->get_name()).empty())
+ {
+
+ if(newType->is_list() && oldType->is_list())
+ {
+ t_type* newElementType = ((t_list*)newType)->get_elem_type();
+ t_type* oldElementType = ((t_list*)oldType)->get_elem_type();
+ return compare_type(newElementType, oldElementType);
+ }
+ else if(newType->is_map() && oldType->is_map())
+ {
+ t_type* newKeyType = ((t_map*)newType)->get_key_type();
+ t_type* oldKeyType = ((t_map*)oldType)->get_key_type();
+
+ t_type* newValType = ((t_map*)newType)->get_val_type();
+ t_type* oldValType = ((t_map*)oldType)->get_val_type();
+
+ return (compare_type(newKeyType, oldKeyType) && compare_type(newValType, oldValType));
+ }
+ else if(newType->is_set() && oldType->is_set())
+ {
+ t_type* newElementType = ((t_set*)newType)->get_elem_type();
+ t_type* oldElementType = ((t_set*)oldType)->get_elem_type();
+ return compare_type(newElementType, oldElementType);
+ }
+ else
+ {
+ return false;
+ }
+ }
+ else if(newType->get_name() == oldType->get_name())
+ {
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+}
+
+bool compare_pair(std::pair<t_const_value*, t_const_value*> newMapPair, std::pair<t_const_value*, t_const_value*> oldMapPair)
+{
+ return compare_defaults(newMapPair.first, oldMapPair.first) && compare_defaults(newMapPair.second, oldMapPair.second);
+}
+
+// This function returns 'true' if the default values are the same. Returns 'false' if they are different.
+bool compare_defaults(t_const_value* newStructDefault, t_const_value* oldStructDefault)
+{
+ if(newStructDefault == NULL && oldStructDefault == NULL) return true;
+ else if(newStructDefault == NULL && oldStructDefault != NULL) return false;
+ else if (newStructDefault != NULL && oldStructDefault == NULL) return false;
+
+ if(newStructDefault->get_type() != oldStructDefault->get_type())
+ {
+ return false;
+ }
+
+ switch(newStructDefault->get_type())
+ {
+ case t_const_value::CV_INTEGER:
+ return (newStructDefault->get_integer() == oldStructDefault->get_integer());
+ case t_const_value::CV_DOUBLE:
+ return (newStructDefault->get_double() == oldStructDefault->get_double());
+ case t_const_value::CV_STRING:
+ return (newStructDefault->get_string() == oldStructDefault->get_string());
+ case t_const_value::CV_LIST:
+ {
+      const std::vector<t_const_value*>& oldDefaultList = oldStructDefault->get_list();
+      const std::vector<t_const_value*>& newDefaultList = newStructDefault->get_list();
+ bool defaultValuesCompare = (oldDefaultList.size() == newDefaultList.size());
+
+ return defaultValuesCompare && std::equal(newDefaultList.begin(), newDefaultList.end(), oldDefaultList.begin(), compare_defaults);
+ }
+ case t_const_value::CV_MAP:
+ {
+      const std::map<t_const_value*, t_const_value*> newMap = newStructDefault->get_map();
+      const std::map<t_const_value*, t_const_value*> oldMap = oldStructDefault->get_map();
+
+ bool defaultValuesCompare = (oldMap.size() == newMap.size());
+
+ return defaultValuesCompare && std::equal(newMap.begin(), newMap.end(), oldMap.begin(), compare_pair);
+ }
+ case t_const_value::CV_IDENTIFIER:
+ return (newStructDefault->get_identifier() == oldStructDefault->get_identifier());
+ default:
+ return false;
+ }
+
+}
+
+void compare_struct_field(t_field* newField, t_field* oldField, std::string oldStructName)
+{
+ t_type* newFieldType = newField->get_type();
+ t_type* oldFieldType = oldField->get_type();
+ if(!compare_type(newFieldType, oldFieldType))
+ {
+ thrift_audit_failure("Struct Field Type Changed for Id = %d in %s \n", newField->get_key(), oldStructName.c_str());
+ }
+
+  // A struct member is treated as optional if it is marked optional explicitly, or if it is assigned a default value.
+ bool newStructFieldOptional = (newField->get_req() != t_field::T_REQUIRED);
+ bool oldStructFieldOptional = (oldField->get_req() != t_field::T_REQUIRED);
+
+ if(newStructFieldOptional != oldStructFieldOptional)
+ {
+ thrift_audit_failure("Struct Field Requiredness Changed for Id = %d in %s \n", newField->get_key(), oldStructName.c_str());
+ }
+ if(newStructFieldOptional || oldStructFieldOptional)
+ {
+ if(!compare_defaults(newField->get_value(), oldField->get_value()))
+ {
+ thrift_audit_warning(1, "Default value changed for Id = %d in %s \n", newField->get_key(), oldStructName.c_str());
+ }
+ }
+
+ std::string fieldName = newField->get_name();
+ if(fieldName != oldField->get_name())
+ {
+ thrift_audit_warning(1, "Struct field name changed for Id = %d in %s\n", newField->get_key(), oldStructName.c_str());
+ }
+
+}
+
+void compare_single_struct(t_struct* newStruct, t_struct* oldStruct, const std::string& oldStructName = std::string())
+{
+ std::string structName = oldStructName.empty() ? oldStruct->get_name() : oldStructName;
+  const std::vector<t_field*>& oldStructMembersInIdOrder = oldStruct->get_sorted_members();
+  const std::vector<t_field*>& newStructMembersInIdOrder = newStruct->get_sorted_members();
+  std::vector<t_field*>::const_iterator oldStructMemberIt = oldStructMembersInIdOrder.begin();
+  std::vector<t_field*>::const_iterator newStructMemberIt = newStructMembersInIdOrder.begin();
+
+ // Since we have the struct members in their ID order, comparing their IDs can be done by traversing the two member
+ // lists together.
+ while(!(oldStructMemberIt == oldStructMembersInIdOrder.end() && newStructMemberIt == newStructMembersInIdOrder.end()))
+ {
+ if(newStructMemberIt == newStructMembersInIdOrder.end() && oldStructMemberIt != oldStructMembersInIdOrder.end())
+ {
+ // A field ID has been removed from the end.
+ thrift_audit_failure("Struct Field removed for Id = %d in %s \n", (*oldStructMemberIt)->get_key(), structName.c_str());
+ oldStructMemberIt++;
+ }
+ else if(newStructMemberIt != newStructMembersInIdOrder.end() && oldStructMemberIt == oldStructMembersInIdOrder.end())
+ {
+ //New field ID has been added to the end.
+ if((*newStructMemberIt)->get_req() == t_field::T_REQUIRED)
+ {
+ thrift_audit_failure("Required Struct Field Added for Id = %d in %s \n", (*newStructMemberIt)->get_key(), structName.c_str());
+ }
+ newStructMemberIt++;
+ }
+ else if((*newStructMemberIt)->get_key() == (*oldStructMemberIt)->get_key())
+ {
+ //Field ID found in both structs. Compare field types, default values.
+ compare_struct_field(*newStructMemberIt, *oldStructMemberIt, structName);
+
+ newStructMemberIt++;
+ oldStructMemberIt++;
+ }
+ else if((*newStructMemberIt)->get_key() < (*oldStructMemberIt)->get_key())
+ {
+ //New Field Id is inserted in between
+ //Adding fields to struct is fine, but adding them in the middle is suspicious. Error!!
+ thrift_audit_failure("Struct field is added in the middle with Id = %d in %s\n", (*newStructMemberIt)->get_key(), structName.c_str());
+ newStructMemberIt++;
+ }
+ else if((*newStructMemberIt)->get_key() > (*oldStructMemberIt)->get_key())
+ {
+ //A field is deleted in newStruct.
+ thrift_audit_failure("Struct Field removed for Id = %d in %s \n", (*oldStructMemberIt)->get_key(), structName.c_str());
+ oldStructMemberIt++;
+ }
+
+ }
+}
+
+void compare_structs(const std::vector<t_struct*>& newStructList, const std::vector<t_struct*>& oldStructList)
+{
+  std::map<std::string, t_struct*> newStructMap;
+  std::vector<t_struct*>::const_iterator newStructListIt;
+ for(newStructListIt = newStructList.begin(); newStructListIt != newStructList.end(); newStructListIt++)
+ {
+ newStructMap[(*newStructListIt)->get_name()] = *newStructListIt;
+ }
+
+  std::vector<t_struct*>::const_iterator oldStructListIt;
+ for(oldStructListIt = oldStructList.begin(); oldStructListIt != oldStructList.end(); oldStructListIt++)
+ {
+    std::map<std::string, t_struct*>::iterator newStructMapIt;
+ newStructMapIt = newStructMap.find((*oldStructListIt)->get_name());
+ if(newStructMapIt == newStructMap.end())
+ {
+ thrift_audit_failure("Struct %s not found in new thrift file\n", (*oldStructListIt)->get_name().c_str());
+ }
+ else
+ {
+ compare_single_struct(newStructMapIt->second, *oldStructListIt);
+ }
+ }
+
+}
+
+void compare_single_function(t_function* newFunction, t_function* oldFunction)
+{
+ t_type* newFunctionReturnType = newFunction->get_returntype();
+
+ if(newFunction->is_oneway() != oldFunction->is_oneway())
+ {
+ thrift_audit_failure("Oneway attribute changed for function %s\n",oldFunction->get_name().c_str());
+ }
+ if(!compare_type(newFunctionReturnType, oldFunction->get_returntype()))
+ {
+ thrift_audit_failure("Return type changed for function %s\n",oldFunction->get_name().c_str());
+ }
+
+ //Compare function arguments.
+ compare_single_struct(newFunction->get_arglist(), oldFunction->get_arglist());
+ std::string exceptionName = oldFunction->get_name();
+ exceptionName += "_exception";
+ compare_single_struct(newFunction->get_xceptions(), oldFunction->get_xceptions(), exceptionName);
+}
+
+void compare_functions(const std::vector<t_function*>& newFunctionList, const std::vector<t_function*>& oldFunctionList)
+{
+  std::map<std::string, t_function*> newFunctionMap;
+  std::map<std::string, t_function*>::iterator newFunctionMapIt;
+  for(std::vector<t_function*>::const_iterator newFunctionIt = newFunctionList.begin();
+ newFunctionIt != newFunctionList.end();
+ newFunctionIt++)
+ {
+ newFunctionMap[(*newFunctionIt)->get_name()] = *newFunctionIt;
+ }
+
+  for(std::vector<t_function*>::const_iterator oldFunctionIt = oldFunctionList.begin();
+ oldFunctionIt != oldFunctionList.end();
+ oldFunctionIt++)
+ {
+ newFunctionMapIt = newFunctionMap.find((*oldFunctionIt)->get_name());
+ if(newFunctionMapIt == newFunctionMap.end())
+ {
+ thrift_audit_failure("New Thrift File has missing function %s\n",(*oldFunctionIt)->get_name().c_str());
+ continue;
+ }
+ else
+ {
+ //Function is found in both thrift files. Compare return type and argument list
+ compare_single_function(newFunctionMapIt->second, *oldFunctionIt);
+ }
+ }
+
+}
+
+void compare_services(const std::vector<t_service*>& newServices, const std::vector<t_service*>& oldServices)
+{
+  std::vector<t_service*>::const_iterator oldServiceIt;
+
+  std::map<std::string, t_service*> newServiceMap;
+  for(std::vector<t_service*>::const_iterator newServiceIt = newServices.begin();
+ newServiceIt != newServices.end();
+ newServiceIt++)
+ {
+ newServiceMap[(*newServiceIt)->get_name()] = *newServiceIt;
+ }
+
+
+ for(oldServiceIt = oldServices.begin(); oldServiceIt != oldServices.end(); oldServiceIt++)
+ {
+ const std::string oldServiceName = (*oldServiceIt)->get_name();
+    std::map<std::string, t_service*>::iterator newServiceMapIt = newServiceMap.find(oldServiceName);
+
+ if(newServiceMapIt == newServiceMap.end())
+ {
+ thrift_audit_failure("New Thrift file is missing a service %s\n", oldServiceName.c_str());
+ }
+ else
+ {
+ t_service* oldServiceExtends = (*oldServiceIt)->get_extends();
+ t_service* newServiceExtends = (newServiceMapIt->second)->get_extends();
+
+ if(oldServiceExtends == NULL)
+ {
+        // It is fine to add an 'extends' clause. So if the service in the older thrift file did not
+        // extend anything, we are fine; do nothing.
+ }
+ else if(oldServiceExtends != NULL && newServiceExtends == NULL)
+ {
+ thrift_audit_failure("Change in Service inheritance for %s\n", oldServiceName.c_str());
+ }
+ else
+ {
+ std::string oldExtendsName = oldServiceExtends->get_name();
+ std::string newExtendsName = newServiceExtends->get_name();
+
+ if( newExtendsName != oldExtendsName)
+ {
+ thrift_audit_failure("Change in Service inheritance for %s\n", oldServiceName.c_str());
+ }
+ }
+
+ compare_functions((newServiceMapIt->second)->get_functions(), (*oldServiceIt)->get_functions());
+ }
+
+ }
+
+}
+
+void compare_consts(const std::vector<t_const*>& newConst, const std::vector<t_const*>& oldConst)
+{
+  std::vector<t_const*>::const_iterator newConstIt;
+  std::vector<t_const*>::const_iterator oldConstIt;
+
+  std::map<std::string, t_const*> newConstMap;
+
+ for(newConstIt = newConst.begin(); newConstIt != newConst.end(); newConstIt++)
+ {
+ newConstMap[(*newConstIt)->get_name()] = *newConstIt;
+ }
+
+  std::map<std::string, t_const*>::const_iterator newConstMapIt;
+ for(oldConstIt = oldConst.begin(); oldConstIt != oldConst.end(); oldConstIt++)
+ {
+ newConstMapIt = newConstMap.find((*oldConstIt)->get_name());
+ if(newConstMapIt == newConstMap.end())
+ {
+ thrift_audit_warning(1, "Constants Missing %s \n", ((*oldConstIt)->get_name()).c_str());
+ }
+ else if(!compare_type((newConstMapIt->second)->get_type(), (*oldConstIt)->get_type()))
+ {
+ thrift_audit_warning(1, "Constant %s is of different type \n", ((*oldConstIt)->get_name()).c_str());
+ }
+ else if(!compare_defaults((newConstMapIt->second)->get_value(), (*oldConstIt)->get_value()))
+ {
+ thrift_audit_warning(1, "Constant %s has different value\n", ((*oldConstIt)->get_name()).c_str());
+ }
+ }
+}
diff --git a/vendor/github.com/apache/thrift/compiler/cpp/src/thrift/audit/t_audit.h b/vendor/github.com/apache/thrift/compiler/cpp/src/thrift/audit/t_audit.h
new file mode 100644
index 000000000..be79e3124
--- /dev/null
+++ b/vendor/github.com/apache/thrift/compiler/cpp/src/thrift/audit/t_audit.h
@@ -0,0 +1,14 @@
+#ifndef T_AUDIT_H
+#define T_AUDIT_H
+
+void compare_namespace(t_program* newProgram, t_program* oldProgram);
+void compare_enums(const std::vector<t_enum*>& newEnumList,
+                   const std::vector<t_enum*>& oldEnumList);
+bool compare_defaults(t_const_value* newStructDefault, t_const_value* oldStructDefault);
+void compare_structs(const std::vector<t_struct*>& newStructList,
+                     const std::vector<t_struct*>& oldStructList);
+void compare_services(const std::vector<t_service*>& newServices,
+                      const std::vector<t_service*>& oldServices);
+void compare_consts(const std::vector<t_const*>& newConst, const std::vector<t_const*>& oldConst);
+
+#endif
diff --git a/vendor/github.com/apache/thrift/compiler/cpp/src/thrift/common.cc b/vendor/github.com/apache/thrift/compiler/cpp/src/thrift/common.cc
new file mode 100644
index 000000000..3a2b9d359
--- /dev/null
+++ b/vendor/github.com/apache/thrift/compiler/cpp/src/thrift/common.cc
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include "thrift/common.h"
+#include "thrift/parse/t_base_type.h"
+
+t_type* g_type_void;
+t_type* g_type_string;
+t_type* g_type_binary;
+t_type* g_type_slist;
+t_type* g_type_bool;
+t_type* g_type_i8;
+t_type* g_type_i16;
+t_type* g_type_i32;
+t_type* g_type_i64;
+t_type* g_type_double;
+
+void initGlobals() {
+ g_type_void = new t_base_type("void", t_base_type::TYPE_VOID);
+ g_type_string = new t_base_type("string", t_base_type::TYPE_STRING);
+ g_type_binary = new t_base_type("string", t_base_type::TYPE_STRING);
+ ((t_base_type*)g_type_binary)->set_binary(true);
+ g_type_slist = new t_base_type("string", t_base_type::TYPE_STRING);
+ ((t_base_type*)g_type_slist)->set_string_list(true);
+ g_type_bool = new t_base_type("bool", t_base_type::TYPE_BOOL);
+ g_type_i8 = new t_base_type("i8", t_base_type::TYPE_I8);
+ g_type_i16 = new t_base_type("i16", t_base_type::TYPE_I16);
+ g_type_i32 = new t_base_type("i32", t_base_type::TYPE_I32);
+ g_type_i64 = new t_base_type("i64", t_base_type::TYPE_I64);
+ g_type_double = new t_base_type("double", t_base_type::TYPE_DOUBLE);
+}
+
+void clearGlobals() {
+ delete g_type_void;
+ delete g_type_string;
+ delete g_type_bool;
+ delete g_type_i8;
+ delete g_type_i16;
+ delete g_type_i32;
+ delete g_type_i64;
+ delete g_type_double;
+}
+
+/**
+ * These are not really needed for plugins but cause link errors if left out.
+ */
+
+/**
+ * The location of the last parsed doctext comment.
+ */
+int g_doctext_lineno;
+int g_program_doctext_lineno = 0;
+PROGDOCTEXT_STATUS g_program_doctext_status = INVALID;
diff --git a/vendor/github.com/apache/thrift/compiler/cpp/src/thrift/common.h b/vendor/github.com/apache/thrift/compiler/cpp/src/thrift/common.h
new file mode 100644
index 000000000..694884636
--- /dev/null
+++ b/vendor/github.com/apache/thrift/compiler/cpp/src/thrift/common.h
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef T_COMMON_H
+#define T_COMMON_H
+
+#include "thrift/parse/t_type.h"
+
+/**
+ * Global types for the parser to be able to reference
+ */
+
+extern t_type* g_type_void;
+extern t_type* g_type_string;
+extern t_type* g_type_binary;
+extern t_type* g_type_slist;
+extern t_type* g_type_bool;
+extern t_type* g_type_i8;
+extern t_type* g_type_i16;
+extern t_type* g_type_i32;
+extern t_type* g_type_i64;
+extern t_type* g_type_double;
+
+void initGlobals();
+void clearGlobals();
+
+#endif
diff --git a/vendor/github.com/apache/thrift/compiler/cpp/src/thrift/generate/t_as3_generator.cc b/vendor/github.com/apache/thrift/compiler/cpp/src/thrift/generate/t_as3_generator.cc
new file mode 100644
index 000000000..fc92de954
--- /dev/null
+++ b/vendor/github.com/apache/thrift/compiler/cpp/src/thrift/generate/t_as3_generator.cc
@@ -0,0 +1,2594 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <sstream>
+#include <string>
+#include <fstream>
+#include <iostream>
+#include <vector>
+#include <cctype>
+
+#include <sys/stat.h>
+#include <stdexcept>
+
+#include "thrift/platform.h"
+#include "thrift/generate/t_oop_generator.h"
+
+using std::map;
+using std::ofstream;
+using std::ostringstream;
+using std::string;
+using std::stringstream;
+using std::vector;
+
+static const string endl = "\n"; // avoid ostream << std::endl flushes
+
+/**
+ * AS3 code generator.
+ *
+ */
+class t_as3_generator : public t_oop_generator {
+public:
+ t_as3_generator(t_program* program,
+ const std::map<std::string, std::string>& parsed_options,
+ const std::string& option_string)
+ : t_oop_generator(program) {
+ (void)option_string;
+ std::map<std::string, std::string>::const_iterator iter;
+
+ bindable_ = false;
+ for( iter = parsed_options.begin(); iter != parsed_options.end(); ++iter) {
+ if( iter->first.compare("bindable") == 0) {
+ bindable_ = true;
+ } else {
+ throw "unknown option as3:" + iter->first;
+ }
+ }
+
+ out_dir_base_ = "gen-as3";
+ }
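+
+ // Usage sketch (illustrative, not part of the upstream generator): the
+ // "bindable" option is passed as a generator option on the Thrift command
+ // line, e.g.
+ //
+ //   thrift --gen as3:bindable service.thrift
+ //
+ // Any other as3:<option> is rejected by the loop above.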
+
+ /**
+ * Init and close methods
+ */
+
+ void init_generator();
+ void close_generator();
+
+ void generate_consts(std::vector<t_const*> consts);
+
+ /**
+ * Program-level generation functions
+ */
+
+ void generate_typedef(t_typedef* ttypedef);
+ void generate_enum(t_enum* tenum);
+ void generate_struct(t_struct* tstruct);
+ void generate_xception(t_struct* txception);
+ void generate_service(t_service* tservice);
+
+ void print_const_value(std::ofstream& out,
+ std::string name,
+ t_type* type,
+ t_const_value* value,
+ bool in_static,
+ bool defval = false);
+ std::string render_const_value(ofstream& out,
+ std::string name,
+ t_type* type,
+ t_const_value* value);
+
+ /**
+ * Service-level generation functions
+ */
+
+ void generate_as3_struct(t_struct* tstruct, bool is_exception);
+
+ void generate_as3_struct_definition(std::ofstream& out,
+ t_struct* tstruct,
+ bool is_xception = false,
+ bool in_class = false,
+ bool is_result = false);
+ // removed -- equality,compare_to
+ void generate_as3_struct_reader(std::ofstream& out, t_struct* tstruct);
+ void generate_as3_validator(std::ofstream& out, t_struct* tstruct);
+ void generate_as3_struct_result_writer(std::ofstream& out, t_struct* tstruct);
+ void generate_as3_struct_writer(std::ofstream& out, t_struct* tstruct);
+ void generate_as3_struct_tostring(std::ofstream& out, t_struct* tstruct, bool bindable);
+ void generate_as3_meta_data_map(std::ofstream& out, t_struct* tstruct);
+ void generate_field_value_meta_data(std::ofstream& out, t_type* type);
+ std::string get_as3_type_string(t_type* type);
+ void generate_reflection_setters(std::ostringstream& out,
+ t_type* type,
+ std::string field_name,
+ std::string cap_name);
+ void generate_reflection_getters(std::ostringstream& out,
+ t_type* type,
+ std::string field_name,
+ std::string cap_name);
+ void generate_generic_field_getters_setters(std::ofstream& out, t_struct* tstruct);
+ void generate_generic_isset_method(std::ofstream& out, t_struct* tstruct);
+ void generate_as3_bean_boilerplate(std::ofstream& out, t_struct* tstruct, bool bindable);
+
+ void generate_function_helpers(t_function* tfunction);
+ std::string get_cap_name(std::string name);
+ std::string generate_isset_check(t_field* field);
+ std::string generate_isset_check(std::string field);
+ void generate_isset_set(ofstream& out, t_field* field);
+ // removed std::string isset_field_id(t_field* field);
+
+ void generate_service_interface(t_service* tservice);
+ void generate_service_helpers(t_service* tservice);
+ void generate_service_client(t_service* tservice);
+ void generate_service_server(t_service* tservice);
+ void generate_process_function(t_service* tservice, t_function* tfunction);
+
+ /**
+ * Serialization constructs
+ */
+
+ void generate_deserialize_field(std::ofstream& out, t_field* tfield, std::string prefix = "");
+
+ void generate_deserialize_struct(std::ofstream& out, t_struct* tstruct, std::string prefix = "");
+
+ void generate_deserialize_container(std::ofstream& out, t_type* ttype, std::string prefix = "");
+
+ void generate_deserialize_set_element(std::ofstream& out, t_set* tset, std::string prefix = "");
+
+ void generate_deserialize_map_element(std::ofstream& out, t_map* tmap, std::string prefix = "");
+
+ void generate_deserialize_list_element(std::ofstream& out,
+ t_list* tlist,
+ std::string prefix = "");
+
+ void generate_serialize_field(std::ofstream& out, t_field* tfield, std::string prefix = "");
+
+ void generate_serialize_struct(std::ofstream& out, t_struct* tstruct, std::string prefix = "");
+
+ void generate_serialize_container(std::ofstream& out, t_type* ttype, std::string prefix = "");
+
+ void generate_serialize_map_element(std::ofstream& out,
+ t_map* tmap,
+ std::string iter,
+ std::string map);
+
+ void generate_serialize_set_element(std::ofstream& out, t_set* tmap, std::string iter);
+
+ void generate_serialize_list_element(std::ofstream& out, t_list* tlist, std::string iter);
+
+ void generate_as3_doc(std::ofstream& out, t_doc* tdoc);
+
+ void generate_as3_doc(std::ofstream& out, t_function* tdoc);
+
+ /**
+ * Helper rendering functions
+ */
+
+ std::string as3_package();
+ std::string as3_type_imports();
+ std::string as3_thrift_imports();
+ std::string as3_thrift_gen_imports(t_struct* tstruct, string& imports);
+ std::string as3_thrift_gen_imports(t_service* tservice);
+ std::string type_name(t_type* ttype, bool in_container = false, bool in_init = false);
+ std::string base_type_name(t_base_type* tbase, bool in_container = false);
+ std::string declare_field(t_field* tfield, bool init = false);
+ std::string function_signature(t_function* tfunction, std::string prefix = "");
+ std::string argument_list(t_struct* tstruct);
+ std::string type_to_enum(t_type* ttype);
+ std::string get_enum_class_name(t_type* type);
+
+ bool type_can_be_null(t_type* ttype) {
+ ttype = get_true_type(ttype);
+
+ return ttype->is_container() || ttype->is_struct() || ttype->is_xception()
+ || ttype->is_string();
+ }
+
+ std::string constant_name(std::string name);
+
+private:
+ /**
+ * File streams
+ */
+
+ std::string package_name_;
+ std::ofstream f_service_;
+ std::string package_dir_;
+
+ bool bindable_;
+};
+
+/**
+ * Prepares for file generation by opening up the necessary file output
+ * streams.
+ *
+ * @param tprogram The program to generate
+ */
+void t_as3_generator::init_generator() {
+ // Make output directory
+ MKDIR(get_out_dir().c_str());
+ package_name_ = program_->get_namespace("as3");
+
+ string dir = package_name_;
+ string subdir = get_out_dir();
+ string::size_type loc;
+ while ((loc = dir.find(".")) != string::npos) {
+ subdir = subdir + "/" + dir.substr(0, loc);
+ MKDIR(subdir.c_str());
+ dir = dir.substr(loc + 1);
+ }
+ if (dir.size() > 0) {
+ subdir = subdir + "/" + dir;
+ MKDIR(subdir.c_str());
+ }
+
+ package_dir_ = subdir;
+}
+
+/**
+ * Packages the generated file
+ *
+ * @return String of the package, i.e. "package org.apache.thriftdemo;"
+ */
+string t_as3_generator::as3_package() {
+ if (!package_name_.empty()) {
+ return string("package ") + package_name_ + " ";
+ }
+ return "package ";
+}
+
+/**
+ * Prints standard as3 imports
+ *
+ * @return List of imports for As3 types that are used in here
+ */
+string t_as3_generator::as3_type_imports() {
+ return string() + "import org.apache.thrift.Set;\n" + "import flash.utils.ByteArray;\n"
+ + "import flash.utils.Dictionary;\n\n";
+}
+
+/**
+ * Prints standard as3 imports
+ *
+ * @return List of imports necessary for thrift
+ */
+string t_as3_generator::as3_thrift_imports() {
+ return string() + "import org.apache.thrift.*;\n" + "import org.apache.thrift.meta_data.*;\n"
+ + "import org.apache.thrift.protocol.*;\n\n";
+}
+
+/**
+ * Prints imports needed for a given type
+ *
+ * @return List of imports necessary for a given t_struct
+ */
+string t_as3_generator::as3_thrift_gen_imports(t_struct* tstruct, string& imports) {
+
+ const vector<t_field*>& members = tstruct->get_members();
+ vector<t_field*>::const_iterator m_iter;
+
+ // For each type check if it is from a different namespace
+ for (m_iter = members.begin(); m_iter != members.end(); ++m_iter) {
+ t_program* program = (*m_iter)->get_type()->get_program();
+ if (program != NULL && program != program_) {
+ string package = program->get_namespace("as3");
+ if (!package.empty()) {
+ if (imports.find(package + "." + (*m_iter)->get_type()->get_name()) == string::npos) {
+ imports.append("import " + package + "." + (*m_iter)->get_type()->get_name() + ";\n");
+ }
+ }
+ }
+ }
+ return imports;
+}
+
+/**
+ * Prints imports needed for a given type
+ *
+ * @return List of imports necessary for a given t_service
+ */
+string t_as3_generator::as3_thrift_gen_imports(t_service* tservice) {
+ string imports;
+ const vector<t_function*>& functions = tservice->get_functions();
+ vector<t_function*>::const_iterator f_iter;
+
+ // For each type check if it is from a different namespace
+ for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) {
+ t_program* program = (*f_iter)->get_returntype()->get_program();
+ if (program != NULL && program != program_) {
+ string package = program->get_namespace("as3");
+ if (!package.empty()) {
+ if (imports.find(package + "." + (*f_iter)->get_returntype()->get_name()) == string::npos) {
+ imports.append("import " + package + "." + (*f_iter)->get_returntype()->get_name()
+ + ";\n");
+ }
+ }
+ }
+
+ as3_thrift_gen_imports((*f_iter)->get_arglist(), imports);
+ as3_thrift_gen_imports((*f_iter)->get_xceptions(), imports);
+ }
+
+ return imports;
+}
+
+/**
+ * Nothing in As3
+ */
+void t_as3_generator::close_generator() {
+}
+
+/**
+ * Generates a typedef. This is not done in As3, since it does
+ * not support arbitrary name replacements, and it'd be a wacky waste
+ * of overhead to make wrapper classes.
+ *
+ * @param ttypedef The type definition
+ */
+void t_as3_generator::generate_typedef(t_typedef* ttypedef) {
+ (void)ttypedef;
+}
+
+/**
+ * Enums are a class with a set of static constants.
+ *
+ * @param tenum The enumeration
+ */
+void t_as3_generator::generate_enum(t_enum* tenum) {
+ // Make output file
+ string f_enum_name = package_dir_ + "/" + (tenum->get_name()) + ".as";
+ ofstream f_enum;
+ f_enum.open(f_enum_name.c_str());
+
+ // Comment and package it
+ f_enum << autogen_comment() << as3_package() << endl;
+
+ scope_up(f_enum);
+ // Add as3 imports
+ f_enum << string() + "import org.apache.thrift.Set;" << endl << "import flash.utils.Dictionary;"
+ << endl;
+
+ indent(f_enum) << "public class " << tenum->get_name() << " ";
+ scope_up(f_enum);
+
+ vector<t_enum_value*> constants = tenum->get_constants();
+ vector<t_enum_value*>::iterator c_iter;
+ for (c_iter = constants.begin(); c_iter != constants.end(); ++c_iter) {
+ int value = (*c_iter)->get_value();
+ indent(f_enum) << "public static const " << (*c_iter)->get_name() << ":int = " << value << ";"
+ << endl;
+ }
+
+ // Create a static Set with all valid values for this enum
+ f_enum << endl;
+
+ indent(f_enum) << "public static const VALID_VALUES:Set = new Set(";
+ indent_up();
+ bool firstValue = true;
+ for (c_iter = constants.begin(); c_iter != constants.end(); ++c_iter) {
+ // populate set
+ f_enum << (firstValue ? "" : ", ") << (*c_iter)->get_name();
+ firstValue = false;
+ }
+ indent_down();
+ f_enum << ");" << endl;
+
+ indent(f_enum) << "public static const VALUES_TO_NAMES:Dictionary = new Dictionary();" << endl;
+
+ scope_up(f_enum);
+ for (c_iter = constants.begin(); c_iter != constants.end(); ++c_iter) {
+ indent(f_enum) << "VALUES_TO_NAMES[" << (*c_iter)->get_name() << "] = \""
+ << (*c_iter)->get_name() << "\";" << endl;
+ }
+ f_enum << endl;
+
+ scope_down(f_enum);
+
+ scope_down(f_enum); // end class
+
+ scope_down(f_enum); // end package
+
+ f_enum.close();
+}
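+
+// Illustrative sketch of what generate_enum above emits for a hypothetical
+// Thrift enum `enum Status { OK = 0, FAILED = 1 }` (formatting approximate,
+// governed by the code above):
+//
+//   public class Status {
+//     public static const OK:int = 0;
+//     public static const FAILED:int = 1;
+//
+//     public static const VALID_VALUES:Set = new Set(OK, FAILED);
+//     public static const VALUES_TO_NAMES:Dictionary = new Dictionary();
+//     {
+//       VALUES_TO_NAMES[OK] = "OK";
+//       VALUES_TO_NAMES[FAILED] = "FAILED";
+//     }
+//   }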
+
+/**
+ * Generates a class that holds all the constants.
+ */
+void t_as3_generator::generate_consts(std::vector<t_const*> consts) {
+ if (consts.empty()) {
+ return;
+ }
+
+ string f_consts_name = package_dir_ + "/" + program_name_ + "Constants.as";
+ ofstream f_consts;
+ f_consts.open(f_consts_name.c_str());
+
+ // Print header
+ f_consts << autogen_comment() << as3_package();
+
+ scope_up(f_consts);
+ f_consts << endl;
+
+ f_consts << as3_type_imports();
+
+ indent(f_consts) << "public class " << program_name_ << "Constants {" << endl << endl;
+ indent_up();
+ vector<t_const*>::iterator c_iter;
+ for (c_iter = consts.begin(); c_iter != consts.end(); ++c_iter) {
+ print_const_value(f_consts,
+ (*c_iter)->get_name(),
+ (*c_iter)->get_type(),
+ (*c_iter)->get_value(),
+ false);
+ }
+ indent_down();
+ indent(f_consts) << "}" << endl;
+ scope_down(f_consts);
+ f_consts.close();
+}
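+
+// Illustrative sketch of generate_consts output for a hypothetical program
+// "Example" containing `const i32 MAX_RETRIES = 3` (per-type rendering is
+// handled by print_const_value below):
+//
+//   public class ExampleConstants {
+//
+//     public static const MAX_RETRIES:int = 3;
+//
+//   }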
+
+void t_as3_generator::print_const_value(std::ofstream& out,
+ string name,
+ t_type* type,
+ t_const_value* value,
+ bool in_static,
+ bool defval) {
+ type = get_true_type(type);
+
+ indent(out);
+ if (!defval) {
+ out << (in_static ? "var " : "public static const ");
+ }
+ if (type->is_base_type()) {
+ string v2 = render_const_value(out, name, type, value);
+ out << name;
+ if (!defval) {
+ out << ":" << type_name(type);
+ }
+ out << " = " << v2 << ";" << endl << endl;
+ } else if (type->is_enum()) {
+ out << name;
+ if (!defval) {
+ out << ":" << type_name(type);
+ }
+ out << " = " << value->get_integer() << ";" << endl << endl;
+ } else if (type->is_struct() || type->is_xception()) {
+ const vector<t_field*>& fields = ((t_struct*)type)->get_members();
+ vector<t_field*>::const_iterator f_iter;
+ const map<t_const_value*, t_const_value*>& val = value->get_map();
+ map<t_const_value*, t_const_value*>::const_iterator v_iter;
+ out << name << ":" << type_name(type) << " = new " << type_name(type, false, true) << "();"
+ << endl;
+ if (!in_static) {
+ indent(out) << "{" << endl;
+ indent_up();
+ indent(out) << "new function():void {" << endl;
+ indent_up();
+ }
+ for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) {
+ t_type* field_type = NULL;
+ for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
+ if ((*f_iter)->get_name() == v_iter->first->get_string()) {
+ field_type = (*f_iter)->get_type();
+ }
+ }
+ if (field_type == NULL) {
+ throw "type error: " + type->get_name() + " has no field " + v_iter->first->get_string();
+ }
+ string val = render_const_value(out, name, field_type, v_iter->second);
+ indent(out) << name << ".";
+ out << v_iter->first->get_string() << " = " << val << ";" << endl;
+ }
+ if (!in_static) {
+ indent_down();
+ indent(out) << "}();" << endl;
+ indent_down();
+ indent(out) << "}" << endl;
+ }
+ out << endl;
+ } else if (type->is_map()) {
+ out << name;
+ if (!defval) {
+ out << ":" << type_name(type);
+ }
+ out << " = new " << type_name(type, false, true) << "();" << endl;
+ if (!in_static) {
+ indent(out) << "{" << endl;
+ indent_up();
+ indent(out) << "new function():void {" << endl;
+ indent_up();
+ }
+ t_type* ktype = ((t_map*)type)->get_key_type();
+ t_type* vtype = ((t_map*)type)->get_val_type();
+ const map<t_const_value*, t_const_value*>& val = value->get_map();
+ map<t_const_value*, t_const_value*>::const_iterator v_iter;
+ for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) {
+ string key = render_const_value(out, name, ktype, v_iter->first);
+ string val = render_const_value(out, name, vtype, v_iter->second);
+ indent(out) << name << "[" << key << "] = " << val << ";" << endl;
+ }
+ if (!in_static) {
+ indent_down();
+ indent(out) << "}();" << endl;
+ indent_down();
+ indent(out) << "}" << endl;
+ }
+ out << endl;
+ } else if (type->is_list() || type->is_set()) {
+ out << name;
+ if (!defval) {
+ out << ":" << type_name(type);
+ }
+ out << " = new " << type_name(type, false, true) << "();" << endl;
+ if (!in_static) {
+ indent(out) << "{" << endl;
+ indent_up();
+ indent(out) << "new function():void {" << endl;
+ indent_up();
+ }
+ t_type* etype;
+ if (type->is_list()) {
+ etype = ((t_list*)type)->get_elem_type();
+ } else {
+ etype = ((t_set*)type)->get_elem_type();
+ }
+ const vector<t_const_value*>& val = value->get_list();
+ vector<t_const_value*>::const_iterator v_iter;
+ for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) {
+ string val = render_const_value(out, name, etype, *v_iter);
+ indent(out) << name << "." << (type->is_list() ? "push" : "add") << "(" << val << ");"
+ << endl;
+ }
+ if (!in_static) {
+ indent_down();
+ indent(out) << "}();" << endl;
+ indent_down();
+ indent(out) << "}" << endl;
+ }
+ out << endl;
+ } else {
+ throw "compiler error: no const of type " + type->get_name();
+ }
+}
+
+string t_as3_generator::render_const_value(ofstream& out,
+ string name,
+ t_type* type,
+ t_const_value* value) {
+ (void)name;
+ type = get_true_type(type);
+ std::ostringstream render;
+
+ if (type->is_base_type()) {
+ t_base_type::t_base tbase = ((t_base_type*)type)->get_base();
+ switch (tbase) {
+ case t_base_type::TYPE_STRING:
+ render << '"' << get_escaped_string(value) << '"';
+ break;
+ case t_base_type::TYPE_BOOL:
+ render << ((value->get_integer() > 0) ? "true" : "false");
+ break;
+ case t_base_type::TYPE_I8:
+ render << "(byte)" << value->get_integer();
+ break;
+ case t_base_type::TYPE_I16:
+ render << "(short)" << value->get_integer();
+ break;
+ case t_base_type::TYPE_I32:
+ render << value->get_integer();
+ break;
+ case t_base_type::TYPE_I64:
+ render << value->get_integer() << "L";
+ break;
+ case t_base_type::TYPE_DOUBLE:
+ if (value->get_type() == t_const_value::CV_INTEGER) {
+ render << "(double)" << value->get_integer();
+ } else {
+ render << value->get_double();
+ }
+ break;
+ default:
+ throw "compiler error: no const of base type " + t_base_type::t_base_name(tbase);
+ }
+ } else if (type->is_enum()) {
+ render << value->get_integer();
+ } else {
+ string t = tmp("tmp");
+ print_const_value(out, t, type, value, true);
+ render << t;
+ }
+
+ return render.str();
+}
+
+/**
+ * Generates a struct definition for a thrift data type. This is a class
+ * with data members, read(), write(), and an inner Isset class.
+ *
+ * @param tstruct The struct definition
+ */
+void t_as3_generator::generate_struct(t_struct* tstruct) {
+ generate_as3_struct(tstruct, false);
+}
+
+/**
+ * Exceptions are structs, but they inherit from Exception
+ *
+ * @param tstruct The struct definition
+ */
+void t_as3_generator::generate_xception(t_struct* txception) {
+ generate_as3_struct(txception, true);
+}
+
+/**
+ * As3 struct definition.
+ *
+ * @param tstruct The struct definition
+ */
+void t_as3_generator::generate_as3_struct(t_struct* tstruct, bool is_exception) {
+ // Make output file
+ string f_struct_name = package_dir_ + "/" + (tstruct->get_name()) + ".as";
+ ofstream f_struct;
+ f_struct.open(f_struct_name.c_str());
+
+ f_struct << autogen_comment() << as3_package();
+
+ scope_up(f_struct);
+ f_struct << endl;
+
+ string imports;
+
+ f_struct << as3_type_imports() << as3_thrift_imports() << as3_thrift_gen_imports(tstruct, imports)
+ << endl;
+
+ if (bindable_ && !is_exception) {
+ f_struct << "import flash.events.Event;" << endl << "import flash.events.EventDispatcher;"
+ << endl << "import mx.events.PropertyChangeEvent;" << endl;
+ }
+
+ generate_as3_struct_definition(f_struct, tstruct, is_exception);
+
+ scope_down(f_struct); // end of package
+ f_struct.close();
+}
+
+/**
+ * As3 struct definition. This has various parameters, as it could be
+ * generated standalone or inside another class as a helper. If it
+ * is a helper, it is generated as a static class.
+ *
+ * @param tstruct The struct definition
+ * @param is_exception Is this an exception?
+ * @param in_class If inside a class, needs to be static class
+ * @param is_result If this is a result it needs a different writer
+ */
+void t_as3_generator::generate_as3_struct_definition(ofstream& out,
+ t_struct* tstruct,
+ bool is_exception,
+ bool in_class,
+ bool is_result) {
+ generate_as3_doc(out, tstruct);
+
+ bool is_final = (tstruct->annotations_.find("final") != tstruct->annotations_.end());
+ bool bindable = !is_exception && !in_class && bindable_;
+
+ indent(out) << (in_class ? "" : "public ") << (is_final ? "final " : "") << "class "
+ << tstruct->get_name() << " ";
+
+ if (is_exception) {
+ out << "extends Error ";
+ } else if (bindable) {
+ out << "extends EventDispatcher ";
+ }
+ out << "implements TBase ";
+
+ scope_up(out);
+
+ indent(out) << "private static const STRUCT_DESC:TStruct = new TStruct(\"" << tstruct->get_name()
+ << "\");" << endl;
+
+ // Members are public for -as3, private for -as3bean
+ const vector<t_field*>& members = tstruct->get_members();
+ vector<t_field*>::const_iterator m_iter;
+
+ for (m_iter = members.begin(); m_iter != members.end(); ++m_iter) {
+ indent(out) << "private static const " << constant_name((*m_iter)->get_name())
+ << "_FIELD_DESC:TField = new TField(\"" << (*m_iter)->get_name() << "\", "
+ << type_to_enum((*m_iter)->get_type()) << ", " << (*m_iter)->get_key() << ");"
+ << endl;
+ }
+
+ out << endl;
+
+ for (m_iter = members.begin(); m_iter != members.end(); ++m_iter) {
+ generate_as3_doc(out, *m_iter);
+ indent(out) << "private var _" << (*m_iter)->get_name() + ":" + type_name((*m_iter)->get_type())
+ << ";" << endl;
+
+ indent(out) << "public static const " << upcase_string((*m_iter)->get_name())
+ << ":int = " << (*m_iter)->get_key() << ";" << endl;
+ }
+
+ out << endl;
+
+ // Inner Isset class
+ if (members.size() > 0) {
+ for (m_iter = members.begin(); m_iter != members.end(); ++m_iter) {
+ if (!type_can_be_null((*m_iter)->get_type())) {
+ indent(out) << "private var __isset_" << (*m_iter)->get_name() << ":Boolean = false;"
+ << endl;
+ }
+ }
+ }
+
+ out << endl;
+
+ generate_as3_meta_data_map(out, tstruct);
+
+ // Static initializer to populate global class to struct metadata map
+ indent(out) << "{" << endl;
+ indent_up();
+ indent(out) << "FieldMetaData.addStructMetaDataMap(" << type_name(tstruct) << ", metaDataMap);"
+ << endl;
+ indent_down();
+ indent(out) << "}" << endl << endl;
+
+ // Default constructor
+ indent(out) << "public function " << tstruct->get_name() << "() {" << endl;
+ indent_up();
+ for (m_iter = members.begin(); m_iter != members.end(); ++m_iter) {
+ if ((*m_iter)->get_value() != NULL) {
+ indent(out) << "this._" << (*m_iter)->get_name() << " = "
+ << (*m_iter)->get_value()->get_integer() << ";" << endl;
+ }
+ }
+ indent_down();
+ indent(out) << "}" << endl << endl;
+
+ generate_as3_bean_boilerplate(out, tstruct, bindable);
+ generate_generic_field_getters_setters(out, tstruct);
+ generate_generic_isset_method(out, tstruct);
+
+ generate_as3_struct_reader(out, tstruct);
+ if (is_result) {
+ generate_as3_struct_result_writer(out, tstruct);
+ } else {
+ generate_as3_struct_writer(out, tstruct);
+ }
+ generate_as3_struct_tostring(out, tstruct, bindable);
+ generate_as3_validator(out, tstruct);
+ scope_down(out);
+ out << endl;
+}
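+
+// Rough shape of a struct class emitted by generate_as3_struct_definition,
+// sketched for a hypothetical `struct Point { 1: i32 x }` (details elided):
+//
+//   public class Point implements TBase {
+//     private static const STRUCT_DESC:TStruct = new TStruct("Point");
+//     private static const X_FIELD_DESC:TField = new TField("x", TType.I32, 1);
+//
+//     private var _x:int;
+//     public static const X:int = 1;
+//     private var __isset_x:Boolean = false;
+//
+//     public function read(iprot:TProtocol):void { ... }
+//     public function write(oprot:TProtocol):void { ... }
+//     public function validate():void { ... }
+//   }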
+
+/**
+ * Generates a function to read all the fields of the struct.
+ *
+ * @param tstruct The struct definition
+ */
+void t_as3_generator::generate_as3_struct_reader(ofstream& out, t_struct* tstruct) {
+ out << indent() << "public function read(iprot:TProtocol):void {" << endl;
+ indent_up();
+
+ const vector<t_field*>& fields = tstruct->get_members();
+ vector<t_field*>::const_iterator f_iter;
+
+ // Declare stack tmp variables and read struct header
+ out << indent() << "var field:TField;" << endl << indent() << "iprot.readStructBegin();" << endl;
+
+ // Loop over reading in fields
+ indent(out) << "while (true)" << endl;
+ scope_up(out);
+
+ // Read beginning field marker
+ indent(out) << "field = iprot.readFieldBegin();" << endl;
+
+ // Check for field STOP marker and break
+ indent(out) << "if (field.type == TType.STOP) { " << endl;
+ indent_up();
+ indent(out) << "break;" << endl;
+ indent_down();
+ indent(out) << "}" << endl;
+
+ // Switch statement on the field we are reading
+ indent(out) << "switch (field.id)" << endl;
+
+ scope_up(out);
+
+ // Generate deserialization code for known cases
+ for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
+ indent(out) << "case " << upcase_string((*f_iter)->get_name()) << ":" << endl;
+ indent_up();
+ indent(out) << "if (field.type == " << type_to_enum((*f_iter)->get_type()) << ") {" << endl;
+ indent_up();
+
+ generate_deserialize_field(out, *f_iter, "this.");
+ generate_isset_set(out, *f_iter);
+ indent_down();
+ out << indent() << "} else { " << endl << indent() << " TProtocolUtil.skip(iprot, field.type);"
+ << endl << indent() << "}" << endl << indent() << "break;" << endl;
+ indent_down();
+ }
+
+ // In the default case we skip the field
+ out << indent() << "default:" << endl << indent() << " TProtocolUtil.skip(iprot, field.type);"
+ << endl << indent() << " break;" << endl;
+
+ scope_down(out);
+
+ // Read field end marker
+ indent(out) << "iprot.readFieldEnd();" << endl;
+
+ scope_down(out);
+
+ out << indent() << "iprot.readStructEnd();" << endl << endl;
+
+ // in non-beans style, check for required fields of primitive type
+ // (which can be checked here but not in the general validate method)
+ out << endl << indent() << "// check for required fields of primitive type, which can't be "
+ "checked in the validate method" << endl;
+ for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
+ if ((*f_iter)->get_req() == t_field::T_REQUIRED && !type_can_be_null((*f_iter)->get_type())) {
+ out << indent() << "if (!__isset_" << (*f_iter)->get_name() << ") {" << endl << indent()
+ << " throw new TProtocolError(TProtocolError.UNKNOWN, \"Required field '"
+ << (*f_iter)->get_name()
+ << "' was not found in serialized data! Struct: \" + toString());" << endl << indent()
+ << "}" << endl;
+ }
+ }
+
+ // performs various checks (e.g. check that all required fields are set)
+ indent(out) << "validate();" << endl;
+
+ indent_down();
+ out << indent() << "}" << endl << endl;
+}
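+
+// Shape of the generated read() method (sketch; the per-field cases are
+// produced by generate_deserialize_field below):
+//
+//   public function read(iprot:TProtocol):void {
+//     var field:TField;
+//     iprot.readStructBegin();
+//     while (true) {
+//       field = iprot.readFieldBegin();
+//       if (field.type == TType.STOP) { break; }
+//       switch (field.id) {
+//         // one case per known field, otherwise TProtocolUtil.skip(iprot, field.type)
+//       }
+//       iprot.readFieldEnd();
+//     }
+//     iprot.readStructEnd();
+//     validate();
+//   }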
+
+// generates as3 method to perform various checks
+// (e.g. check that all required fields are set)
+void t_as3_generator::generate_as3_validator(ofstream& out, t_struct* tstruct) {
+ indent(out) << "public function validate():void {" << endl;
+ indent_up();
+
+ const vector<t_field*>& fields = tstruct->get_members();
+ vector<t_field*>::const_iterator f_iter;
+
+ out << indent() << "// check for required fields" << endl;
+ for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
+ if ((*f_iter)->get_req() == t_field::T_REQUIRED) {
+ if (type_can_be_null((*f_iter)->get_type())) {
+ indent(out) << "if (" << (*f_iter)->get_name() << " == null) {" << endl;
+ indent(out) << " throw new TProtocolError(TProtocolError.UNKNOWN, \"Required field '"
+ << (*f_iter)->get_name() << "' was not present! Struct: \" + toString());"
+ << endl;
+ indent(out) << "}" << endl;
+ } else {
+ indent(out) << "// alas, we cannot check '" << (*f_iter)->get_name()
+ << "' because it's a primitive and you chose the non-beans generator." << endl;
+ }
+ }
+ }
+
+ // check that fields of type enum have valid values
+ out << indent() << "// check that fields of type enum have valid values" << endl;
+ for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
+ t_field* field = (*f_iter);
+ t_type* type = field->get_type();
+ // if field is an enum, check that its value is valid
+ if (type->is_enum()) {
+ indent(out) << "if (" << generate_isset_check(field) << " && !" << get_enum_class_name(type)
+ << ".VALID_VALUES.contains(" << field->get_name() << ")){" << endl;
+ indent_up();
+ indent(out) << "throw new TProtocolError(TProtocolError.UNKNOWN, \"The field '"
+ << field->get_name() << "' has been assigned the invalid value \" + "
+ << field->get_name() << ");" << endl;
+ indent_down();
+ indent(out) << "}" << endl;
+ }
+ }
+
+ indent_down();
+ indent(out) << "}" << endl << endl;
+}
+
+/**
+ * Generates a function to write all the fields of the struct
+ *
+ * @param tstruct The struct definition
+ */
+void t_as3_generator::generate_as3_struct_writer(ofstream& out, t_struct* tstruct) {
+ out << indent() << "public function write(oprot:TProtocol):void {" << endl;
+ indent_up();
+
+ string name = tstruct->get_name();
+ const vector<t_field*>& fields = tstruct->get_sorted_members();
+ vector<t_field*>::const_iterator f_iter;
+
+ // performs various checks (e.g. check that all required fields are set)
+ indent(out) << "validate();" << endl << endl;
+
+ indent(out) << "oprot.writeStructBegin(STRUCT_DESC);" << endl;
+
+ for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
+ bool could_be_unset = (*f_iter)->get_req() == t_field::T_OPTIONAL;
+ if (could_be_unset) {
+ indent(out) << "if (" << generate_isset_check(*f_iter) << ") {" << endl;
+ indent_up();
+ }
+ bool null_allowed = type_can_be_null((*f_iter)->get_type());
+ if (null_allowed) {
+ out << indent() << "if (this." << (*f_iter)->get_name() << " != null) {" << endl;
+ indent_up();
+ }
+
+ indent(out) << "oprot.writeFieldBegin(" << constant_name((*f_iter)->get_name())
+ << "_FIELD_DESC);" << endl;
+
+ // Write field contents
+ generate_serialize_field(out, *f_iter, "this.");
+
+ // Write field closer
+ indent(out) << "oprot.writeFieldEnd();" << endl;
+
+ if (null_allowed) {
+ indent_down();
+ indent(out) << "}" << endl;
+ }
+ if (could_be_unset) {
+ indent_down();
+ indent(out) << "}" << endl;
+ }
+ }
+ // Write the struct map
+ out << indent() << "oprot.writeFieldStop();" << endl << indent() << "oprot.writeStructEnd();"
+ << endl;
+
+ indent_down();
+ out << indent() << "}" << endl << endl;
+}
+
+/**
+ * Generates a function to write all the fields of the struct,
+ * which is a function result. These fields are only written
+ * if they are set in the Isset array, and only one of them
+ * can be set at a time.
+ *
+ * @param tstruct The struct definition
+ */
+void t_as3_generator::generate_as3_struct_result_writer(ofstream& out, t_struct* tstruct) {
+ out << indent() << "public function write(oprot:TProtocol):void {" << endl;
+ indent_up();
+
+ string name = tstruct->get_name();
+ const vector<t_field*>& fields = tstruct->get_sorted_members();
+ vector<t_field*>::const_iterator f_iter;
+
+ indent(out) << "oprot.writeStructBegin(STRUCT_DESC);" << endl;
+
+ bool first = true;
+ for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
+ if (first) {
+ first = false;
+ out << endl << indent() << "if ";
+ } else {
+ out << " else if ";
+ }
+
+ out << "(this." << generate_isset_check(*f_iter) << ") {" << endl;
+
+ indent_up();
+
+ indent(out) << "oprot.writeFieldBegin(" << constant_name((*f_iter)->get_name())
+ << "_FIELD_DESC);" << endl;
+
+ // Write field contents
+ generate_serialize_field(out, *f_iter, "this.");
+
+ // Write field closer
+ indent(out) << "oprot.writeFieldEnd();" << endl;
+
+ indent_down();
+ indent(out) << "}";
+ }
+ // Write the struct map
+ out << endl << indent() << "oprot.writeFieldStop();" << endl << indent()
+ << "oprot.writeStructEnd();" << endl;
+
+ indent_down();
+ out << indent() << "}" << endl << endl;
+}
+
+void t_as3_generator::generate_reflection_getters(ostringstream& out,
+ t_type* type,
+ string field_name,
+ string cap_name) {
+ (void)type;
+ (void)cap_name;
+ indent(out) << "case " << upcase_string(field_name) << ":" << endl;
+ indent_up();
+ indent(out) << "return this." << field_name << ";" << endl;
+ indent_down();
+}
+
+void t_as3_generator::generate_reflection_setters(ostringstream& out,
+ t_type* type,
+ string field_name,
+ string cap_name) {
+ (void)type;
+ (void)cap_name;
+ indent(out) << "case " << upcase_string(field_name) << ":" << endl;
+ indent_up();
+ indent(out) << "if (value == null) {" << endl;
+ indent(out) << " unset" << get_cap_name(field_name) << "();" << endl;
+ indent(out) << "} else {" << endl;
+ indent(out) << " this." << field_name << " = value;" << endl;
+ indent(out) << "}" << endl;
+ indent(out) << "break;" << endl << endl;
+
+ indent_down();
+}
+
+void t_as3_generator::generate_generic_field_getters_setters(std::ofstream& out,
+ t_struct* tstruct) {
+
+ std::ostringstream getter_stream;
+ std::ostringstream setter_stream;
+
+ // build up the bodies of both the getter and setter at once
+ const vector<t_field*>& fields = tstruct->get_members();
+ vector<t_field*>::const_iterator f_iter;
+ for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
+ t_field* field = *f_iter;
+ t_type* type = get_true_type(field->get_type());
+ std::string field_name = field->get_name();
+ std::string cap_name = get_cap_name(field_name);
+
+ indent_up();
+ generate_reflection_setters(setter_stream, type, field_name, cap_name);
+ generate_reflection_getters(getter_stream, type, field_name, cap_name);
+ indent_down();
+ }
+
+ // create the setter
+ indent(out) << "public function setFieldValue(fieldID:int, value:*):void {" << endl;
+ indent_up();
+
+ indent(out) << "switch (fieldID) {" << endl;
+
+ out << setter_stream.str();
+
+ indent(out) << "default:" << endl;
+ indent(out) << " throw new ArgumentError(\"Field \" + fieldID + \" doesn't exist!\");" << endl;
+
+ indent(out) << "}" << endl;
+
+ indent_down();
+ indent(out) << "}" << endl << endl;
+
+ // create the getter
+ indent(out) << "public function getFieldValue(fieldID:int):* {" << endl;
+ indent_up();
+
+ indent(out) << "switch (fieldID) {" << endl;
+
+ out << getter_stream.str();
+
+ indent(out) << "default:" << endl;
+ indent(out) << " throw new ArgumentError(\"Field \" + fieldID + \" doesn't exist!\");" << endl;
+
+ indent(out) << "}" << endl;
+
+ indent_down();
+
+ indent(out) << "}" << endl << endl;
+}
+
+// Creates a generic isSet method that takes the field number as argument
+void t_as3_generator::generate_generic_isset_method(std::ofstream& out, t_struct* tstruct) {
+ const vector<t_field*>& fields = tstruct->get_members();
+ vector<t_field*>::const_iterator f_iter;
+
+ // create the isSet method
+ indent(out) << "// Returns true if field corresponding to fieldID is set (has been assigned a "
+ "value) and false otherwise" << endl;
+ indent(out) << "public function isSet(fieldID:int):Boolean {" << endl;
+ indent_up();
+ indent(out) << "switch (fieldID) {" << endl;
+
+ for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
+ t_field* field = *f_iter;
+ indent(out) << "case " << upcase_string(field->get_name()) << ":" << endl;
+ indent_up();
+ indent(out) << "return " << generate_isset_check(field) << ";" << endl;
+ indent_down();
+ }
+
+ indent(out) << "default:" << endl;
+ indent(out) << " throw new ArgumentError(\"Field \" + fieldID + \" doesn't exist!\");" << endl;
+
+ indent(out) << "}" << endl;
+
+ indent_down();
+ indent(out) << "}" << endl << endl;
+}
+
+/**
+ * Generates a set of As3 Bean boilerplate functions (setters, getters, etc.)
+ * for the given struct.
+ *
+ * @param tstruct The struct definition
+ */
+void t_as3_generator::generate_as3_bean_boilerplate(ofstream& out,
+ t_struct* tstruct,
+ bool bindable) {
+ const vector<t_field*>& fields = tstruct->get_members();
+ vector<t_field*>::const_iterator f_iter;
+ for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
+ t_field* field = *f_iter;
+ t_type* type = get_true_type(field->get_type());
+ std::string field_name = field->get_name();
+ std::string cap_name = get_cap_name(field_name);
+
+ // Simple getter
+ generate_as3_doc(out, field);
+ indent(out) << "public function get " << field_name << "():" << type_name(type) << " {" << endl;
+ indent_up();
+ indent(out) << "return this._" << field_name << ";" << endl;
+ indent_down();
+ indent(out) << "}" << endl << endl;
+
+ // Simple setter
+ generate_as3_doc(out, field);
+ std::string propName = tmp("thriftPropertyChange");
+ if (bindable) {
+ indent(out) << "[Bindable(event=\"" << propName << "\")]" << endl;
+ }
+ indent(out) << "public function set " << field_name << "(" << field_name << ":"
+ << type_name(type) << "):void {" << endl;
+ indent_up();
+ indent(out) << "this._" << field_name << " = " << field_name << ";" << endl;
+ generate_isset_set(out, field);
+
+ if (bindable) {
+ // We have to use a custom event rather than the default, because with the
+ // default the setter only gets called if the value has changed. That means
+ // calling foo.setIntValue(0) will not cause foo.isIntValueSet() to return
+ // true, since foo._intValue wasn't changed and the setter was never called.
+ indent(out) << "dispatchEvent(new Event(\"" << propName << "\"));" << endl;
+
+ // However, if we only used a custom event, collections would not be able to
+ // detect when elements in them have changed, since they listen for
+ // PropertyChangeEvents. So we dispatch both.
+ indent(out) << "dispatchEvent(new PropertyChangeEvent(PropertyChangeEvent.PROPERTY_CHANGE));"
+ << endl;
+ }
+ indent_down();
+ indent(out) << "}" << endl << endl;
+
+ // Unsetter
+ indent(out) << "public function unset" << cap_name << "():void {" << endl;
+ indent_up();
+ if (type_can_be_null(type)) {
+ indent(out) << "this." << field_name << " = null;" << endl;
+ } else {
+ indent(out) << "this.__isset_" << field_name << " = false;" << endl;
+ }
+ indent_down();
+ indent(out) << "}" << endl << endl;
+
+ // isSet method
+ indent(out) << "// Returns true if field " << field_name
+ << " is set (has been assigned a value) and false otherwise" << endl;
+ indent(out) << "public function is" << get_cap_name("set") << cap_name << "():Boolean {"
+ << endl;
+ indent_up();
+ if (type_can_be_null(type)) {
+ indent(out) << "return this." << field_name << " != null;" << endl;
+ } else {
+ indent(out) << "return this.__isset_" << field_name << ";" << endl;
+ }
+ indent_down();
+ indent(out) << "}" << endl << endl;
+ }
+}
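+
+// Sketch of a bindable setter emitted above for a hypothetical i32 field
+// "count"; the event name is a generated temporary (e.g. thriftPropertyChange0):
+//
+//   [Bindable(event="thriftPropertyChange0")]
+//   public function set count(count:int):void {
+//     this._count = count;
+//     this.__isset_count = true;
+//     dispatchEvent(new Event("thriftPropertyChange0"));
+//     dispatchEvent(new PropertyChangeEvent(PropertyChangeEvent.PROPERTY_CHANGE));
+//   }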
+
+/**
+ * Generates a toString() method for the given struct
+ *
+ * @param tstruct The struct definition
+ */
+void t_as3_generator::generate_as3_struct_tostring(ofstream& out,
+ t_struct* tstruct,
+ bool bindable) {
+ // If it's bindable, it extends EventDispatcher so toString is an override.
+ out << indent() << "public " << (bindable ? "override " : "") << "function toString():String {"
+ << endl;
+ indent_up();
+
+ out << indent() << "var ret:String = new String(\"" << tstruct->get_name() << "(\");" << endl;
+ out << indent() << "var first:Boolean = true;" << endl << endl;
+
+ const vector<t_field*>& fields = tstruct->get_members();
+ vector<t_field*>::const_iterator f_iter;
+ bool first = true;
+ for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
+ bool could_be_unset = (*f_iter)->get_req() == t_field::T_OPTIONAL;
+ if (could_be_unset) {
+ indent(out) << "if (" << generate_isset_check(*f_iter) << ") {" << endl;
+ indent_up();
+ }
+
+ t_field* field = (*f_iter);
+
+ if (!first) {
+ indent(out) << "if (!first) ret += \", \";" << endl;
+ }
+ indent(out) << "ret += \"" << (*f_iter)->get_name() << ":\";" << endl;
+ bool can_be_null = type_can_be_null(field->get_type());
+ if (can_be_null) {
+ indent(out) << "if (this." << (*f_iter)->get_name() << " == null) {" << endl;
+ indent(out) << " ret += \"null\";" << endl;
+ indent(out) << "} else {" << endl;
+ indent_up();
+ }
+
+ if (field->get_type()->is_binary()) {
+ indent(out) << " ret += \"BINARY\";" << endl;
+ } else if (field->get_type()->is_enum()) {
+ indent(out) << "var " << field->get_name()
+ << "_name:String = " << get_enum_class_name(field->get_type())
+ << ".VALUES_TO_NAMES[this." << (*f_iter)->get_name() << "];" << endl;
+ indent(out) << "if (" << field->get_name() << "_name != null) {" << endl;
+ indent(out) << " ret += " << field->get_name() << "_name;" << endl;
+ indent(out) << " ret += \" (\";" << endl;
+ indent(out) << "}" << endl;
+ indent(out) << "ret += this." << field->get_name() << ";" << endl;
+ indent(out) << "if (" << field->get_name() << "_name != null) {" << endl;
+ indent(out) << " ret += \")\";" << endl;
+ indent(out) << "}" << endl;
+ } else {
+ indent(out) << "ret += this." << (*f_iter)->get_name() << ";" << endl;
+ }
+
+ if (can_be_null) {
+ indent_down();
+ indent(out) << "}" << endl;
+ }
+ indent(out) << "first = false;" << endl;
+
+ if (could_be_unset) {
+ indent_down();
+ indent(out) << "}" << endl;
+ }
+ first = false;
+ }
+ out << indent() << "ret += \")\";" << endl << indent() << "return ret;" << endl;
+
+ indent_down();
+ indent(out) << "}" << endl << endl;
+}
+
+/**
+ * Generates a static map with meta data to store information such as fieldID to
+ * fieldName mapping
+ *
+ * @param tstruct The struct definition
+ */
+void t_as3_generator::generate_as3_meta_data_map(ofstream& out, t_struct* tstruct) {
+ const vector<t_field*>& fields = tstruct->get_members();
+ vector<t_field*>::const_iterator f_iter;
+
+ // Static Map with fieldID -> FieldMetaData mappings
+ indent(out) << "public static const metaDataMap:Dictionary = new Dictionary();" << endl;
+
+ if (fields.size() > 0) {
+ // Populate map
+ scope_up(out);
+ for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
+ t_field* field = *f_iter;
+ std::string field_name = field->get_name();
+ indent(out) << "metaDataMap[" << upcase_string(field_name) << "] = new FieldMetaData(\""
+ << field_name << "\", ";
+
+ // Set field requirement type (required, optional, etc.)
+ if (field->get_req() == t_field::T_REQUIRED) {
+ out << "TFieldRequirementType.REQUIRED, ";
+ } else if (field->get_req() == t_field::T_OPTIONAL) {
+ out << "TFieldRequirementType.OPTIONAL, ";
+ } else {
+ out << "TFieldRequirementType.DEFAULT, ";
+ }
+
+ // Create value meta data
+ generate_field_value_meta_data(out, field->get_type());
+ out << ");" << endl;
+ }
+ scope_down(out);
+ }
+}
+
+/**
+ * Returns a string with the as3 representation of the given thrift type
+ * (e.g. for the type struct it returns "TType.STRUCT")
+ */
+std::string t_as3_generator::get_as3_type_string(t_type* type) {
+ if (type->is_list()) {
+ return "TType.LIST";
+ } else if (type->is_map()) {
+ return "TType.MAP";
+ } else if (type->is_set()) {
+ return "TType.SET";
+ } else if (type->is_struct() || type->is_xception()) {
+ return "TType.STRUCT";
+ } else if (type->is_enum()) {
+ return "TType.I32";
+ } else if (type->is_typedef()) {
+ return get_as3_type_string(((t_typedef*)type)->get_type());
+ } else if (type->is_base_type()) {
+ switch (((t_base_type*)type)->get_base()) {
+ case t_base_type::TYPE_VOID:
+ return "TType.VOID";
+ break;
+ case t_base_type::TYPE_STRING:
+ return "TType.STRING";
+ break;
+ case t_base_type::TYPE_BOOL:
+ return "TType.BOOL";
+ break;
+ case t_base_type::TYPE_I8:
+ return "TType.BYTE";
+ break;
+ case t_base_type::TYPE_I16:
+ return "TType.I16";
+ break;
+ case t_base_type::TYPE_I32:
+ return "TType.I32";
+ break;
+ case t_base_type::TYPE_I64:
+ return "TType.I64";
+ break;
+ case t_base_type::TYPE_DOUBLE:
+ return "TType.DOUBLE";
+ break;
+ default:
+ throw std::runtime_error("Unknown thrift type \"" + type->get_name()
+ + "\" passed to t_as3_generator::get_as3_type_string!");
+ break; // This should never happen!
+ }
+ } else {
+ throw std::runtime_error(
+ "Unknown thrift type \"" + type->get_name()
+ + "\" passed to t_as3_generator::get_as3_type_string!"); // This should never happen!
+ }
+}
+
+void t_as3_generator::generate_field_value_meta_data(std::ofstream& out, t_type* type) {
+ out << endl;
+ indent_up();
+ indent_up();
+ if (type->is_struct() || type->is_xception()) {
+ indent(out) << "new StructMetaData(TType.STRUCT, " << type_name(type);
+ } else if (type->is_container()) {
+ if (type->is_list()) {
+ indent(out) << "new ListMetaData(TType.LIST, ";
+ t_type* elem_type = ((t_list*)type)->get_elem_type();
+ generate_field_value_meta_data(out, elem_type);
+ } else if (type->is_set()) {
+ indent(out) << "new SetMetaData(TType.SET, ";
+ t_type* elem_type = ((t_list*)type)->get_elem_type();
+ generate_field_value_meta_data(out, elem_type);
+ } else { // map
+ indent(out) << "new MapMetaData(TType.MAP, ";
+ t_type* key_type = ((t_map*)type)->get_key_type();
+ t_type* val_type = ((t_map*)type)->get_val_type();
+ generate_field_value_meta_data(out, key_type);
+ out << ", ";
+ generate_field_value_meta_data(out, val_type);
+ }
+ } else {
+ indent(out) << "new FieldValueMetaData(" << get_as3_type_string(type);
+ }
+ out << ")";
+ indent_down();
+ indent_down();
+}
+
+/**
+ * Generates a thrift service. For AS3 this produces three separate files: the
+ * service interface, the client implementation (<Service>Impl.as) and the
+ * server-side processor (<Service>Processor.as).
+ *
+ * @param tservice The service definition
+ */
+void t_as3_generator::generate_service(t_service* tservice) {
+ // Make interface file
+ string f_service_name = package_dir_ + "/" + service_name_ + ".as";
+ f_service_.open(f_service_name.c_str());
+
+ f_service_ << autogen_comment() << as3_package();
+
+ scope_up(f_service_);
+
+ f_service_ << endl << as3_type_imports() << as3_thrift_imports()
+ << as3_thrift_gen_imports(tservice);
+
+ if (tservice->get_extends() != NULL) {
+ t_type* parent = tservice->get_extends();
+ string parent_namespace = parent->get_program()->get_namespace("as3");
+ if (!parent_namespace.empty() && parent_namespace != package_name_) {
+ f_service_ << "import " << type_name(parent) << ";" << endl;
+ }
+ }
+
+ f_service_ << endl;
+
+ generate_service_interface(tservice);
+
+ scope_down(f_service_);
+ f_service_.close();
+
+ // Now make the implementation/client file
+ f_service_name = package_dir_ + "/" + service_name_ + "Impl.as";
+ f_service_.open(f_service_name.c_str());
+
+ f_service_ << autogen_comment() << as3_package();
+
+ scope_up(f_service_);
+
+ f_service_ << endl << as3_type_imports() << as3_thrift_imports()
+ << as3_thrift_gen_imports(tservice);
+
+ if (tservice->get_extends() != NULL) {
+ t_type* parent = tservice->get_extends();
+ string parent_namespace = parent->get_program()->get_namespace("as3");
+ if (!parent_namespace.empty() && parent_namespace != package_name_) {
+ f_service_ << "import " << type_name(parent) << "Impl;" << endl;
+ }
+ }
+
+ f_service_ << endl;
+
+ generate_service_client(tservice);
+ scope_down(f_service_);
+
+ f_service_ << as3_type_imports();
+ f_service_ << as3_thrift_imports();
+ f_service_ << as3_thrift_gen_imports(tservice);
+ if (!package_name_.empty()) {
+ f_service_ << "import " << package_name_ << ".*;" << endl;
+ }
+
+ generate_service_helpers(tservice);
+
+ f_service_.close();
+
+ // Now make the processor/server file
+ f_service_name = package_dir_ + "/" + service_name_ + "Processor.as";
+ f_service_.open(f_service_name.c_str());
+
+ f_service_ << autogen_comment() << as3_package();
+
+ scope_up(f_service_);
+
+ f_service_ << endl << as3_type_imports() << as3_thrift_imports()
+ << as3_thrift_gen_imports(tservice) << endl;
+
+ generate_service_server(tservice);
+ scope_down(f_service_);
+
+ f_service_ << as3_type_imports();
+ f_service_ << as3_thrift_imports();
+ f_service_ << as3_thrift_gen_imports(tservice) << endl;
+ if (!package_name_.empty()) {
+ f_service_ << "import " << package_name_ << ".*;" << endl;
+ }
+
+ generate_service_helpers(tservice);
+
+ f_service_.close();
+}
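+
+// For a hypothetical service named "Calc", generate_service above writes three
+// files into the package directory (sketch of the layout, not verbatim output):
+//
+//   Calc.as           - the service interface
+//   CalcImpl.as       - the asynchronous client implementation
+//   CalcProcessor.as  - the server-side processor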
+
+/**
+ * Generates a service interface definition.
+ *
+ * @param tservice The service to generate a header definition for
+ */
+void t_as3_generator::generate_service_interface(t_service* tservice) {
+ string extends_iface = "";
+ if (tservice->get_extends() != NULL) {
+ extends_iface = " extends " + tservice->get_extends()->get_name();
+ }
+
+ generate_as3_doc(f_service_, tservice);
+ f_service_ << indent() << "public interface " << service_name_ << extends_iface << " {" << endl
+ << endl;
+ indent_up();
+ vector<t_function*> functions = tservice->get_functions();
+ vector<t_function*>::iterator f_iter;
+ for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) {
+ generate_as3_doc(f_service_, *f_iter);
+ if (!(*f_iter)->is_oneway()) {
+ if ((*f_iter)->get_returntype()->is_void()) {
+ indent(f_service_) << "//function onError(Error):void;" << endl;
+ indent(f_service_) << "//function onSuccess():void;" << endl;
+ } else {
+ indent(f_service_) << "//function onError(Error):void;" << endl;
+ indent(f_service_) << "//function onSuccess(" << type_name((*f_iter)->get_returntype())
+ << "):void;" << endl;
+ }
+ }
+ indent(f_service_) << function_signature(*f_iter) << ";" << endl << endl;
+ }
+ indent_down();
+ f_service_ << indent() << "}" << endl << endl;
+}
+
+/**
+ * Generates structs for all the service args and return types
+ *
+ * @param tservice The service
+ */
+void t_as3_generator::generate_service_helpers(t_service* tservice) {
+ vector<t_function*> functions = tservice->get_functions();
+ vector<t_function*>::iterator f_iter;
+ for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) {
+ t_struct* ts = (*f_iter)->get_arglist();
+ generate_as3_struct_definition(f_service_, ts, false, true);
+ generate_function_helpers(*f_iter);
+ }
+}
+
+/**
+ * Generates a service client definition.
+ *
+ * @param tservice The service to generate a server for.
+ */
+void t_as3_generator::generate_service_client(t_service* tservice) {
+ string extends = "";
+ string extends_client = "";
+ if (tservice->get_extends() != NULL) {
+ extends = tservice->get_extends()->get_name();
+ extends_client = " extends " + extends + "Impl";
+ }
+
+ indent(f_service_) << "public class " << service_name_ << "Impl" << extends_client
+ << " implements " << service_name_ << " {" << endl;
+ indent_up();
+
+ indent(f_service_) << "public function " << service_name_ << "Impl"
+ << "(iprot:TProtocol, oprot:TProtocol=null)" << endl;
+ scope_up(f_service_);
+ if (extends.empty()) {
+ f_service_ << indent() << "iprot_ = iprot;" << endl;
+ f_service_ << indent() << "if (oprot == null) {" << endl;
+ indent_up();
+ f_service_ << indent() << "oprot_ = iprot;" << endl;
+ indent_down();
+ f_service_ << indent() << "} else {" << endl;
+ indent_up();
+ f_service_ << indent() << "oprot_ = oprot;" << endl;
+ indent_down();
+ f_service_ << indent() << "}";
+ } else {
+ f_service_ << indent() << "super(iprot, oprot);" << endl;
+ }
+ scope_down(f_service_);
+ f_service_ << endl;
+
+ if (extends.empty()) {
+ f_service_ << indent() << "protected var iprot_:TProtocol;" << endl << indent()
+ << "protected var oprot_:TProtocol;" << endl << endl << indent()
+ << "protected var seqid_:int;" << endl << endl;
+
+ indent(f_service_) << "public function getInputProtocol():TProtocol" << endl;
+ scope_up(f_service_);
+ indent(f_service_) << "return this.iprot_;" << endl;
+ scope_down(f_service_);
+ f_service_ << endl;
+
+ indent(f_service_) << "public function getOutputProtocol():TProtocol" << endl;
+ scope_up(f_service_);
+ indent(f_service_) << "return this.oprot_;" << endl;
+ scope_down(f_service_);
+ f_service_ << endl;
+ }
+
+ // Generate client method implementations
+ vector<t_function*> functions = tservice->get_functions();
+ vector<t_function*>::const_iterator f_iter;
+ for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) {
+ string funname = (*f_iter)->get_name();
+
+ // Open function
+ if (!(*f_iter)->is_oneway()) {
+ if ((*f_iter)->get_returntype()->is_void()) {
+ indent(f_service_) << "//function onError(Error):void;" << endl;
+ indent(f_service_) << "//function onSuccess():void;" << endl;
+ } else {
+ indent(f_service_) << "//function onError(Error):void;" << endl;
+ indent(f_service_) << "//function onSuccess(" << type_name((*f_iter)->get_returntype())
+ << "):void;" << endl;
+ }
+ }
+ indent(f_service_) << "public " << function_signature(*f_iter) << endl;
+ scope_up(f_service_);
+
+ // Get the struct of function call params
+ t_struct* arg_struct = (*f_iter)->get_arglist();
+
+ string argsname = (*f_iter)->get_name() + "_args";
+ vector<t_field*>::const_iterator fld_iter;
+ const vector<t_field*>& fields = arg_struct->get_members();
+
+ // Serialize the request
+ f_service_ << indent() << "oprot_.writeMessageBegin(new TMessage(\"" << funname << "\", "
+ << ((*f_iter)->is_oneway() ? "TMessageType.ONEWAY" : "TMessageType.CALL")
+ << ", seqid_));" << endl << indent() << "var args:" << argsname << " = new "
+ << argsname << "();" << endl;
+
+ for (fld_iter = fields.begin(); fld_iter != fields.end(); ++fld_iter) {
+ f_service_ << indent() << "args." << (*fld_iter)->get_name() << " = "
+ << (*fld_iter)->get_name() << ";" << endl;
+ }
+
+ f_service_ << indent() << "args.write(oprot_);" << endl << indent()
+ << "oprot_.writeMessageEnd();" << endl;
+
+ if ((*f_iter)->is_oneway()) {
+ f_service_ << indent() << "oprot_.getTransport().flush();" << endl;
+ } else {
+ f_service_ << indent() << "oprot_.getTransport().flush(function(error:Error):void {" << endl;
+ indent_up();
+ f_service_ << indent() << "try {" << endl;
+ indent_up();
+ string resultname = (*f_iter)->get_name() + "_result";
+ f_service_ << indent() << "if (error != null) {" << endl << indent()
+ << " if (onError != null) onError(error);" << endl << indent() << " return;"
+ << endl << indent() << "}" << endl << indent()
+ << "var msg:TMessage = iprot_.readMessageBegin();" << endl << indent()
+ << "if (msg.type == TMessageType.EXCEPTION) {" << endl << indent()
+ << " var x:TApplicationError = TApplicationError.read(iprot_);" << endl
+ << indent() << " iprot_.readMessageEnd();" << endl << indent()
+ << " if (onError != null) onError(x);" << endl << indent() << " return;" << endl
+ << indent() << "}" << endl << indent() << "var result :" << resultname << " = new "
+ << resultname << "();" << endl << indent() << "result.read(iprot_);" << endl
+ << indent() << "iprot_.readMessageEnd();" << endl;
+
+ // Careful, only return _result if not a void function
+ if (!(*f_iter)->get_returntype()->is_void()) {
+ f_service_ << indent() << "if (result." << generate_isset_check("success") << ") {" << endl
+ << indent() << " if (onSuccess != null) onSuccess(result.success);" << endl
+ << indent() << " return;" << endl << indent() << "}" << endl;
+ }
+
+ t_struct* xs = (*f_iter)->get_xceptions();
+ const std::vector<t_field*>& xceptions = xs->get_members();
+ vector<t_field*>::const_iterator x_iter;
+ for (x_iter = xceptions.begin(); x_iter != xceptions.end(); ++x_iter) {
+ f_service_ << indent() << "if (result." << (*x_iter)->get_name() << " != null) {" << endl
+ << indent() << " if (onError != null) onError(result." << (*x_iter)->get_name()
+ << ");" << endl << indent() << " return;" << endl << indent() << "}" << endl;
+ }
+
+ // If you get here it's an exception, unless a void function
+ if ((*f_iter)->get_returntype()->is_void()) {
+ f_service_ << indent() << "if (onSuccess != null) onSuccess();" << endl << indent()
+ << "return;" << endl;
+ } else {
+
+ f_service_ << indent() << "if (onError != null) onError(new "
+ "TApplicationError(TApplicationError.MISSING_RESULT, \""
+ << (*f_iter)->get_name() << " failed: unknown result\"));" << endl;
+ }
+ indent_down();
+ f_service_ << indent() << "} catch (e:TError) {" << endl << indent()
+ << " if (onError != null) onError(e);" << endl << indent() << "}" << endl;
+
+ indent_down();
+ indent(f_service_) << "});" << endl;
+ }
+ // Close function
+ scope_down(f_service_);
+ f_service_ << endl;
+ }
+
+ indent_down();
+ indent(f_service_) << "}" << endl;
+}
+
+/**
+ * Generates a service server definition.
+ *
+ * @param tservice The service to generate a server for.
+ */
+void t_as3_generator::generate_service_server(t_service* tservice) {
+ // Generate the dispatch methods
+ vector<t_function*> functions = tservice->get_functions();
+ vector<t_function*>::iterator f_iter;
+
+ // Extends stuff
+ string extends = "";
+ string extends_processor = "";
+ if (tservice->get_extends() != NULL) {
+ extends = type_name(tservice->get_extends());
+ extends_processor = " extends " + extends + "Processor";
+ }
+
+ // Generate the header portion
+ indent(f_service_) << "public class " << service_name_ << "Processor" << extends_processor
+ << " implements TProcessor {" << endl;
+ indent_up();
+
+ indent(f_service_) << "public function " << service_name_ << "Processor(iface:" << service_name_
+ << ")" << endl;
+ scope_up(f_service_);
+ if (!extends.empty()) {
+ f_service_ << indent() << "super(iface);" << endl;
+ }
+ f_service_ << indent() << "iface_ = iface;" << endl;
+
+ for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) {
+ f_service_ << indent() << "PROCESS_MAP[\"" << (*f_iter)->get_name()
+ << "\"] = " << (*f_iter)->get_name() << "();" << endl;
+ }
+
+ scope_down(f_service_);
+ f_service_ << endl;
+
+ f_service_ << indent() << "private var iface_:" << service_name_ << ";" << endl;
+
+ if (extends.empty()) {
+ f_service_ << indent() << "protected const PROCESS_MAP:Dictionary = new Dictionary();" << endl;
+ }
+
+ f_service_ << endl;
+
+ // Generate the server implementation
+ string override = "";
+ if (tservice->get_extends() != NULL) {
+ override = "override ";
+ }
+ indent(f_service_) << override
+ << "public function process(iprot:TProtocol, oprot:TProtocol):Boolean" << endl;
+ scope_up(f_service_);
+
+ f_service_ << indent() << "var msg:TMessage = iprot.readMessageBegin();" << endl;
+
+ // TODO(mcslee): validate message, was the seqid etc. legit?
+ // AS- If all method is oneway:
+ // do you have an oprot?
+ // do you you need nullcheck?
+ f_service_
+ << indent() << "var fn:Function = PROCESS_MAP[msg.name];" << endl << indent()
+ << "if (fn == null) {" << endl << indent() << " TProtocolUtil.skip(iprot, TType.STRUCT);"
+ << endl << indent() << " iprot.readMessageEnd();" << endl << indent()
+ << " var x:TApplicationError = new TApplicationError(TApplicationError.UNKNOWN_METHOD, "
+ "\"Invalid method name: '\"+msg.name+\"'\");" << endl << indent()
+ << " oprot.writeMessageBegin(new TMessage(msg.name, TMessageType.EXCEPTION, msg.seqid));"
+ << endl << indent() << " x.write(oprot);" << endl << indent() << " oprot.writeMessageEnd();"
+ << endl << indent() << " oprot.getTransport().flush();" << endl << indent()
+ << " return true;" << endl << indent() << "}" << endl << indent()
+ << "fn.call(this,msg.seqid, iprot, oprot);" << endl;
+
+ f_service_ << indent() << "return true;" << endl;
+
+ scope_down(f_service_);
+ f_service_ << endl;
+
+ // Generate the process subfunctions
+ for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) {
+ generate_process_function(tservice, *f_iter);
+ }
+
+ indent_down();
+ indent(f_service_) << "}" << endl << endl;
+}
+
+/**
+ * Generates a struct and helpers for a function.
+ *
+ * @param tfunction The function
+ */
+void t_as3_generator::generate_function_helpers(t_function* tfunction) {
+ if (tfunction->is_oneway()) {
+ return;
+ }
+
+ t_struct result(program_, tfunction->get_name() + "_result");
+ t_field success(tfunction->get_returntype(), "success", 0);
+ if (!tfunction->get_returntype()->is_void()) {
+ result.append(&success);
+ }
+
+ t_struct* xs = tfunction->get_xceptions();
+ const vector<t_field*>& fields = xs->get_members();
+ vector<t_field*>::const_iterator f_iter;
+ for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
+ result.append(*f_iter);
+ }
+
+ generate_as3_struct_definition(f_service_, &result, false, true, true);
+}
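+
+// Illustrative note (not part of the upstream generator; method name is hypothetical):
+// for an IDL method "i32 add(1: i32 num1, 2: i32 num2) throws (1: InvalidOperation ouch)",
+// this helper emits an AS3 result struct roughly like
+//
+//   class add_result {
+//     public var success:int;            // field id 0, omitted for void functions
+//     public var ouch:InvalidOperation;  // one field per declared exception
+//     // ...read()/write() produced by generate_as3_struct_definition...
+//   }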
+
+/**
+ * Generates a process function definition.
+ *
+ * @param tfunction The function to write a dispatcher for
+ */
+void t_as3_generator::generate_process_function(t_service* tservice, t_function* tfunction) {
+ (void)tservice;
+ // Open class
+ indent(f_service_) << "private function " << tfunction->get_name() << "():Function {" << endl;
+ indent_up();
+
+ // Open function
+ indent(f_service_) << "return function(seqid:int, iprot:TProtocol, oprot:TProtocol):void" << endl;
+ scope_up(f_service_);
+
+ string argsname = tfunction->get_name() + "_args";
+ string resultname = tfunction->get_name() + "_result";
+
+ f_service_ << indent() << "var args:" << argsname << " = new " << argsname << "();" << endl
+ << indent() << "args.read(iprot);" << endl << indent() << "iprot.readMessageEnd();"
+ << endl;
+
+ t_struct* xs = tfunction->get_xceptions();
+ const std::vector<t_field*>& xceptions = xs->get_members();
+ vector<t_field*>::const_iterator x_iter;
+
+ // Declare result for non oneway function
+ if (!tfunction->is_oneway()) {
+ f_service_ << indent() << "var result:" << resultname << " = new " << resultname << "();"
+ << endl;
+ }
+
+ // Try block for a function with exceptions
+ if (xceptions.size() > 0) {
+ f_service_ << indent() << "try {" << endl;
+ indent_up();
+ }
+
+ // Generate the function call
+ t_struct* arg_struct = tfunction->get_arglist();
+ const std::vector<t_field*>& fields = arg_struct->get_members();
+ vector<t_field*>::const_iterator f_iter;
+
+ f_service_ << indent();
+ if (tfunction->is_oneway()) {
+ f_service_ << "iface_." << tfunction->get_name() << "(";
+ bool first = true;
+ for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
+ if (first) {
+ first = false;
+ } else {
+ f_service_ << ", ";
+ }
+ f_service_ << "args." << (*f_iter)->get_name();
+ }
+ f_service_ << ");" << endl;
+ } else {
+ f_service_ << "// sorry this operation is not supported yet" << endl;
+ f_service_ << indent() << "throw new Error(\"This is not yet supported\");" << endl;
+ }
+
+ // Set isset on success field
+ if (!tfunction->is_oneway() && !tfunction->get_returntype()->is_void()
+ && !type_can_be_null(tfunction->get_returntype())) {
+ f_service_ << indent() << "result.set" << get_cap_name("success") << get_cap_name("isSet")
+ << "(true);" << endl;
+ }
+
+ if (!tfunction->is_oneway() && xceptions.size() > 0) {
+ indent_down();
+ f_service_ << indent() << "}";
+ for (x_iter = xceptions.begin(); x_iter != xceptions.end(); ++x_iter) {
+ f_service_ << " catch (" << (*x_iter)->get_name() << ":"
+ << type_name((*x_iter)->get_type(), false, false) << ") {" << endl;
+ if (!tfunction->is_oneway()) {
+ indent_up();
+ f_service_ << indent() << "result." << (*x_iter)->get_name() << " = "
+ << (*x_iter)->get_name() << ";" << endl;
+ indent_down();
+ f_service_ << indent() << "}";
+ } else {
+ f_service_ << "}";
+ }
+ }
+ f_service_ << " catch (th:Error) {" << endl;
+ indent_up();
+ f_service_ << indent() << "trace(\"Internal error processing " << tfunction->get_name()
+ << "\", th);" << endl << indent()
+ << "var x:TApplicationError = new "
+ "TApplicationError(TApplicationError.INTERNAL_ERROR, \"Internal error processing "
+ << tfunction->get_name() << "\");" << endl << indent()
+ << "oprot.writeMessageBegin(new TMessage(\"" << tfunction->get_name()
+ << "\", TMessageType.EXCEPTION, seqid));" << endl << indent() << "x.write(oprot);"
+ << endl << indent() << "oprot.writeMessageEnd();" << endl << indent()
+ << "oprot.getTransport().flush();" << endl << indent() << "return;" << endl;
+ indent_down();
+ f_service_ << indent() << "}" << endl;
+ }
+
+ // Shortcut out here for oneway functions
+ if (tfunction->is_oneway()) {
+ f_service_ << indent() << "return;" << endl;
+ scope_down(f_service_);
+
+ // Close class
+ indent_down();
+ f_service_ << indent() << "}" << endl << endl;
+ return;
+ }
+
+ f_service_ << indent() << "oprot.writeMessageBegin(new TMessage(\"" << tfunction->get_name()
+ << "\", TMessageType.REPLY, seqid));" << endl << indent() << "result.write(oprot);"
+ << endl << indent() << "oprot.writeMessageEnd();" << endl << indent()
+ << "oprot.getTransport().flush();" << endl;
+
+ // Close function
+ scope_down(f_service_);
+ f_service_ << endl;
+
+ // Close class
+ indent_down();
+ f_service_ << indent() << "}" << endl << endl;
+}
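+
+// Illustrative sketch (hypothetical "ping" method; not upstream text): the emitted
+// AS3 dispatcher produced by this function looks roughly like
+//
+//   private function ping():Function {
+//     return function(seqid:int, iprot:TProtocol, oprot:TProtocol):void {
+//       var args:ping_args = new ping_args();
+//       args.read(iprot);
+//       iprot.readMessageEnd();
+//       var result:ping_result = new ping_result();
+//       // ... invoke iface_ for oneway calls, or throw "not yet supported" (see above) ...
+//       oprot.writeMessageBegin(new TMessage("ping", TMessageType.REPLY, seqid));
+//       result.write(oprot);
+//       oprot.writeMessageEnd();
+//       oprot.getTransport().flush();
+//     };
+//   }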
+
+/**
+ * Deserializes a field of any type.
+ *
+ * @param tfield The field
+ * @param prefix The variable name or container for this field
+ */
+void t_as3_generator::generate_deserialize_field(ofstream& out, t_field* tfield, string prefix) {
+ t_type* type = get_true_type(tfield->get_type());
+
+ if (type->is_void()) {
+ throw "CANNOT GENERATE DESERIALIZE CODE FOR void TYPE: " + prefix + tfield->get_name();
+ }
+
+ string name = prefix + tfield->get_name();
+
+ if (type->is_struct() || type->is_xception()) {
+ generate_deserialize_struct(out, (t_struct*)type, name);
+ } else if (type->is_container()) {
+ generate_deserialize_container(out, type, name);
+ } else if (type->is_base_type() || type->is_enum()) {
+
+ indent(out) << name << " = iprot.";
+
+ if (type->is_base_type()) {
+ t_base_type::t_base tbase = ((t_base_type*)type)->get_base();
+ switch (tbase) {
+ case t_base_type::TYPE_VOID:
+ throw "compiler error: cannot serialize void field in a struct: " + name;
+ break;
+ case t_base_type::TYPE_STRING:
+ if (type->is_binary()) {
+ out << "readBinary();";
+ } else {
+ out << "readString();";
+ }
+ break;
+ case t_base_type::TYPE_BOOL:
+ out << "readBool();";
+ break;
+ case t_base_type::TYPE_I8:
+ out << "readByte();";
+ break;
+ case t_base_type::TYPE_I16:
+ out << "readI16();";
+ break;
+ case t_base_type::TYPE_I32:
+ out << "readI32();";
+ break;
+ case t_base_type::TYPE_I64:
+ out << "readI64();";
+ break;
+ case t_base_type::TYPE_DOUBLE:
+ out << "readDouble();";
+ break;
+ default:
+ throw "compiler error: no As3 name for base type " + t_base_type::t_base_name(tbase);
+ }
+ } else if (type->is_enum()) {
+ out << "readI32();";
+ }
+ out << endl;
+ } else {
+ printf("DO NOT KNOW HOW TO DESERIALIZE FIELD '%s' TYPE '%s'\n",
+ tfield->get_name().c_str(),
+ type_name(type).c_str());
+ }
+}
+
+/**
+ * Generates an unserializer for a struct, invokes read()
+ */
+void t_as3_generator::generate_deserialize_struct(ofstream& out, t_struct* tstruct, string prefix) {
+ out << indent() << prefix << " = new " << type_name(tstruct) << "();" << endl << indent()
+ << prefix << ".read(iprot);" << endl;
+}
+
+/**
+ * Deserializes a container by reading its size and then iterating
+ */
+void t_as3_generator::generate_deserialize_container(ofstream& out, t_type* ttype, string prefix) {
+ scope_up(out);
+
+ string obj;
+
+ if (ttype->is_map()) {
+ obj = tmp("_map");
+ } else if (ttype->is_set()) {
+ obj = tmp("_set");
+ } else if (ttype->is_list()) {
+ obj = tmp("_list");
+ }
+
+ // Declare variables, read header
+ if (ttype->is_map()) {
+ indent(out) << "var " << obj << ":TMap = iprot.readMapBegin();" << endl;
+ } else if (ttype->is_set()) {
+ indent(out) << "var " << obj << ":TSet = iprot.readSetBegin();" << endl;
+ } else if (ttype->is_list()) {
+ indent(out) << "var " << obj << ":TList = iprot.readListBegin();" << endl;
+ }
+
+ indent(out) << prefix << " = new " << type_name(ttype, false, true)
+ // size the collection correctly
+ << "("
+ << ");" << endl;
+
+ // For loop iterates over elements
+ string i = tmp("_i");
+ indent(out) << "for (var " << i << ":int = 0; " << i << " < " << obj << ".size"
+ << "; "
+ << "++" << i << ")" << endl;
+
+ scope_up(out);
+
+ if (ttype->is_map()) {
+ generate_deserialize_map_element(out, (t_map*)ttype, prefix);
+ } else if (ttype->is_set()) {
+ generate_deserialize_set_element(out, (t_set*)ttype, prefix);
+ } else if (ttype->is_list()) {
+ generate_deserialize_list_element(out, (t_list*)ttype, prefix);
+ }
+
+ scope_down(out);
+
+ // Read container end
+ if (ttype->is_map()) {
+ indent(out) << "iprot.readMapEnd();" << endl;
+ } else if (ttype->is_set()) {
+ indent(out) << "iprot.readSetEnd();" << endl;
+ } else if (ttype->is_list()) {
+ indent(out) << "iprot.readListEnd();" << endl;
+ }
+
+ scope_down(out);
+}
+
+/**
+ * Generates code to deserialize a map
+ */
+void t_as3_generator::generate_deserialize_map_element(ofstream& out, t_map* tmap, string prefix) {
+ string key = tmp("_key");
+ string val = tmp("_val");
+ t_field fkey(tmap->get_key_type(), key);
+ t_field fval(tmap->get_val_type(), val);
+
+ indent(out) << declare_field(&fkey) << endl;
+ indent(out) << declare_field(&fval) << endl;
+
+ generate_deserialize_field(out, &fkey);
+ generate_deserialize_field(out, &fval);
+
+ indent(out) << prefix << "[" << key << "] = " << val << ";" << endl;
+}
+
+/**
+ * Deserializes a set element
+ */
+void t_as3_generator::generate_deserialize_set_element(ofstream& out, t_set* tset, string prefix) {
+ string elem = tmp("_elem");
+ t_field felem(tset->get_elem_type(), elem);
+
+ indent(out) << declare_field(&felem) << endl;
+
+ generate_deserialize_field(out, &felem);
+
+ indent(out) << prefix << ".add(" << elem << ");" << endl;
+}
+
+/**
+ * Deserializes a list element
+ */
+void t_as3_generator::generate_deserialize_list_element(ofstream& out,
+ t_list* tlist,
+ string prefix) {
+ string elem = tmp("_elem");
+ t_field felem(tlist->get_elem_type(), elem);
+
+ indent(out) << declare_field(&felem) << endl;
+
+ generate_deserialize_field(out, &felem);
+
+ indent(out) << prefix << ".push(" << elem << ");" << endl;
+}
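+
+// Illustrative sketch (hypothetical field "this.nums" of Thrift type list<i32>; not
+// upstream text): the container/element helpers above emit AS3 code roughly like
+//
+//   var _list0:TList = iprot.readListBegin();
+//   this.nums = new Array();
+//   for (var _i1:int = 0; _i1 < _list0.size; ++_i1) {
+//     var _elem2:int;
+//     _elem2 = iprot.readI32();
+//     this.nums.push(_elem2);
+//   }
+//   iprot.readListEnd();
+//
+// (temporary names come from tmp(), so the numeric suffixes vary)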
+
+/**
+ * Serializes a field of any type.
+ *
+ * @param tfield The field to serialize
+ * @param prefix Name to prepend to field name
+ */
+void t_as3_generator::generate_serialize_field(ofstream& out, t_field* tfield, string prefix) {
+ t_type* type = get_true_type(tfield->get_type());
+
+ // Do nothing for void types
+ if (type->is_void()) {
+ throw "CANNOT GENERATE SERIALIZE CODE FOR void TYPE: " + prefix + tfield->get_name();
+ }
+
+ if (type->is_struct() || type->is_xception()) {
+ generate_serialize_struct(out, (t_struct*)type, prefix + tfield->get_name());
+ } else if (type->is_container()) {
+ generate_serialize_container(out, type, prefix + tfield->get_name());
+ } else if (type->is_base_type() || type->is_enum()) {
+
+ string name = prefix + tfield->get_name();
+ indent(out) << "oprot.";
+
+ if (type->is_base_type()) {
+ t_base_type::t_base tbase = ((t_base_type*)type)->get_base();
+ switch (tbase) {
+ case t_base_type::TYPE_VOID:
+ throw "compiler error: cannot serialize void field in a struct: " + name;
+ break;
+ case t_base_type::TYPE_STRING:
+ if (type->is_binary()) {
+ out << "writeBinary(" << name << ");";
+ } else {
+ out << "writeString(" << name << ");";
+ }
+ break;
+ case t_base_type::TYPE_BOOL:
+ out << "writeBool(" << name << ");";
+ break;
+ case t_base_type::TYPE_I8:
+ out << "writeByte(" << name << ");";
+ break;
+ case t_base_type::TYPE_I16:
+ out << "writeI16(" << name << ");";
+ break;
+ case t_base_type::TYPE_I32:
+ out << "writeI32(" << name << ");";
+ break;
+ case t_base_type::TYPE_I64:
+ out << "writeI64(" << name << ");";
+ break;
+ case t_base_type::TYPE_DOUBLE:
+ out << "writeDouble(" << name << ");";
+ break;
+ default:
+ throw "compiler error: no As3 name for base type " + t_base_type::t_base_name(tbase);
+ }
+ } else if (type->is_enum()) {
+ out << "writeI32(" << name << ");";
+ }
+ out << endl;
+ } else {
+ printf("DO NOT KNOW HOW TO SERIALIZE FIELD '%s%s' TYPE '%s'\n",
+ prefix.c_str(),
+ tfield->get_name().c_str(),
+ type_name(type).c_str());
+ }
+}
+
+/**
+ * Serializes all the members of a struct.
+ *
+ * @param tstruct The struct to serialize
+ * @param prefix String prefix to attach to all fields
+ */
+void t_as3_generator::generate_serialize_struct(ofstream& out, t_struct* tstruct, string prefix) {
+ (void)tstruct;
+ out << indent() << prefix << ".write(oprot);" << endl;
+}
+
+/**
+ * Serializes a container by writing its size then the elements.
+ *
+ * @param ttype The type of container
+ * @param prefix String prefix for fields
+ */
+void t_as3_generator::generate_serialize_container(ofstream& out, t_type* ttype, string prefix) {
+ scope_up(out);
+
+ if (ttype->is_map()) {
+ string iter = tmp("_key");
+ string counter = tmp("_sizeCounter");
+ indent(out) << "var " << counter << ":int = 0;" << endl;
+ indent(out) << "for (var " << iter << ":* in " << prefix << ") {" << endl;
+ indent(out) << " " << counter << +"++;" << endl;
+ indent(out) << "}" << endl;
+
+ indent(out) << "oprot.writeMapBegin(new TMap(" << type_to_enum(((t_map*)ttype)->get_key_type())
+ << ", " << type_to_enum(((t_map*)ttype)->get_val_type()) << ", " << counter << "));"
+ << endl;
+ } else if (ttype->is_set()) {
+ indent(out) << "oprot.writeSetBegin(new TSet(" << type_to_enum(((t_set*)ttype)->get_elem_type())
+ << ", " << prefix << ".size));" << endl;
+ } else if (ttype->is_list()) {
+ indent(out) << "oprot.writeListBegin(new TList("
+ << type_to_enum(((t_list*)ttype)->get_elem_type()) << ", " << prefix << ".length));"
+ << endl;
+ }
+
+ string iter = tmp("elem");
+ if (ttype->is_map()) {
+ indent(out) << "for (var " << iter << ":* in " << prefix << ")";
+ } else if (ttype->is_set()) {
+ indent(out) << "for each (var " << iter << ":* in " << prefix << ".toArray())";
+ } else if (ttype->is_list()) {
+ indent(out) << "for each (var " << iter << ":* in " << prefix << ")";
+ }
+
+ scope_up(out);
+
+ if (ttype->is_map()) {
+ generate_serialize_map_element(out, (t_map*)ttype, iter, prefix);
+ } else if (ttype->is_set()) {
+ generate_serialize_set_element(out, (t_set*)ttype, iter);
+ } else if (ttype->is_list()) {
+ generate_serialize_list_element(out, (t_list*)ttype, iter);
+ }
+
+ scope_down(out);
+
+ if (ttype->is_map()) {
+ indent(out) << "oprot.writeMapEnd();" << endl;
+ } else if (ttype->is_set()) {
+ indent(out) << "oprot.writeSetEnd();" << endl;
+ } else if (ttype->is_list()) {
+ indent(out) << "oprot.writeListEnd();" << endl;
+ }
+
+ scope_down(out);
+}
+
+/**
+ * Serializes the members of a map.
+ */
+void t_as3_generator::generate_serialize_map_element(ofstream& out,
+ t_map* tmap,
+ string iter,
+ string map) {
+ t_field kfield(tmap->get_key_type(), iter);
+ generate_serialize_field(out, &kfield, "");
+ t_field vfield(tmap->get_val_type(), map + "[" + iter + "]");
+ generate_serialize_field(out, &vfield, "");
+}
+
+/**
+ * Serializes the members of a set.
+ */
+void t_as3_generator::generate_serialize_set_element(ofstream& out, t_set* tset, string iter) {
+ t_field efield(tset->get_elem_type(), iter);
+ generate_serialize_field(out, &efield, "");
+}
+
+/**
+ * Serializes the members of a list.
+ */
+void t_as3_generator::generate_serialize_list_element(ofstream& out, t_list* tlist, string iter) {
+ t_field efield(tlist->get_elem_type(), iter);
+ generate_serialize_field(out, &efield, "");
+}
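+
+// Illustrative sketch (hypothetical field "this.scores" of Thrift type map<string,i32>;
+// not upstream text): generate_serialize_container and the element helpers above emit
+// AS3 code roughly like
+//
+//   var _sizeCounter0:int = 0;
+//   for (var _key1:* in this.scores) {
+//     _sizeCounter0++;
+//   }
+//   oprot.writeMapBegin(new TMap(TType.STRING, TType.I32, _sizeCounter0));
+//   for (var elem2:* in this.scores) {
+//     oprot.writeString(elem2);
+//     oprot.writeI32(this.scores[elem2]);
+//   }
+//   oprot.writeMapEnd();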
+
+/**
+ * Returns a As3 type name
+ *
+ * @param ttype The type
+ * @param container Is the type going inside a container?
+ * @return As3 type name, e.g. Dictionary
+ */
+string t_as3_generator::type_name(t_type* ttype, bool in_container, bool in_init) {
+ (void)in_init;
+ // In As3 typedefs are just resolved to their real type
+ ttype = get_true_type(ttype);
+ string prefix;
+
+ if (ttype->is_base_type()) {
+ return base_type_name((t_base_type*)ttype, in_container);
+ } else if (ttype->is_enum()) {
+ return "int";
+ } else if (ttype->is_map()) {
+ return "Dictionary";
+ } else if (ttype->is_set()) {
+ return "Set";
+ } else if (ttype->is_list()) {
+ return "Array";
+ }
+
+ // Check for namespacing
+ t_program* program = ttype->get_program();
+ if (program != NULL && program != program_) {
+ string package = program->get_namespace("as3");
+ if (!package.empty()) {
+ return package + "." + ttype->get_name();
+ }
+ }
+
+ return ttype->get_name();
+}
+
+/**
+ * Returns the AS3 type that corresponds to the thrift type.
+ *
+ * @param tbase The base type
+ * @param container Is it going in an As3 container?
+ */
+string t_as3_generator::base_type_name(t_base_type* type, bool in_container) {
+ (void)in_container;
+ t_base_type::t_base tbase = type->get_base();
+
+ switch (tbase) {
+ case t_base_type::TYPE_VOID:
+ return "void";
+ case t_base_type::TYPE_STRING:
+ if (type->is_binary()) {
+ return "ByteArray";
+ } else {
+ return "String";
+ }
+ case t_base_type::TYPE_BOOL:
+ return "Boolean";
+ case t_base_type::TYPE_I8:
+ case t_base_type::TYPE_I16:
+ case t_base_type::TYPE_I32:
+ return "int";
+ case t_base_type::TYPE_I64:
+ throw "i64 is not yet supported in as3";
+ case t_base_type::TYPE_DOUBLE:
+ return "Number";
+ default:
+ throw "compiler error: no As3 name for base type " + t_base_type::t_base_name(tbase);
+ }
+}
+
+/**
+ * Declares a field, which may include initialization as necessary.
+ *
+ * @param ttype The type
+ */
+string t_as3_generator::declare_field(t_field* tfield, bool init) {
+ // TODO(mcslee): do we ever need to initialize the field?
+ string result = "var " + tfield->get_name() + ":" + type_name(tfield->get_type());
+ if (init) {
+ t_type* ttype = get_true_type(tfield->get_type());
+ if (ttype->is_base_type() && tfield->get_value() != NULL) {
+ ofstream dummy;
+ result += " = " + render_const_value(dummy, tfield->get_name(), ttype, tfield->get_value());
+ } else if (ttype->is_base_type()) {
+ t_base_type::t_base tbase = ((t_base_type*)ttype)->get_base();
+ switch (tbase) {
+ case t_base_type::TYPE_VOID:
+ throw "NO T_VOID CONSTRUCT";
+ case t_base_type::TYPE_STRING:
+ result += " = null";
+ break;
+ case t_base_type::TYPE_BOOL:
+ result += " = false";
+ break;
+ case t_base_type::TYPE_I8:
+ case t_base_type::TYPE_I16:
+ case t_base_type::TYPE_I32:
+ case t_base_type::TYPE_I64:
+ result += " = 0";
+ break;
+ case t_base_type::TYPE_DOUBLE:
+ result += " = (double)0";
+ break;
+ }
+
+ } else if (ttype->is_enum()) {
+ result += " = 0";
+ } else if (ttype->is_container()) {
+ result += " = new " + type_name(ttype, false, true) + "()";
+ } else {
+ result += " = new " + type_name(ttype, false, true) + "()";
+ }
+ }
+ return result + ";";
+}
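+
+// Illustrative examples (hypothetical field names; not upstream text):
+//   declare_field(bool   "enabled", init=true)  -> "var enabled:Boolean = false;"
+//   declare_field(string "name",    init=true)  -> "var name:String = null;"
+//   declare_field(i32    "count",   init=false) -> "var count:int;"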
+
+/**
+ * Renders a function signature of the form 'type name(args)'
+ *
+ * @param tfunction Function definition
+ * @return String of rendered function definition
+ */
+string t_as3_generator::function_signature(t_function* tfunction, string prefix) {
+ std::string arguments = argument_list(tfunction->get_arglist());
+ if (!tfunction->is_oneway()) {
+ if (arguments != "") {
+ arguments += ", ";
+ }
+ arguments += "onError:Function, onSuccess:Function";
+ }
+
+ std::string result = "function " + prefix + tfunction->get_name() + "(" + arguments + "):void";
+ return result;
+}
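+
+// Illustrative example (hypothetical method; not upstream text): for
+// "string greet(1: string name)" this renders
+//   "function greet(name:String, onError:Function, onSuccess:Function):void"
+// oneway methods omit the two callback parameters.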
+
+/**
+ * Renders a comma separated field list, with type names
+ */
+string t_as3_generator::argument_list(t_struct* tstruct) {
+ string result = "";
+
+ const vector<t_field*>& fields = tstruct->get_members();
+ vector<t_field*>::const_iterator f_iter;
+ bool first = true;
+ for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
+ if (first) {
+ first = false;
+ } else {
+ result += ", ";
+ }
+ result += (*f_iter)->get_name() + ":" + type_name((*f_iter)->get_type());
+ }
+ return result;
+}
+
+/**
+ * Converts the parse type to a C++ enum string for the given type.
+ */
+string t_as3_generator::type_to_enum(t_type* type) {
+ type = get_true_type(type);
+
+ if (type->is_base_type()) {
+ t_base_type::t_base tbase = ((t_base_type*)type)->get_base();
+ switch (tbase) {
+ case t_base_type::TYPE_VOID:
+ throw "NO T_VOID CONSTRUCT";
+ case t_base_type::TYPE_STRING:
+ return "TType.STRING";
+ case t_base_type::TYPE_BOOL:
+ return "TType.BOOL";
+ case t_base_type::TYPE_I8:
+ return "TType.BYTE";
+ case t_base_type::TYPE_I16:
+ return "TType.I16";
+ case t_base_type::TYPE_I32:
+ return "TType.I32";
+ case t_base_type::TYPE_I64:
+ return "TType.I64";
+ case t_base_type::TYPE_DOUBLE:
+ return "TType.DOUBLE";
+ }
+ } else if (type->is_enum()) {
+ return "TType.I32";
+ } else if (type->is_struct() || type->is_xception()) {
+ return "TType.STRUCT";
+ } else if (type->is_map()) {
+ return "TType.MAP";
+ } else if (type->is_set()) {
+ return "TType.SET";
+ } else if (type->is_list()) {
+ return "TType.LIST";
+ }
+
+ throw "INVALID TYPE IN type_to_enum: " + type->get_name();
+}
+
+/**
+ * Capitalizes the first letter of the given name.
+ */
+std::string t_as3_generator::get_cap_name(std::string name) {
+ name[0] = toupper(name[0]);
+ return name;
+}
+
+string t_as3_generator::constant_name(string name) {
+ string constant_name;
+
+ bool is_first = true;
+ bool was_previous_char_upper = false;
+ for (string::iterator iter = name.begin(); iter != name.end(); ++iter) {
+ string::value_type character = (*iter);
+
+ bool is_upper = isupper(character);
+
+ if (is_upper && !is_first && !was_previous_char_upper) {
+ constant_name += '_';
+ }
+ constant_name += toupper(character);
+
+ is_first = false;
+ was_previous_char_upper = is_upper;
+ }
+
+ return constant_name;
+}
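+
+// Illustrative examples (hypothetical identifiers; not upstream text):
+//   constant_name("maxValue") -> "MAX_VALUE"
+//   constant_name("TSocket")  -> "TSOCKET"   (runs of capitals are not split)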
+
+/**
+ * Emits a As3Doc comment if the provided object has a doc in Thrift
+ */
+void t_as3_generator::generate_as3_doc(ofstream& out, t_doc* tdoc) {
+ if (tdoc->has_doc()) {
+ generate_docstring_comment(out, "/**\n", " * ", tdoc->get_doc(), " */\n");
+ }
+}
+
+/**
+ * Emits a As3Doc comment if the provided function object has a doc in Thrift
+ */
+void t_as3_generator::generate_as3_doc(ofstream& out, t_function* tfunction) {
+ if (tfunction->has_doc()) {
+ stringstream ss;
+ ss << tfunction->get_doc();
+ const vector<t_field*>& fields = tfunction->get_arglist()->get_members();
+ vector<t_field*>::const_iterator p_iter;
+ for (p_iter = fields.begin(); p_iter != fields.end(); ++p_iter) {
+ t_field* p = *p_iter;
+ ss << "\n@param " << p->get_name();
+ if (p->has_doc()) {
+ ss << " " << p->get_doc();
+ }
+ }
+ generate_docstring_comment(out, "/**\n", " * ", ss.str(), " */\n");
+ }
+}
+
+std::string t_as3_generator::generate_isset_check(t_field* field) {
+ return generate_isset_check(field->get_name());
+}
+
+std::string t_as3_generator::generate_isset_check(std::string field_name) {
+ return "is" + get_cap_name("set") + get_cap_name(field_name) + "()";
+}
+
+void t_as3_generator::generate_isset_set(ofstream& out, t_field* field) {
+ if (!type_can_be_null(field->get_type())) {
+ indent(out) << "this.__isset_" << field->get_name() << " = true;" << endl;
+ }
+}
+
+std::string t_as3_generator::get_enum_class_name(t_type* type) {
+ string package = "";
+ t_program* program = type->get_program();
+ if (program != NULL && program != program_) {
+ package = program->get_namespace("as3") + ".";
+ }
+ return package + type->get_name();
+}
+
+THRIFT_REGISTER_GENERATOR(
+ as3,
+ "AS3",
+ " bindable: Add [bindable] metadata to all the struct classes.\n")
diff --git a/vendor/github.com/apache/thrift/compiler/cpp/src/thrift/generate/t_c_glib_generator.cc b/vendor/github.com/apache/thrift/compiler/cpp/src/thrift/generate/t_c_glib_generator.cc
new file mode 100644
index 000000000..a7beca757
--- /dev/null
+++ b/vendor/github.com/apache/thrift/compiler/cpp/src/thrift/generate/t_c_glib_generator.cc
@@ -0,0 +1,4562 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ * Contains some contributions under the Thrift Software License.
+ * Please see doc/old-thrift-license.txt in the Thrift distribution for
+ * details.
+ */
+
+#include <fstream>
+#include <iostream>
+#include <stdlib.h>
+#include <string>
+#include <vector>
+
+#include <ctype.h>
+
+#include "thrift/platform.h"
+#include "thrift/generate/t_oop_generator.h"
+
+using std::map;
+using std::ofstream;
+using std::ostringstream;
+using std::string;
+using std::stringstream;
+using std::vector;
+
+static const string endl = "\n"; // avoid ostream << std::endl flushes
+
+/* forward declarations */
+string initial_caps_to_underscores(string name);
+string underscores_to_initial_caps(string name);
+string to_upper_case(string name);
+string to_lower_case(string name);
+
+/**
+ * C code generator, using glib for C typing.
+ */
+class t_c_glib_generator : public t_oop_generator {
+public:
+ /* constructor */
+ t_c_glib_generator(t_program* program,
+ const map<string, string>& parsed_options,
+ const string& option_string)
+ : t_oop_generator(program) {
+ (void)option_string;
+ std::map<string, string>::const_iterator iter;
+
+ /* set the output directory */
+ this->out_dir_base_ = "gen-c_glib";
+
+ /* no options yet */
+ for( iter = parsed_options.begin(); iter != parsed_options.end(); ++iter) {
+ throw "unknown option c_glib:" + iter->first;
+ }
+
+ /* set the namespace */
+ this->nspace = program_->get_namespace("c_glib");
+
+ if (this->nspace.empty()) {
+ this->nspace = "";
+ this->nspace_u = "";
+ this->nspace_uc = "";
+ this->nspace_lc = "";
+ } else {
+ /* replace dots with underscores */
+ char* tmp = strdup(this->nspace.c_str());
+ for (unsigned int i = 0; i < strlen(tmp); i++) {
+ if (tmp[i] == '.') {
+ tmp[i] = '_';
+ }
+ }
+ this->nspace = string(tmp, strlen(tmp));
+ free(tmp);
+
+ /* clean up the namespace for C.
+ * An input of 'namespace foo' should result in:
+ * - nspace = foo - for thrift objects and typedefs
+ * - nspace_u = Foo - for internal GObject prefixes
+ * - nspace_uc = FOO_ - for macro prefixes
+ * - nspace_lc = foo_ - for filename and method prefixes
+ * The underscores are there since uc and lc strings are used as file and
+ * variable prefixes.
+ */
+ this->nspace_u = initial_caps_to_underscores(this->nspace);
+ this->nspace_uc = to_upper_case(this->nspace_u) + "_";
+ this->nspace_lc = to_lower_case(this->nspace_u) + "_";
+ }
+ }
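+
+ /* Illustrative example (hypothetical namespace; not upstream text): for
+ * "namespace c_glib org.demo" the dots become underscores, giving roughly
+ * nspace = "org_demo", nspace_uc = "ORG_DEMO_", nspace_lc = "org_demo_"
+ * (nspace_u feeds the case conversions via initial_caps_to_underscores). */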
+
+ /* initialization and destruction */
+ void init_generator();
+ void close_generator();
+
+ /* generation functions */
+ void generate_typedef(t_typedef* ttypedef);
+ void generate_enum(t_enum* tenum);
+ void generate_consts(vector<t_const*> consts);
+ void generate_struct(t_struct* tstruct);
+ void generate_service(t_service* tservice);
+ void generate_xception(t_struct* tstruct);
+
+private:
+ /* file streams */
+ ofstream f_types_;
+ ofstream f_types_impl_;
+ ofstream f_header_;
+ ofstream f_service_;
+
+ /* namespace variables */
+ string nspace;
+ string nspace_u;
+ string nspace_uc;
+ string nspace_lc;
+
+ /* helper functions */
+ bool is_complex_type(t_type* ttype);
+ bool is_numeric(t_type* ttype);
+ string type_name(t_type* ttype, bool in_typedef = false, bool is_const = false);
+ string property_type_name(t_type* ttype, bool in_typedef = false, bool is_const = false);
+ string base_type_name(t_type* type);
+ string type_to_enum(t_type* type);
+ string constant_literal(t_type* type, t_const_value* value);
+ string constant_value(string name, t_type* type, t_const_value* value);
+ string constant_value_with_storage(string name, t_type* type, t_const_value* value);
+ string function_signature(t_function* tfunction);
+ string argument_list(t_struct* tstruct);
+ string xception_list(t_struct* tstruct);
+ string declare_field(t_field* tfield,
+ bool init = false,
+ bool pointer = false,
+ bool constant = false,
+ bool reference = false);
+ void declare_local_variable(ofstream& out, t_type* ttype, string& base_name, bool for_hash_table);
+ void declore_local_variable_for_write(ofstream& out, t_type* ttype, string& base_name);
+
+ /* generation functions */
+ void generate_const_initializer(string name,
+ t_type* type,
+ t_const_value* value,
+ bool top_level = false);
+ void generate_service_helpers(t_service* tservice);
+ void generate_service_client(t_service* tservice);
+ void generate_service_handler(t_service* tservice);
+ void generate_service_processor(t_service* tservice);
+ void generate_service_server(t_service* tservice);
+ void generate_object(t_struct* tstruct);
+ void generate_struct_writer(ofstream& out,
+ t_struct* tstruct,
+ string this_name,
+ string this_get = "",
+ bool is_function = true);
+ void generate_struct_reader(ofstream& out,
+ t_struct* tstruct,
+ string this_name,
+ string this_get = "",
+ bool is_function = true);
+
+ void generate_serialize_field(ofstream& out,
+ t_field* tfield,
+ string prefix,
+ string suffix,
+ int error_ret);
+ void generate_serialize_struct(ofstream& out, t_struct* tstruct, string prefix, int error_ret);
+ void generate_serialize_container(ofstream& out, t_type* ttype, string prefix, int error_ret);
+ void generate_serialize_map_element(ofstream& out,
+ t_map* tmap,
+ string key,
+ string value,
+ int error_ret);
+ void generate_serialize_set_element(ofstream& out, t_set* tset, string element, int error_ret);
+ void generate_serialize_list_element(ofstream& out,
+ t_list* tlist,
+ string list,
+ string index,
+ int error_ret);
+
+ void generate_deserialize_field(ofstream& out,
+ t_field* tfield,
+ string prefix,
+ string suffix,
+ int error_ret,
+ bool allocate = true);
+ void generate_deserialize_struct(ofstream& out,
+ t_struct* tstruct,
+ string prefix,
+ int error_ret,
+ bool allocate = true);
+ void generate_deserialize_container(ofstream& out, t_type* ttype, string prefix, int error_ret);
+ void generate_deserialize_map_element(ofstream& out, t_map* tmap, string prefix, int error_ret);
+ void generate_deserialize_set_element(ofstream& out, t_set* tset, string prefix, int error_ret);
+ void generate_deserialize_list_element(ofstream& out,
+ t_list* tlist,
+ string prefix,
+ string index,
+ int error_ret);
+
+ string generate_new_hash_from_type(t_type* key, t_type* value);
+ string generate_new_array_from_type(t_type* ttype);
+
+ string generate_free_func_from_type(t_type* ttype);
+ string generate_hash_func_from_type(t_type* ttype);
+ string generate_cmp_func_from_type(t_type* ttype);
+};
+
+/**
+ * Prepare for file generation by opening up the necessary file
+ * output streams.
+ */
+void t_c_glib_generator::init_generator() {
+ /* create output directory */
+ MKDIR(get_out_dir().c_str());
+
+ string program_name_u = initial_caps_to_underscores(program_name_);
+ string program_name_uc = to_upper_case(program_name_u);
+ string program_name_lc = to_lower_case(program_name_u);
+
+ /* create output files */
+ string f_types_name = get_out_dir() + this->nspace_lc + program_name_lc + "_types.h";
+ f_types_.open(f_types_name.c_str());
+ string f_types_impl_name = get_out_dir() + this->nspace_lc + program_name_lc + "_types.c";
+ f_types_impl_.open(f_types_impl_name.c_str());
+
+ /* add thrift boilerplate headers */
+ f_types_ << autogen_comment();
+ f_types_impl_ << autogen_comment();
+
+ /* include inclusion guard */
+ f_types_ << "#ifndef " << this->nspace_uc << program_name_uc << "_TYPES_H" << endl << "#define "
+ << this->nspace_uc << program_name_uc << "_TYPES_H" << endl << endl;
+
+ /* include base types */
+ f_types_ << "/* base includes */" << endl << "#include " << endl
+ << "#include " << endl
+ << "#include " << endl;
+
+ /* include other thrift includes */
+ const vector<t_program*>& includes = program_->get_includes();
+ for (size_t i = 0; i < includes.size(); ++i) {
+ f_types_ << "/* other thrift includes */" << endl << "#include \"" << this->nspace_lc
+ << initial_caps_to_underscores(includes[i]->get_name()) << "_types.h\"" << endl;
+ }
+ f_types_ << endl;
+
+ /* include custom headers */
+ const vector<string>& c_includes = program_->get_c_includes();
+ f_types_ << "/* custom thrift includes */" << endl;
+ for (size_t i = 0; i < c_includes.size(); ++i) {
+ if (c_includes[i][0] == '<') {
+ f_types_ << "#include " << c_includes[i] << endl;
+ } else {
+ f_types_ << "#include \"" << c_includes[i] << "\"" << endl;
+ }
+ }
+ f_types_ << endl;
+
+ /* include math.h (for "INFINITY") in the implementation file, in case we
+ encounter a struct with a member of type double */
+ f_types_impl_ << endl << "#include " << endl;
+
+ // include the types file
+ f_types_impl_ << endl << "#include \"" << this->nspace_lc << program_name_u << "_types.h\""
+ << endl << "#include " << endl << endl;
+
+ f_types_ << "/* begin types */" << endl << endl;
+}
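+
+/* Illustrative note (hypothetical program name; not upstream text): for a Thrift file
+ * "calculator.thrift" with no c_glib namespace, this opens
+ *   gen-c_glib/calculator_types.h and gen-c_glib/calculator_types.c
+ * A non-empty namespace is prepended in lower-case underscore form (nspace_lc). */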
+
+/**
+ * Finish up generation and close all file streams.
+ */
+void t_c_glib_generator::close_generator() {
+ string program_name_uc = to_upper_case(initial_caps_to_underscores(program_name_));
+
+ /* end the header inclusion guard */
+ f_types_ << "#endif /* " << this->nspace_uc << program_name_uc << "_TYPES_H */" << endl;
+
+ /* close output file */
+ f_types_.close();
+ f_types_impl_.close();
+}
+
+/**
+ * Generates a Thrift typedef in C code. For example:
+ *
+ * Thrift:
+ * typedef map<i32,i32> SomeMap
+ *
+ * C:
+ * typedef GHashTable * ThriftSomeMap;
+ */
+void t_c_glib_generator::generate_typedef(t_typedef* ttypedef) {
+ f_types_ << indent() << "typedef " << type_name(ttypedef->get_type(), true) << " " << this->nspace
+ << ttypedef->get_symbolic() << ";" << endl << endl;
+}
+
+/**
+ * Generates a C enumeration. For example:
+ *
+ * Thrift:
+ * enum MyEnum {
+ * ONE = 1,
+ * TWO
+ * }
+ *
+ * C:
+ * enum _ThriftMyEnum {
+ * THRIFT_MY_ENUM_ONE = 1,
+ * THRIFT_MY_ENUM_TWO
+ * };
+ * typedef enum _ThriftMyEnum ThriftMyEnum;
+ */
+void t_c_glib_generator::generate_enum(t_enum* tenum) {
+ string name = tenum->get_name();
+ string name_uc = to_upper_case(initial_caps_to_underscores(name));
+
+ f_types_ << indent() << "enum _" << this->nspace << name << " {" << endl;
+
+ indent_up();
+
+ vector<t_enum_value*> constants = tenum->get_constants();
+ vector<t_enum_value*>::iterator c_iter;
+ bool first = true;
+
+ /* output each of the enumeration elements */
+ for (c_iter = constants.begin(); c_iter != constants.end(); ++c_iter) {
+ if (first) {
+ first = false;
+ } else {
+ f_types_ << "," << endl;
+ }
+
+ f_types_ << indent() << this->nspace_uc << name_uc << "_" << (*c_iter)->get_name();
+ f_types_ << " = " << (*c_iter)->get_value();
+ }
+
+ indent_down();
+ f_types_ << endl << "};" << endl << "typedef enum _" << this->nspace << name << " "
+ << this->nspace << name << ";" << endl << endl;
+
+ f_types_ << "/* return the name of the constant */" << endl;
+ f_types_ << "const char *" << endl;
+ f_types_ << "toString_" << name << "(int value); " << endl << endl;
+ f_types_impl_ << "/* return the name of the constant */" << endl;
+ f_types_impl_ << "const char *" << endl;
+ f_types_impl_ << "toString_" << name << "(int value) " << endl;
+ f_types_impl_ << "{" << endl;
+ f_types_impl_ << " static __thread char buf[16] = {0};" << endl;
+ f_types_impl_ << " switch(value) {" << endl;
+ std::set<int> done;
+ for (c_iter = constants.begin(); c_iter != constants.end(); ++c_iter) {
+ int value = (*c_iter)->get_value();
+ // Skipping duplicate value
+ if (done.find(value) == done.end()) {
+ done.insert(value);
+ f_types_impl_ << " case " << this->nspace_uc << name_uc << "_" << (*c_iter)->get_name()
+ << ":"
+ << "return \"" << this->nspace_uc << name_uc << "_" << (*c_iter)->get_name()
+ << "\";" << endl;
+ }
+ }
+ f_types_impl_ << " default: g_snprintf(buf, 16, \"%d\", value); return buf;" << endl;
+ f_types_impl_ << " }" << endl;
+ f_types_impl_ << "}" << endl << endl;
+}
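+
+/* Illustrative sketch (continuing the MyEnum example above; not upstream text): the
+ * generated implementation looks roughly like
+ *
+ *   const char *
+ *   toString_MyEnum(int value)
+ *   {
+ *     static __thread char buf[16] = {0};
+ *     switch(value) {
+ *     case THRIFT_MY_ENUM_ONE:return "THRIFT_MY_ENUM_ONE";
+ *     case THRIFT_MY_ENUM_TWO:return "THRIFT_MY_ENUM_TWO";
+ *     default: g_snprintf(buf, 16, "%d", value); return buf;
+ *     }
+ *   }
+ */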
+
+/**
+ * Generates Thrift constants in C code.
+ */
+void t_c_glib_generator::generate_consts(vector<t_const*> consts) {
+ f_types_ << "/* constants */" << endl;
+ f_types_impl_ << "/* constants */" << endl;
+
+ vector<t_const*>::iterator c_iter;
+ for (c_iter = consts.begin(); c_iter != consts.end(); ++c_iter) {
+ string name = (*c_iter)->get_name();
+ string name_uc = to_upper_case(name);
+ string name_lc = to_lower_case(name);
+ t_type* type = (*c_iter)->get_type();
+ t_const_value* value = (*c_iter)->get_value();
+
+ if (is_complex_type(type)) {
+ f_types_ << type_name(type) << indent() << this->nspace_lc << name_lc
+ << "_constant();" << endl;
+ }
+
+ f_types_ << indent() << "#define " << this->nspace_uc << name_uc << " "
+ << constant_value(name_lc, type, value) << endl;
+
+ generate_const_initializer(name_lc, type, value, true);
+ }
+
+ f_types_ << endl;
+ f_types_impl_ << endl;
+}
+
+/**
+ * Generate Thrift structs in C code, as GObjects. Example:
+ *
+ * Thrift:
+ * struct Bonk
+ * {
+ * 1: string message,
+ * 2: i32 type
+ * }
+ *
+ * C GObject instance header:
+ * struct _ThriftBonk
+ * {
+ * GObject parent;
+ *
+ * gchar * message;
+ * gint32 type;
+ * };
+ * typedef struct _ThriftBonk ThriftBonk
+ * // ... additional GObject boilerplate ...
+ */
+void t_c_glib_generator::generate_struct(t_struct* tstruct) {
+ f_types_ << "/* struct " << tstruct->get_name() << " */" << endl;
+ generate_object(tstruct);
+}
+
+/**
+ * Generate C code to represent Thrift services. Creates a new GObject
+ * which can be used to access the service.
+ */
+void t_c_glib_generator::generate_service(t_service* tservice) {
+ string svcname_u = initial_caps_to_underscores(tservice->get_name());
+ string svcname_uc = this->nspace_uc + to_upper_case(svcname_u);
+ string filename = this->nspace_lc + to_lower_case(svcname_u);
+
+ // make output files
+ string f_header_name = get_out_dir() + filename + ".h";
+ f_header_.open(f_header_name.c_str());
+
+ string program_name_u = initial_caps_to_underscores(program_name_);
+ string program_name_lc = to_lower_case(program_name_u);
+
+ // add header file boilerplate
+ f_header_ << autogen_comment();
+
+ // add an inclusion guard
+ f_header_ << "#ifndef " << svcname_uc << "_H" << endl << "#define " << svcname_uc << "_H" << endl
+ << endl;
+
+ // add standard includes
+ f_header_ << "#include " << endl << endl;
+ f_header_ << "#include \"" << this->nspace_lc << program_name_lc << "_types.h\"" << endl;
+
+ // if we are inheriting from another service, include its header
+ t_service* extends_service = tservice->get_extends();
+ if (extends_service != NULL) {
+ f_header_ << "#include \"" << this->nspace_lc
+ << to_lower_case(initial_caps_to_underscores(extends_service->get_name())) << ".h\""
+ << endl;
+ }
+ f_header_ << endl;
+
+ // create the service implementation
+ string f_service_name = get_out_dir() + filename + ".c";
+ f_service_.open(f_service_name.c_str());
+
+ // add the boilerplate header
+ f_service_ << autogen_comment();
+
+ // include the headers
+ f_service_ << "#include " << endl << "#include " << endl
+ << "#include