Fnlb was moved to its own repo: fnproject/lb (#702)

* Fnlb was moved to its own repo: fnproject/lb

* Clean up fnlb leftovers

* Newer deps
Authored by Denis Makogon on 2018-01-23 00:17:29 +02:00; committed by Reed Allman
parent 4ffa3d5005
commit d3be603e54
8310 changed files with 457462 additions and 1749312 deletions

View File

@@ -5,7 +5,7 @@ type csiEntryState struct {
}
func (csiState csiEntryState) Handle(b byte) (s state, e error) {
logger.Infof("CsiEntry::Handle %#x", b)
csiState.parser.logf("CsiEntry::Handle %#x", b)
nextState, err := csiState.baseState.Handle(b)
if nextState != nil || err != nil {
@@ -25,7 +25,7 @@ func (csiState csiEntryState) Handle(b byte) (s state, e error) {
}
func (csiState csiEntryState) Transition(s state) error {
logger.Infof("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name())
csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name())
csiState.baseState.Transition(s)
switch s {

View File

@@ -5,7 +5,7 @@ type csiParamState struct {
}
func (csiState csiParamState) Handle(b byte) (s state, e error) {
logger.Infof("CsiParam::Handle %#x", b)
csiState.parser.logf("CsiParam::Handle %#x", b)
nextState, err := csiState.baseState.Handle(b)
if nextState != nil || err != nil {
@@ -26,7 +26,7 @@ func (csiState csiParamState) Handle(b byte) (s state, e error) {
}
func (csiState csiParamState) Transition(s state) error {
logger.Infof("CsiParam::Transition %s --> %s", csiState.Name(), s.Name())
csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name())
csiState.baseState.Transition(s)
switch s {

View File

@@ -5,7 +5,7 @@ type escapeIntermediateState struct {
}
func (escState escapeIntermediateState) Handle(b byte) (s state, e error) {
logger.Infof("escapeIntermediateState::Handle %#x", b)
escState.parser.logf("escapeIntermediateState::Handle %#x", b)
nextState, err := escState.baseState.Handle(b)
if nextState != nil || err != nil {
return nextState, err
@@ -24,7 +24,7 @@ func (escState escapeIntermediateState) Handle(b byte) (s state, e error) {
}
func (escState escapeIntermediateState) Transition(s state) error {
logger.Infof("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name())
escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name())
escState.baseState.Transition(s)
switch s {

View File

@@ -5,7 +5,7 @@ type escapeState struct {
}
func (escState escapeState) Handle(b byte) (s state, e error) {
logger.Infof("escapeState::Handle %#x", b)
escState.parser.logf("escapeState::Handle %#x", b)
nextState, err := escState.baseState.Handle(b)
if nextState != nil || err != nil {
return nextState, err
@@ -28,7 +28,7 @@ func (escState escapeState) Handle(b byte) (s state, e error) {
}
func (escState escapeState) Transition(s state) error {
logger.Infof("Escape::Transition %s --> %s", escState.Name(), s.Name())
escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name())
escState.baseState.Transition(s)
switch s {

View File

@@ -5,7 +5,7 @@ type oscStringState struct {
}
func (oscState oscStringState) Handle(b byte) (s state, e error) {
logger.Infof("OscString::Handle %#x", b)
oscState.parser.logf("OscString::Handle %#x", b)
nextState, err := oscState.baseState.Handle(b)
if nextState != nil || err != nil {
return nextState, err

View File

@@ -2,14 +2,10 @@ package ansiterm
import (
"errors"
"io/ioutil"
"log"
"os"
"github.com/sirupsen/logrus"
)
var logger *logrus.Logger
type AnsiParser struct {
currState state
eventHandler AnsiEventHandler
@@ -23,50 +19,69 @@ type AnsiParser struct {
ground state
oscString state
stateMap []state
logf func(string, ...interface{})
}
func CreateParser(initialState string, evtHandler AnsiEventHandler) *AnsiParser {
logFile := ioutil.Discard
type Option func(*AnsiParser)
if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" {
logFile, _ = os.Create("ansiParser.log")
func WithLogf(f func(string, ...interface{})) Option {
return func(ap *AnsiParser) {
ap.logf = f
}
}
logger = &logrus.Logger{
Out: logFile,
Formatter: new(logrus.TextFormatter),
Level: logrus.InfoLevel,
}
parser := &AnsiParser{
func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser {
ap := &AnsiParser{
eventHandler: evtHandler,
context: &ansiContext{},
}
parser.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: parser}}
parser.csiParam = csiParamState{baseState{name: "CsiParam", parser: parser}}
parser.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: parser}}
parser.escape = escapeState{baseState{name: "Escape", parser: parser}}
parser.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: parser}}
parser.error = errorState{baseState{name: "Error", parser: parser}}
parser.ground = groundState{baseState{name: "Ground", parser: parser}}
parser.oscString = oscStringState{baseState{name: "OscString", parser: parser}}
parser.stateMap = []state{
parser.csiEntry,
parser.csiParam,
parser.dcsEntry,
parser.escape,
parser.escapeIntermediate,
parser.error,
parser.ground,
parser.oscString,
for _, o := range opts {
o(ap)
}
parser.currState = getState(initialState, parser.stateMap)
if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" {
logFile, _ := os.Create("ansiParser.log")
logger := log.New(logFile, "", log.LstdFlags)
if ap.logf != nil {
l := ap.logf
ap.logf = func(s string, v ...interface{}) {
l(s, v...)
logger.Printf(s, v...)
}
} else {
ap.logf = logger.Printf
}
}
logger.Infof("CreateParser: parser %p", parser)
return parser
if ap.logf == nil {
ap.logf = func(string, ...interface{}) {}
}
ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}}
ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}}
ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}}
ap.escape = escapeState{baseState{name: "Escape", parser: ap}}
ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}}
ap.error = errorState{baseState{name: "Error", parser: ap}}
ap.ground = groundState{baseState{name: "Ground", parser: ap}}
ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}}
ap.stateMap = []state{
ap.csiEntry,
ap.csiParam,
ap.dcsEntry,
ap.escape,
ap.escapeIntermediate,
ap.error,
ap.ground,
ap.oscString,
}
ap.currState = getState(initialState, ap.stateMap)
ap.logf("CreateParser: parser %p", ap)
return ap
}
func getState(name string, states []state) state {
@@ -97,7 +112,7 @@ func (ap *AnsiParser) handle(b byte) error {
}
if newState == nil {
logger.Warning("newState is nil")
ap.logf("WARNING: newState is nil")
return errors.New("New state of 'nil' is invalid.")
}
@@ -111,23 +126,23 @@ func (ap *AnsiParser) handle(b byte) error {
}
func (ap *AnsiParser) changeState(newState state) error {
logger.Infof("ChangeState %s --> %s", ap.currState.Name(), newState.Name())
ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name())
// Exit old state
if err := ap.currState.Exit(); err != nil {
logger.Infof("Exit state '%s' failed with : '%v'", ap.currState.Name(), err)
ap.logf("Exit state '%s' failed with : '%v'", ap.currState.Name(), err)
return err
}
// Perform transition action
if err := ap.currState.Transition(newState); err != nil {
logger.Infof("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err)
ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err)
return err
}
// Enter new state
if err := newState.Enter(); err != nil {
logger.Infof("Enter state '%s' failed with: '%v'", newState.Name(), err)
ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err)
return err
}
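
The hunks above replace go-ansiterm's package-global logrus logger with an injectable logf callback plus a functional Option. A minimal sketch of how a caller could wire in its own logger after this change; myHandler is a hypothetical AnsiEventHandler implementation, and the API shape is taken from the diff above:

package demo

import (
	"log"

	ansiterm "github.com/Azure/go-ansiterm"
)

// newLoggedParser builds a parser whose internal tracing goes to the
// standard library logger instead of the old logrus global.
func newLoggedParser(myHandler ansiterm.AnsiEventHandler) *ansiterm.AnsiParser {
	return ansiterm.CreateParser("Ground", myHandler,
		ansiterm.WithLogf(func(format string, v ...interface{}) {
			log.Printf("ansiterm: "+format, v...)
		}),
	)
}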

View File

@@ -27,7 +27,6 @@ func parseParams(bytes []byte) ([]string, error) {
params = append(params, s)
}
logger.Infof("Parsed params: %v with length: %d", params, len(params))
return params, nil
}
@@ -37,7 +36,6 @@ func parseCmd(context ansiContext) (string, error) {
func getInt(params []string, dflt int) int {
i := getInts(params, 1, dflt)[0]
logger.Infof("getInt: %v", i)
return i
}
@@ -60,8 +58,6 @@ func getInts(params []string, minCount int, dflt int) []int {
}
}
logger.Infof("getInts: %v", ints)
return ints
}

View File

@@ -1,19 +1,15 @@
package ansiterm
import (
"fmt"
)
func (ap *AnsiParser) collectParam() error {
currChar := ap.context.currentChar
logger.Infof("collectParam %#x", currChar)
ap.logf("collectParam %#x", currChar)
ap.context.paramBuffer = append(ap.context.paramBuffer, currChar)
return nil
}
func (ap *AnsiParser) collectInter() error {
currChar := ap.context.currentChar
logger.Infof("collectInter %#x", currChar)
ap.logf("collectInter %#x", currChar)
ap.context.paramBuffer = append(ap.context.interBuffer, currChar)
return nil
}
@@ -21,8 +17,8 @@ func (ap *AnsiParser) collectInter() error {
func (ap *AnsiParser) escDispatch() error {
cmd, _ := parseCmd(*ap.context)
intermeds := ap.context.interBuffer
logger.Infof("escDispatch currentChar: %#x", ap.context.currentChar)
logger.Infof("escDispatch: %v(%v)", cmd, intermeds)
ap.logf("escDispatch currentChar: %#x", ap.context.currentChar)
ap.logf("escDispatch: %v(%v)", cmd, intermeds)
switch cmd {
case "D": // IND
@@ -43,8 +39,9 @@ func (ap *AnsiParser) escDispatch() error {
func (ap *AnsiParser) csiDispatch() error {
cmd, _ := parseCmd(*ap.context)
params, _ := parseParams(ap.context.paramBuffer)
ap.logf("Parsed params: %v with length: %d", params, len(params))
logger.Infof("csiDispatch: %v(%v)", cmd, params)
ap.logf("csiDispatch: %v(%v)", cmd, params)
switch cmd {
case "@":
@@ -102,7 +99,7 @@ func (ap *AnsiParser) csiDispatch() error {
top, bottom := ints[0], ints[1]
return ap.eventHandler.DECSTBM(top, bottom)
default:
logger.Errorf(fmt.Sprintf("Unsupported CSI command: '%s', with full context: %v", cmd, ap.context))
ap.logf("ERROR: Unsupported CSI command: '%s', with full context: %v", cmd, ap.context)
return nil
}

View File

@@ -175,7 +175,7 @@ func GetStdFile(nFile int) (*os.File, uintptr) {
fd, err := syscall.GetStdHandle(nFile)
if err != nil {
panic(fmt.Errorf("Invalid standard handle indentifier: %v -- %v", nFile, err))
panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err))
}
return file, uintptr(fd)

View File

@@ -49,17 +49,22 @@ var (
const (
// Console modes
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
ENABLE_PROCESSED_INPUT = 0x0001
ENABLE_LINE_INPUT = 0x0002
ENABLE_ECHO_INPUT = 0x0004
ENABLE_WINDOW_INPUT = 0x0008
ENABLE_MOUSE_INPUT = 0x0010
ENABLE_INSERT_MODE = 0x0020
ENABLE_QUICK_EDIT_MODE = 0x0040
ENABLE_EXTENDED_FLAGS = 0x0080
ENABLE_PROCESSED_INPUT = 0x0001
ENABLE_LINE_INPUT = 0x0002
ENABLE_ECHO_INPUT = 0x0004
ENABLE_WINDOW_INPUT = 0x0008
ENABLE_MOUSE_INPUT = 0x0010
ENABLE_INSERT_MODE = 0x0020
ENABLE_QUICK_EDIT_MODE = 0x0040
ENABLE_EXTENDED_FLAGS = 0x0080
ENABLE_AUTO_POSITION = 0x0100
ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200
ENABLE_PROCESSED_OUTPUT = 0x0001
ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002
ENABLE_PROCESSED_OUTPUT = 0x0001
ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
DISABLE_NEWLINE_AUTO_RETURN = 0x0008
ENABLE_LVB_GRID_WORLDWIDE = 0x0010
// Character attributes
// Note:
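
The flags added above are the newer Windows 10 console modes; ENABLE_VIRTUAL_TERMINAL_PROCESSING in particular is what switches an output handle into VT100 interpretation. A hedged sketch of applying it, assuming winterm's GetConsoleMode/SetConsoleMode wrappers keep their (uintptr, uint32) shape:

package demo

import "github.com/Azure/go-ansiterm/winterm"

// enableVT tries to turn on VT processing for a console output handle;
// pre-Windows-10 consoles reject the flag and return an error.
func enableVT(fd uintptr) error {
	mode, err := winterm.GetConsoleMode(fd)
	if err != nil {
		return err
	}
	return winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
}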

View File

@@ -34,7 +34,7 @@ func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL
if err != nil {
return err
}
logger.Infof("Cursor position set: (%d, %d)", position.X, position.Y)
h.logf("Cursor position set: (%d, %d)", position.X, position.Y)
return err
}

View File

@@ -50,8 +50,8 @@ func (h *windowsAnsiEventHandler) insertLines(param int) error {
// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates.
func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error {
logger.Infof("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom)
logger.Infof("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom)
h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom)
h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom)
// Copy from and clip to the scroll region (full buffer width)
scrollRect := SMALL_RECT{

View File

@@ -4,16 +4,13 @@ package winterm
import (
"bytes"
"io/ioutil"
"log"
"os"
"strconv"
"github.com/Azure/go-ansiterm"
"github.com/sirupsen/logrus"
)
var logger *logrus.Logger
type windowsAnsiEventHandler struct {
fd uintptr
file *os.File
@@ -28,32 +25,52 @@ type windowsAnsiEventHandler struct {
marginByte byte
curInfo *CONSOLE_SCREEN_BUFFER_INFO
curPos COORD
logf func(string, ...interface{})
}
func CreateWinEventHandler(fd uintptr, file *os.File) ansiterm.AnsiEventHandler {
logFile := ioutil.Discard
type Option func(*windowsAnsiEventHandler)
if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
logFile, _ = os.Create("winEventHandler.log")
}
logger = &logrus.Logger{
Out: logFile,
Formatter: new(logrus.TextFormatter),
Level: logrus.DebugLevel,
func WithLogf(f func(string, ...interface{})) Option {
return func(w *windowsAnsiEventHandler) {
w.logf = f
}
}
func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler {
infoReset, err := GetConsoleScreenBufferInfo(fd)
if err != nil {
return nil
}
return &windowsAnsiEventHandler{
h := &windowsAnsiEventHandler{
fd: fd,
file: file,
infoReset: infoReset,
attributes: infoReset.Attributes,
}
for _, o := range opts {
o(h)
}
if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
logFile, _ := os.Create("winEventHandler.log")
logger := log.New(logFile, "", log.LstdFlags)
if h.logf != nil {
l := h.logf
h.logf = func(s string, v ...interface{}) {
l(s, v...)
logger.Printf(s, v...)
}
} else {
h.logf = logger.Printf
}
}
if h.logf == nil {
h.logf = func(string, ...interface{}) {}
}
return h
}
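// The same functional-option pattern from the parser applies here: callers
// can now pass WithLogf to CreateWinEventHandler. A hedged usage sketch
// (stdout as the target console, standard-library logging):
//
//	h := winterm.CreateWinEventHandler(os.Stdout.Fd(), os.Stdout,
//		winterm.WithLogf(log.Printf))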
type scrollRegion struct {
@@ -96,7 +113,7 @@ func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) {
if err := h.Flush(); err != nil {
return false, err
}
logger.Info("Simulating LF inside scroll region")
h.logf("Simulating LF inside scroll region")
if err := h.scrollUp(1); err != nil {
return false, err
}
@@ -119,7 +136,7 @@ func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) {
} else {
// The cursor is at the bottom of the screen but outside the scroll
// region. Skip the LF.
logger.Info("Simulating LF outside scroll region")
h.logf("Simulating LF outside scroll region")
if includeCR {
if err := h.Flush(); err != nil {
return false, err
@@ -151,7 +168,7 @@ func (h *windowsAnsiEventHandler) executeLF() error {
if err := h.Flush(); err != nil {
return err
}
logger.Info("Resetting cursor position for LF without CR")
h.logf("Resetting cursor position for LF without CR")
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
return err
}
@@ -186,7 +203,7 @@ func (h *windowsAnsiEventHandler) Print(b byte) error {
func (h *windowsAnsiEventHandler) Execute(b byte) error {
switch b {
case ansiterm.ANSI_TAB:
logger.Info("Execute(TAB)")
h.logf("Execute(TAB)")
// Move to the next tab stop, but preserve auto-wrap if already set.
if !h.wrapNext {
pos, info, err := h.getCurrentInfo()
@@ -269,7 +286,7 @@ func (h *windowsAnsiEventHandler) CUU(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CUU: [%v]", []string{strconv.Itoa(param)})
h.logf("CUU: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.moveCursorVertical(-param)
}
@@ -278,7 +295,7 @@ func (h *windowsAnsiEventHandler) CUD(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CUD: [%v]", []string{strconv.Itoa(param)})
h.logf("CUD: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.moveCursorVertical(param)
}
@@ -287,7 +304,7 @@ func (h *windowsAnsiEventHandler) CUF(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CUF: [%v]", []string{strconv.Itoa(param)})
h.logf("CUF: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.moveCursorHorizontal(param)
}
@@ -296,7 +313,7 @@ func (h *windowsAnsiEventHandler) CUB(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CUB: [%v]", []string{strconv.Itoa(param)})
h.logf("CUB: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.moveCursorHorizontal(-param)
}
@@ -305,7 +322,7 @@ func (h *windowsAnsiEventHandler) CNL(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CNL: [%v]", []string{strconv.Itoa(param)})
h.logf("CNL: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.moveCursorLine(param)
}
@@ -314,7 +331,7 @@ func (h *windowsAnsiEventHandler) CPL(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CPL: [%v]", []string{strconv.Itoa(param)})
h.logf("CPL: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.moveCursorLine(-param)
}
@@ -323,7 +340,7 @@ func (h *windowsAnsiEventHandler) CHA(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CHA: [%v]", []string{strconv.Itoa(param)})
h.logf("CHA: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.moveCursorColumn(param)
}
@@ -332,7 +349,7 @@ func (h *windowsAnsiEventHandler) VPA(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("VPA: [[%d]]", param)
h.logf("VPA: [[%d]]", param)
h.clearWrap()
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
@@ -348,7 +365,7 @@ func (h *windowsAnsiEventHandler) CUP(row int, col int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CUP: [[%d %d]]", row, col)
h.logf("CUP: [[%d %d]]", row, col)
h.clearWrap()
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
@@ -364,7 +381,7 @@ func (h *windowsAnsiEventHandler) HVP(row int, col int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("HVP: [[%d %d]]", row, col)
h.logf("HVP: [[%d %d]]", row, col)
h.clearWrap()
return h.CUP(row, col)
}
@@ -373,7 +390,7 @@ func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("DECTCEM: [%v]", []string{strconv.FormatBool(visible)})
h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)})
h.clearWrap()
return nil
}
@@ -382,7 +399,7 @@ func (h *windowsAnsiEventHandler) DECOM(enable bool) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("DECOM: [%v]", []string{strconv.FormatBool(enable)})
h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)})
h.clearWrap()
h.originMode = enable
return h.CUP(1, 1)
@@ -392,7 +409,7 @@ func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("DECCOLM: [%v]", []string{strconv.FormatBool(use132)})
h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)})
h.clearWrap()
if err := h.ED(2); err != nil {
return err
@@ -407,7 +424,7 @@ func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error {
}
if info.Size.X < targetWidth {
if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
logger.Info("set buffer failed:", err)
h.logf("set buffer failed: %v", err)
return err
}
}
@@ -415,12 +432,12 @@ func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error {
window.Left = 0
window.Right = targetWidth - 1
if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
logger.Info("set window failed:", err)
h.logf("set window failed: %v", err)
return err
}
if info.Size.X > targetWidth {
if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
logger.Info("set buffer failed:", err)
h.logf("set buffer failed: %v", err)
return err
}
}
@@ -431,7 +448,7 @@ func (h *windowsAnsiEventHandler) ED(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("ED: [%v]", []string{strconv.Itoa(param)})
h.logf("ED: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
// [J -- Erases from the cursor to the end of the screen, including the cursor position.
@@ -490,7 +507,7 @@ func (h *windowsAnsiEventHandler) EL(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("EL: [%v]", strconv.Itoa(param))
h.logf("EL: [%v]", strconv.Itoa(param))
h.clearWrap()
// [K -- Erases from the cursor to the end of the line, including the cursor position.
@@ -531,7 +548,7 @@ func (h *windowsAnsiEventHandler) IL(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("IL: [%v]", strconv.Itoa(param))
h.logf("IL: [%v]", strconv.Itoa(param))
h.clearWrap()
return h.insertLines(param)
}
@@ -540,7 +557,7 @@ func (h *windowsAnsiEventHandler) DL(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("DL: [%v]", strconv.Itoa(param))
h.logf("DL: [%v]", strconv.Itoa(param))
h.clearWrap()
return h.deleteLines(param)
}
@@ -549,7 +566,7 @@ func (h *windowsAnsiEventHandler) ICH(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("ICH: [%v]", strconv.Itoa(param))
h.logf("ICH: [%v]", strconv.Itoa(param))
h.clearWrap()
return h.insertCharacters(param)
}
@@ -558,7 +575,7 @@ func (h *windowsAnsiEventHandler) DCH(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("DCH: [%v]", strconv.Itoa(param))
h.logf("DCH: [%v]", strconv.Itoa(param))
h.clearWrap()
return h.deleteCharacters(param)
}
@@ -572,7 +589,7 @@ func (h *windowsAnsiEventHandler) SGR(params []int) error {
strings = append(strings, strconv.Itoa(v))
}
logger.Infof("SGR: [%v]", strings)
h.logf("SGR: [%v]", strings)
if len(params) <= 0 {
h.attributes = h.infoReset.Attributes
@@ -606,7 +623,7 @@ func (h *windowsAnsiEventHandler) SU(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("SU: [%v]", []string{strconv.Itoa(param)})
h.logf("SU: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.scrollUp(param)
}
@@ -615,13 +632,13 @@ func (h *windowsAnsiEventHandler) SD(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("SD: [%v]", []string{strconv.Itoa(param)})
h.logf("SD: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.scrollDown(param)
}
func (h *windowsAnsiEventHandler) DA(params []string) error {
logger.Infof("DA: [%v]", params)
h.logf("DA: [%v]", params)
// DA cannot be implemented because it must send data on the VT100 input stream,
// which is not available to go-ansiterm.
return nil
@@ -631,7 +648,7 @@ func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("DECSTBM: [%d, %d]", top, bottom)
h.logf("DECSTBM: [%d, %d]", top, bottom)
// Windows is 0 indexed, Linux is 1 indexed
h.sr.top = int16(top - 1)
@@ -646,7 +663,7 @@ func (h *windowsAnsiEventHandler) RI() error {
if err := h.Flush(); err != nil {
return err
}
logger.Info("RI: []")
h.logf("RI: []")
h.clearWrap()
info, err := GetConsoleScreenBufferInfo(h.fd)
@@ -663,21 +680,21 @@ func (h *windowsAnsiEventHandler) RI() error {
}
func (h *windowsAnsiEventHandler) IND() error {
logger.Info("IND: []")
h.logf("IND: []")
return h.executeLF()
}
func (h *windowsAnsiEventHandler) Flush() error {
h.curInfo = nil
if h.buffer.Len() > 0 {
logger.Infof("Flush: [%s]", h.buffer.Bytes())
h.logf("Flush: [%s]", h.buffer.Bytes())
if _, err := h.buffer.WriteTo(h.file); err != nil {
return err
}
}
if h.wrapNext && !h.drewMarginByte {
logger.Infof("Flush: drawing margin byte '%c'", h.marginByte)
h.logf("Flush: drawing margin byte '%c'", h.marginByte)
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {

View File

@@ -16,7 +16,6 @@ import (
//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
//sys timeBeginPeriod(period uint32) (n int32) = winmm.timeBeginPeriod
type atomicBool int32
@@ -153,8 +152,6 @@ func (f *win32File) prepareIo() (*ioOperation, error) {
// ioCompletionProcessor processes completed async IOs forever
func ioCompletionProcessor(h syscall.Handle) {
// Set the timer resolution to 1. This fixes a performance regression in golang 1.6.
timeBeginPeriod(1)
for {
var bytes uint32
var key uintptr

View File

@@ -22,6 +22,7 @@ import (
const (
cERROR_PIPE_BUSY = syscall.Errno(231)
cERROR_NO_DATA = syscall.Errno(232)
cERROR_PIPE_CONNECTED = syscall.Errno(535)
cERROR_SEM_TIMEOUT = syscall.Errno(121)
@@ -254,6 +255,36 @@ func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
return f, nil
}
func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) {
p, err := l.makeServerPipe()
if err != nil {
return nil, err
}
// Wait for the client to connect.
ch := make(chan error)
go func(p *win32File) {
ch <- connectPipe(p)
}(p)
select {
case err = <-ch:
if err != nil {
p.Close()
p = nil
}
case <-l.closeCh:
// Abort the connect request by closing the handle.
p.Close()
p = nil
err = <-ch
if err == nil || err == ErrFileClosed {
err = ErrPipeListenerClosed
}
}
return p, err
}
func (l *win32PipeListener) listenerRoutine() {
closed := false
for !closed {
@@ -261,31 +292,20 @@ func (l *win32PipeListener) listenerRoutine() {
case <-l.closeCh:
closed = true
case responseCh := <-l.acceptCh:
p, err := l.makeServerPipe()
if err == nil {
// Wait for the client to connect.
ch := make(chan error)
go func(p *win32File) {
ch <- connectPipe(p)
}(p)
select {
case err = <-ch:
if err != nil {
p.Close()
p = nil
}
case <-l.closeCh:
// Abort the connect request by closing the handle.
p.Close()
p = nil
err = <-ch
if err == nil || err == ErrFileClosed {
err = ErrPipeListenerClosed
}
closed = true
var (
p *win32File
err error
)
for {
p, err = l.makeConnectedServerPipe()
// If the connection was immediately closed by the client, try
// again.
if err != cERROR_NO_DATA {
break
}
}
responseCh <- acceptResponse{p, err}
closed = err == ErrPipeListenerClosed
}
}
syscall.Close(l.firstHandle)

View File

@@ -422,3 +422,32 @@ func TestEchoWithMessaging(t *testing.T) {
<-listenerDone
<-clientDone
}
func TestConnectRace(t *testing.T) {
l, err := ListenPipe(testPipeName, nil)
if err != nil {
t.Fatal(err)
}
defer l.Close()
go func() {
for {
s, err := l.Accept()
if err == ErrPipeListenerClosed {
return
}
if err != nil {
t.Fatal(err)
}
s.Close()
}
}()
for i := 0; i < 1000; i++ {
c, err := DialPipe(testPipeName, nil)
if err != nil {
t.Fatal(err)
}
c.Close()
}
}

View File

@@ -38,14 +38,12 @@ func errnoErr(e syscall.Errno) error {
var (
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
modwinmm = windows.NewLazySystemDLL("winmm.dll")
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
proctimeBeginPeriod = modwinmm.NewProc("timeBeginPeriod")
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
procCreateFileW = modkernel32.NewProc("CreateFileW")
@@ -122,12 +120,6 @@ func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err erro
return
}
func timeBeginPeriod(period uint32) (n int32) {
r0, _, _ := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0)
n = int32(r0)
return
}
func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
if r1 == 0 {

View File

@@ -12,6 +12,7 @@ Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc].
## Changelog
* **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121).
* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich).
* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]).
* **v0.2.0** : Add benchmarks, Attempt IDN support.
@@ -172,6 +173,7 @@ And with `FlagsUnsafeGreedy`:
@opennota
@pchristopher1275
@zenovich
@beeker1121
## License

View File

@@ -15,8 +15,8 @@ import (
"github.com/PuerkitoBio/urlesc"
"golang.org/x/net/idna"
"golang.org/x/text/secure/precis"
"golang.org/x/text/unicode/norm"
"golang.org/x/text/width"
)
// A set of normalization flags determines how a URL will
@@ -150,22 +150,26 @@ func MustNormalizeURLString(u string, f NormalizationFlags) string {
// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object.
// It takes an URL string as input, as well as the normalization flags.
func NormalizeURLString(u string, f NormalizationFlags) (string, error) {
if parsed, e := url.Parse(u); e != nil {
return "", e
} else {
options := make([]precis.Option, 1, 3)
options[0] = precis.IgnoreCase
if f&FlagLowercaseHost == FlagLowercaseHost {
options = append(options, precis.FoldCase())
}
options = append(options, precis.Norm(norm.NFC))
profile := precis.NewFreeform(options...)
if parsed.Host, e = idna.ToASCII(profile.NewTransformer().String(parsed.Host)); e != nil {
return "", e
}
return NormalizeURL(parsed, f), nil
parsed, err := url.Parse(u)
if err != nil {
return "", err
}
panic("Unreachable code.")
if f&FlagLowercaseHost == FlagLowercaseHost {
parsed.Host = strings.ToLower(parsed.Host)
}
// The idna package doesn't fully conform to RFC 5895
// (https://tools.ietf.org/html/rfc5895), so we do it here.
// Taken from Go 1.8 cycle source, courtesy of bradfitz.
// TODO: Remove when (if?) idna package conforms to RFC 5895.
parsed.Host = width.Fold.String(parsed.Host)
parsed.Host = norm.NFC.String(parsed.Host)
if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil {
return "", err
}
return NormalizeURL(parsed, f), nil
}
// NormalizeURL returns the normalized string.
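
The rewrite above drops the precis-based transformer in favor of width folding plus NFC normalization ahead of the IDNA conversion, per RFC 5895. A quick, hedged sketch of the exported entry point; the URL and flag choice are illustrative:

package demo

import "github.com/PuerkitoBio/purell"

// normalize should come out as "http://example.com/Foo" for this input;
// an error surfaces only if the input fails to parse as a URL.
func normalize() (string, error) {
	return purell.NormalizeURLString("HTTP://ExAmPLE.com/Foo",
		purell.FlagLowercaseScheme|purell.FlagLowercaseHost)
}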

View File

@@ -35,6 +35,7 @@ func TestUrlnorm(t *testing.T) {
"http://XBLA\u306eXbox.com": "http://xn--xblaxbox-jf4g.com", //test utf8 and unicode
"http://президент.рф": "http://xn--d1abbgf6aiiy.xn--p1ai",
"http://ПРЕЗИДЕНТ.РФ": "http://xn--d1abbgf6aiiy.xn--p1ai",
"http://ab¥ヲ₩○.com": "http://xn--ab-ida8983azmfnvs.com", //test width folding
"http://\u00e9.com": "http://xn--9ca.com",
"http://e\u0301.com": "http://xn--9ca.com",
"http://ja.wikipedia.org/wiki/%E3%82%AD%E3%83%A3%E3%82%BF%E3%83%94%E3%83%A9%E3%83%BC%E3%82%B8%E3%83%A3%E3%83%91%E3%83%B3": "http://ja.wikipedia.org/wiki/%E3%82%AD%E3%83%A3%E3%82%BF%E3%83%94%E3%83%A9%E3%83%BC%E3%82%B8%E3%83%A3%E3%83%91%E3%83%B3",

View File

@@ -1,7 +1,11 @@
language: go
go:
- 1.4
- 1.4.x
- 1.5.x
- 1.6.x
- 1.7.x
- 1.8.x
- tip
install:

View File

@@ -1,4 +1,4 @@
urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.png?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc)
urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.svg?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc)
======
Package urlesc implements query escaping as per RFC 3986.

View File

@@ -22,3 +22,5 @@ _cgo_export.*
_testmain.go
*.exe
coverage.txt

View File

@@ -1,7 +1,8 @@
language: go
go:
- 1.7.3
- 1.8
- 1.7.x
- 1.8.x
- 1.9.x
env:
global:
@@ -11,8 +12,9 @@ env:
- KAFKA_HOSTNAME=localhost
- DEBUG=true
matrix:
- KAFKA_VERSION=0.9.0.1
- KAFKA_VERSION=0.10.2.0
- KAFKA_VERSION=0.10.2.1
- KAFKA_VERSION=0.11.0.2
- KAFKA_VERSION=1.0.0
before_install:
- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}
@@ -29,4 +31,7 @@ script:
- make errcheck
- make fmt
after_success:
- bash <(curl -s https://codecov.io/bash)
sudo: false

View File

@@ -1,5 +1,77 @@
# Changelog
#### Version 1.15.0 (2017-12-08)
New Features:
- Claim official support for Kafka 1.0, though it did already work
([#984](https://github.com/Shopify/sarama/pull/984)).
- Helper methods for Kafka version numbers to/from strings
([#989](https://github.com/Shopify/sarama/pull/989)).
- Implement CreatePartitions request/response
([#985](https://github.com/Shopify/sarama/pull/985)).
Improvements:
- Add error codes 45-60
([#986](https://github.com/Shopify/sarama/issues/986)).
Bug Fixes:
- Fix slow consuming for certain Kafka 0.11/1.0 configurations
([#982](https://github.com/Shopify/sarama/pull/982)).
- Correctly determine when a FetchResponse contains the new message format
([#990](https://github.com/Shopify/sarama/pull/990)).
- Fix producing with multiple headers
([#996](https://github.com/Shopify/sarama/pull/996)).
- Fix handling of truncated record batches
([#998](https://github.com/Shopify/sarama/pull/998)).
- Fix leaking metrics when closing brokers
([#991](https://github.com/Shopify/sarama/pull/991)).
#### Version 1.14.0 (2017-11-13)
New Features:
- Add support for the new Kafka 0.11 record-batch format, including the wire
protocol and the necessary behavioural changes in the producer and consumer.
Transactions and idempotency are not yet supported, but producing and
consuming should work with all the existing bells and whistles (batching,
compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta
of Arista Networks for this work. Part of
([#901](https://github.com/Shopify/sarama/issues/901)).
Bug Fixes:
- Fix encoding of ProduceResponse versions in test
([#970](https://github.com/Shopify/sarama/pull/970)).
- Return partial replicas list when we have it
([#975](https://github.com/Shopify/sarama/pull/975)).
#### Version 1.13.0 (2017-10-04)
New Features:
- Support for FetchRequest version 3
([#905](https://github.com/Shopify/sarama/pull/905)).
- Permit setting version on mock FetchResponses
([#939](https://github.com/Shopify/sarama/pull/939)).
- Add a configuration option to support storing only minimal metadata for
extremely large clusters
([#937](https://github.com/Shopify/sarama/pull/937)).
- Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets
([#932](https://github.com/Shopify/sarama/pull/932)).
Improvements:
- Provide the block-level timestamp when consuming compressed messages
([#885](https://github.com/Shopify/sarama/issues/885)).
- `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned
by the broker, which can be meaningful
([#930](https://github.com/Shopify/sarama/pull/930)).
- Use a `Ticker` to reduce consumer timer overhead at the cost of higher
variance in the actual timeout
([#933](https://github.com/Shopify/sarama/pull/933)).
Bug Fixes:
- Gracefully handle messages with negative timestamps
([#907](https://github.com/Shopify/sarama/pull/907)).
- Raise a proper error when encountering an unknown message version
([#940](https://github.com/Shopify/sarama/pull/940)).
#### Version 1.12.0 (2017-05-08)
New Features:

View File

@@ -1,7 +1,15 @@
default: fmt vet errcheck test
# Taken from https://github.com/codecov/example-go#caveat-multiple-files
test:
go test -v -timeout 60s -race ./...
echo "" > coverage.txt
for d in `go list ./... | grep -v vendor`; do \
go test -v -timeout 60s -race -coverprofile=profile.out -covermode=atomic $$d; \
if [ -f profile.out ]; then \
cat profile.out >> coverage.txt; \
rm profile.out; \
fi \
done
vet:
go vet ./...

View File

@@ -3,6 +3,7 @@ sarama
[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.png)](https://godoc.org/github.com/Shopify/sarama)
[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama)
[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama)
Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).
@@ -20,7 +21,7 @@ You might also want to look at the [Frequently Asked Questions](https://github.c
Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
the two latest stable releases of Kafka and Go, and we provide a two month
grace period for older releases. This means we currently officially support
Go 1.8 and 1.7, and Kafka 0.10 and 0.9, although older releases are
Go 1.9 through 1.7, and Kafka 1.0 through 0.10, although older releases are
still likely to work.
Sarama follows semantic versioning and provides API stability via the gopkg.in service.

View File

@@ -1,6 +1,7 @@
package sarama
import (
"encoding/binary"
"fmt"
"sync"
"time"
@@ -119,6 +120,10 @@ type ProducerMessage struct {
// StringEncoder and ByteEncoder.
Value Encoder
// The headers are key-value pairs that are transparently passed
// by Kafka between producers and consumers.
Headers []RecordHeader
// This field is used to hold arbitrary data you wish to include so it
// will be available when receiving on the Successes and Errors channels.
// Sarama completely ignores this field and is only to be used for
@@ -146,8 +151,16 @@ type ProducerMessage struct {
const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
func (m *ProducerMessage) byteSize() int {
size := producerMessageOverhead
func (m *ProducerMessage) byteSize(version int) int {
var size int
if version >= 2 {
size = maximumRecordOverhead
for _, h := range m.Headers {
size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32
}
} else {
size = producerMessageOverhead
}
if m.Key != nil {
size += m.Key.Length()
}
@@ -254,7 +267,11 @@ func (p *asyncProducer) dispatcher() {
p.inFlight.Add(1)
}
if msg.byteSize() > p.conf.Producer.MaxMessageBytes {
version := 1
if p.conf.Version.IsAtLeast(V0_11_0_0) {
version = 2
}
if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes {
p.returnError(msg, ErrMessageSizeTooLarge)
continue
}
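
With the new Headers field on ProducerMessage (and byteSize now accounting for v2 record overhead), record headers can be attached when the broker speaks Kafka 0.11+. A hedged sketch; the broker address and topic are placeholders:

package demo

import (
	"log"

	"github.com/Shopify/sarama"
)

func produceWithHeaders() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_11_0_0       // headers require the v2 record format
	cfg.Producer.Return.Successes = true // required by the sync producer
	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()
	_, _, err = producer.SendMessage(&sarama.ProducerMessage{
		Topic:   "my_topic",
		Value:   sarama.StringEncoder("hello"),
		Headers: []sarama.RecordHeader{{Key: []byte("trace-id"), Value: []byte("abc123")}},
	})
	if err != nil {
		log.Fatal(err)
	}
}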

View File

@@ -178,6 +178,13 @@ func (b *Broker) Close() error {
b.done = nil
b.responses = nil
if b.id >= 0 {
b.conf.MetricRegistry.Unregister(getMetricNameForBroker("incoming-byte-rate", b))
b.conf.MetricRegistry.Unregister(getMetricNameForBroker("request-rate", b))
b.conf.MetricRegistry.Unregister(getMetricNameForBroker("outgoing-byte-rate", b))
b.conf.MetricRegistry.Unregister(getMetricNameForBroker("response-rate", b))
}
if err == nil {
Logger.Printf("Closed connection to broker %s\n", b.addr)
} else {

View File

@@ -77,10 +77,6 @@ func TestSimpleBrokerCommunication(t *testing.T) {
t.Fatal(err)
}
tt.runner(t, broker)
err = broker.Close()
if err != nil {
t.Error(err)
}
// Wait up to 500 ms for the remote broker to process the request and
// notify us about the metrics
timeout := 500 * time.Millisecond
@@ -91,6 +87,10 @@ func TestSimpleBrokerCommunication(t *testing.T) {
t.Errorf("No request received for: %s after waiting for %v", tt.name, timeout)
}
mb.Close()
err = broker.Close()
if err != nil {
t.Error(err)
}
}
}

View File

@@ -49,9 +49,9 @@ type Client interface {
RefreshMetadata(topics ...string) error
// GetOffset queries the cluster to get the most recent available offset at the
// given time on the topic/partition combination. Time should be OffsetOldest for
// the earliest available offset, OffsetNewest for the offset of the message that
// will be produced next, or a time.
// given time (in milliseconds) on the topic/partition combination.
// Time should be OffsetOldest for the earliest available offset,
// OffsetNewest for the offset of the message that will be produced next, or a time.
GetOffset(topic string, partitionID int32, time int64) (int64, error)
// Coordinator returns the coordinating broker for a consumer group. It will
@@ -297,7 +297,7 @@ func (client *client) Replicas(topic string, partitionID int32) ([]int32, error)
}
if metadata.Err == ErrReplicaNotAvailable {
return nil, metadata.Err
return dupInt32Slice(metadata.Replicas), metadata.Err
}
return dupInt32Slice(metadata.Replicas), nil
}
@@ -322,7 +322,7 @@ func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32,
}
if metadata.Err == ErrReplicaNotAvailable {
return nil, metadata.Err
return dupInt32Slice(metadata.Isr), metadata.Err
}
return dupInt32Slice(metadata.Isr), nil
}
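
A short sketch matching the clarified GetOffset contract; client here is an already-connected sarama.Client, and the topic/partition are placeholders:

newest, err := client.GetOffset("my_topic", 0, sarama.OffsetNewest)
if err == nil {
	fmt.Println("offset of the next message to be produced:", newest)
}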

View File

@@ -33,6 +33,169 @@ func TestEmptyClientIDConfigValidates(t *testing.T) {
}
}
func TestNetConfigValidates(t *testing.T) {
tests := []struct {
name string
cfg func(*Config) // resorting to using a function as a param because of internal composite structs
err string
}{
{
"OpenRequests",
func(cfg *Config) {
cfg.Net.MaxOpenRequests = 0
},
"Net.MaxOpenRequests must be > 0"},
{"DialTimeout",
func(cfg *Config) {
cfg.Net.DialTimeout = 0
},
"Net.DialTimeout must be > 0"},
{"ReadTimeout",
func(cfg *Config) {
cfg.Net.ReadTimeout = 0
},
"Net.ReadTimeout must be > 0"},
{"WriteTimeout",
func(cfg *Config) {
cfg.Net.WriteTimeout = 0
},
"Net.WriteTimeout must be > 0"},
{"KeepAlive",
func(cfg *Config) {
cfg.Net.KeepAlive = -1
},
"Net.KeepAlive must be >= 0"},
{"SASL.User",
func(cfg *Config) {
cfg.Net.SASL.Enable = true
cfg.Net.SASL.User = ""
},
"Net.SASL.User must not be empty when SASL is enabled"},
{"SASL.Password",
func(cfg *Config) {
cfg.Net.SASL.Enable = true
cfg.Net.SASL.User = "user"
cfg.Net.SASL.Password = ""
},
"Net.SASL.Password must not be empty when SASL is enabled"},
}
for i, test := range tests {
c := NewConfig()
test.cfg(c)
if err := c.Validate(); string(err.(ConfigurationError)) != test.err {
t.Errorf("[%d]:[%s] Expected %s, Got %s\n", i, test.name, test.err, err)
}
}
}
func TestMetadataConfigValidates(t *testing.T) {
tests := []struct {
name string
cfg func(*Config) // resorting to using a function as a param because of internal composite structs
err string
}{
{
"Retry.Max",
func(cfg *Config) {
cfg.Metadata.Retry.Max = -1
},
"Metadata.Retry.Max must be >= 0"},
{"Retry.Backoff",
func(cfg *Config) {
cfg.Metadata.Retry.Backoff = -1
},
"Metadata.Retry.Backoff must be >= 0"},
{"RefreshFrequency",
func(cfg *Config) {
cfg.Metadata.RefreshFrequency = -1
},
"Metadata.RefreshFrequency must be >= 0"},
}
for i, test := range tests {
c := NewConfig()
test.cfg(c)
if err := c.Validate(); string(err.(ConfigurationError)) != test.err {
t.Errorf("[%d]:[%s] Expected %s, Got %s\n", i, test.name, test.err, err)
}
}
}
func TestProducerConfigValidates(t *testing.T) {
tests := []struct {
name string
cfg func(*Config) // resorting to using a function as a param because of internal composite structs
err string
}{
{
"MaxMessageBytes",
func(cfg *Config) {
cfg.Producer.MaxMessageBytes = 0
},
"Producer.MaxMessageBytes must be > 0"},
{"RequiredAcks",
func(cfg *Config) {
cfg.Producer.RequiredAcks = -2
},
"Producer.RequiredAcks must be >= -1"},
{"Timeout",
func(cfg *Config) {
cfg.Producer.Timeout = 0
},
"Producer.Timeout must be > 0"},
{"Partitioner",
func(cfg *Config) {
cfg.Producer.Partitioner = nil
},
"Producer.Partitioner must not be nil"},
{"Flush.Bytes",
func(cfg *Config) {
cfg.Producer.Flush.Bytes = -1
},
"Producer.Flush.Bytes must be >= 0"},
{"Flush.Messages",
func(cfg *Config) {
cfg.Producer.Flush.Messages = -1
},
"Producer.Flush.Messages must be >= 0"},
{"Flush.Frequency",
func(cfg *Config) {
cfg.Producer.Flush.Frequency = -1
},
"Producer.Flush.Frequency must be >= 0"},
{"Flush.MaxMessages",
func(cfg *Config) {
cfg.Producer.Flush.MaxMessages = -1
},
"Producer.Flush.MaxMessages must be >= 0"},
{"Flush.MaxMessages with Producer.Flush.Messages",
func(cfg *Config) {
cfg.Producer.Flush.MaxMessages = 1
cfg.Producer.Flush.Messages = 2
},
"Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set"},
{"Flush.Retry.Max",
func(cfg *Config) {
cfg.Producer.Retry.Max = -1
},
"Producer.Retry.Max must be >= 0"},
{"Flush.Retry.Backoff",
func(cfg *Config) {
cfg.Producer.Retry.Backoff = -1
},
"Producer.Retry.Backoff must be >= 0"},
}
for i, test := range tests {
c := NewConfig()
test.cfg(c)
if err := c.Validate(); string(err.(ConfigurationError)) != test.err {
t.Errorf("[%d]:[%s] Expected %s, Got %s\n", i, test.name, test.err, err)
}
}
}
func TestLZ4ConfigValidation(t *testing.T) {
config := NewConfig()
config.Producer.Compression = CompressionLZ4

View File

@@ -14,8 +14,9 @@ type ConsumerMessage struct {
Topic string
Partition int32
Offset int64
Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp
BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp
Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp
BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp
Headers []*RecordHeader // only set if kafka is version 0.11+
}
// ConsumerError is what is provided to the user when an error occurs.
@@ -478,44 +479,12 @@ feederLoop:
close(child.errors)
}
func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
block := response.GetBlock(child.topic, child.partition)
if block == nil {
return nil, ErrIncompleteResponse
}
if block.Err != ErrNoError {
return nil, block.Err
}
if len(block.MsgSet.Messages) == 0 {
// We got no messages. If we got a trailing one then we need to ask for more data.
// Otherwise we just poll again and wait for one to be produced...
if block.MsgSet.PartialTrailingMessage {
if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
// we can't ask for more data, we've hit the configured limit
child.sendError(ErrMessageTooLarge)
child.offset++ // skip this one so we can keep processing future messages
} else {
child.fetchSize *= 2
if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
child.fetchSize = child.conf.Consumer.Fetch.Max
}
}
}
return nil, nil
}
// we got messages, reset our fetch size in case it was increased for a previous request
child.fetchSize = child.conf.Consumer.Fetch.Default
atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
incomplete := false
prelude := true
func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) {
var messages []*ConsumerMessage
for _, msgBlock := range block.MsgSet.Messages {
var incomplete bool
prelude := true
for _, msgBlock := range msgSet.Messages {
for _, msg := range msgBlock.Messages() {
offset := msg.Offset
if msg.Msg.Version >= 1 {
@@ -542,7 +511,6 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu
incomplete = true
}
}
}
if incomplete || len(messages) == 0 {
@@ -551,6 +519,91 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu
return messages, nil
}
func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
var messages []*ConsumerMessage
var incomplete bool
prelude := true
for _, rec := range batch.Records {
offset := batch.FirstOffset + rec.OffsetDelta
if prelude && offset < child.offset {
continue
}
prelude = false
if offset >= child.offset {
messages = append(messages, &ConsumerMessage{
Topic: child.topic,
Partition: child.partition,
Key: rec.Key,
Value: rec.Value,
Offset: offset,
Timestamp: batch.FirstTimestamp.Add(rec.TimestampDelta),
Headers: rec.Headers,
})
child.offset = offset + 1
} else {
incomplete = true
}
}
if incomplete || len(messages) == 0 {
return nil, ErrIncompleteResponse
}
return messages, nil
}
func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
block := response.GetBlock(child.topic, child.partition)
if block == nil {
return nil, ErrIncompleteResponse
}
if block.Err != ErrNoError {
return nil, block.Err
}
nRecs, err := block.Records.numRecords()
if err != nil {
return nil, err
}
if nRecs == 0 {
partialTrailingMessage, err := block.Records.isPartial()
if err != nil {
return nil, err
}
// We got no messages. If we got a trailing one then we need to ask for more data.
// Otherwise we just poll again and wait for one to be produced...
if partialTrailingMessage {
if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
// we can't ask for more data, we've hit the configured limit
child.sendError(ErrMessageTooLarge)
child.offset++ // skip this one so we can keep processing future messages
} else {
child.fetchSize *= 2
if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
child.fetchSize = child.conf.Consumer.Fetch.Max
}
}
}
return nil, nil
}
// we got messages, reset our fetch size in case it was increased for a previous request
child.fetchSize = child.conf.Consumer.Fetch.Default
atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
if control, err := block.Records.isControl(); err != nil || control {
return nil, err
}
if block.Records.recordsType == legacyRecords {
return child.parseMessages(block.Records.msgSet)
}
return child.parseRecords(block.Records.recordBatch)
}
// brokerConsumer
type brokerConsumer struct {
@@ -740,6 +793,10 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
request.Version = 3
request.MaxBytes = MaxResponseSize
}
if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) {
request.Version = 4
request.Isolation = ReadUncommitted // We don't support transactions yet.

}
for child := range bc.subscriptions {
request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
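
On the consuming side, the matching change is the Headers field on ConsumerMessage, populated only when the fetch used the v2 (Kafka 0.11+) format. A hedged fragment; pc is a sarama.PartitionConsumer obtained in the usual way:

for msg := range pc.Messages() {
	for _, h := range msg.Headers { // []*sarama.RecordHeader; nil on older formats
		fmt.Printf("header %s=%s\n", h.Key, h.Value)
	}
}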

View File

@@ -379,39 +379,99 @@ func TestConsumerShutsDownOutOfRange(t *testing.T) {
// requested, then such messages are ignored.
func TestConsumerExtraOffsets(t *testing.T) {
// Given
broker0 := NewMockBroker(t, 0)
fetchResponse1 := &FetchResponse{}
legacyFetchResponse := &FetchResponse{}
legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 1)
legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 2)
legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 3)
legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 4)
newFetchResponse := &FetchResponse{Version: 4}
newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 1)
newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 2)
newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 3)
newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 4)
newFetchResponse.SetLastStableOffset("my_topic", 0, 4)
for _, fetchResponse1 := range []*FetchResponse{legacyFetchResponse, newFetchResponse} {
var offsetResponseVersion int16
cfg := NewConfig()
if fetchResponse1.Version >= 4 {
cfg.Version = V0_11_0_0
offsetResponseVersion = 1
}
broker0 := NewMockBroker(t, 0)
fetchResponse2 := &FetchResponse{}
fetchResponse2.Version = fetchResponse1.Version
fetchResponse2.AddError("my_topic", 0, ErrNoError)
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": NewMockOffsetResponse(t).
SetVersion(offsetResponseVersion).
SetOffset("my_topic", 0, OffsetNewest, 1234).
SetOffset("my_topic", 0, OffsetOldest, 0),
"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
})
master, err := NewConsumer([]string{broker0.Addr()}, cfg)
if err != nil {
t.Fatal(err)
}
// When
consumer, err := master.ConsumePartition("my_topic", 0, 3)
if err != nil {
t.Fatal(err)
}
// Then: messages with offsets 1 and 2 are not returned even though they
// are present in the response.
assertMessageOffset(t, <-consumer.Messages(), 3)
assertMessageOffset(t, <-consumer.Messages(), 4)
safeClose(t, consumer)
safeClose(t, master)
broker0.Close()
}
}
func TestConsumeMessageWithNewerFetchAPIVersion(t *testing.T) {
// Given
fetchResponse1 := &FetchResponse{Version: 4}
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 1)
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 2)
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 3)
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 4)
cfg := NewConfig()
cfg.Version = V0_11_0_0
broker0 := NewMockBroker(t, 0)
fetchResponse2 := &FetchResponse{}
fetchResponse2.Version = 4
fetchResponse2.AddError("my_topic", 0, ErrNoError)
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": NewMockOffsetResponse(t).
SetVersion(1).
SetOffset("my_topic", 0, OffsetNewest, 1234).
SetOffset("my_topic", 0, OffsetOldest, 0),
"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
})
master, err := NewConsumer([]string{broker0.Addr()}, nil)
master, err := NewConsumer([]string{broker0.Addr()}, cfg)
if err != nil {
t.Fatal(err)
}
// When
consumer, err := master.ConsumePartition("my_topic", 0, 3)
consumer, err := master.ConsumePartition("my_topic", 0, 1)
if err != nil {
t.Fatal(err)
}
// Then: messages with offsets 1 and 2 are not returned even though they
// are present in the response.
assertMessageOffset(t, <-consumer.Messages(), 3)
assertMessageOffset(t, <-consumer.Messages(), 4)
assertMessageOffset(t, <-consumer.Messages(), 1)
assertMessageOffset(t, <-consumer.Messages(), 2)
safeClose(t, consumer)
safeClose(t, master)
@@ -422,43 +482,58 @@ func TestConsumerExtraOffsets(t *testing.T) {
// strictly increasing!).
func TestConsumerNonSequentialOffsets(t *testing.T) {
// Given
broker0 := NewMockBroker(t, 0)
fetchResponse1 := &FetchResponse{}
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 5)
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 7)
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 11)
fetchResponse2 := &FetchResponse{}
fetchResponse2.AddError("my_topic", 0, ErrNoError)
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": NewMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetNewest, 1234).
SetOffset("my_topic", 0, OffsetOldest, 0),
"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
})
legacyFetchResponse := &FetchResponse{}
legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 5)
legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 7)
legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 11)
newFetchResponse := &FetchResponse{Version: 4}
newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 5)
newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 7)
newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 11)
newFetchResponse.SetLastStableOffset("my_topic", 0, 11)
for _, fetchResponse1 := range []*FetchResponse{legacyFetchResponse, newFetchResponse} {
var offsetResponseVersion int16
cfg := NewConfig()
if fetchResponse1.Version >= 4 {
cfg.Version = V0_11_0_0
offsetResponseVersion = 1
}
master, err := NewConsumer([]string{broker0.Addr()}, nil)
if err != nil {
t.Fatal(err)
broker0 := NewMockBroker(t, 0)
fetchResponse2 := &FetchResponse{Version: fetchResponse1.Version}
fetchResponse2.AddError("my_topic", 0, ErrNoError)
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": NewMockOffsetResponse(t).
SetVersion(offsetResponseVersion).
SetOffset("my_topic", 0, OffsetNewest, 1234).
SetOffset("my_topic", 0, OffsetOldest, 0),
"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
})
master, err := NewConsumer([]string{broker0.Addr()}, cfg)
if err != nil {
t.Fatal(err)
}
// When
consumer, err := master.ConsumePartition("my_topic", 0, 3)
if err != nil {
t.Fatal(err)
}
// Then: messages with offsets 1 and 2 are not returned even though they
// are present in the response.
assertMessageOffset(t, <-consumer.Messages(), 5)
assertMessageOffset(t, <-consumer.Messages(), 7)
assertMessageOffset(t, <-consumer.Messages(), 11)
safeClose(t, consumer)
safeClose(t, master)
broker0.Close()
}
// When
consumer, err := master.ConsumePartition("my_topic", 0, 3)
if err != nil {
t.Fatal(err)
}
// Then: messages with offsets 1 and 2 are not returned even though they
// are present in the response.
assertMessageOffset(t, <-consumer.Messages(), 5)
assertMessageOffset(t, <-consumer.Messages(), 7)
assertMessageOffset(t, <-consumer.Messages(), 11)
safeClose(t, consumer)
safeClose(t, master)
broker0.Close()
}
// If leadership for a partition is changing then consumer resolves the new

View File

@@ -6,9 +6,19 @@ import (
"hash/crc32"
)
type crcPolynomial int8
const (
crcIEEE crcPolynomial = iota
crcCastagnoli
)
var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
type crc32Field struct {
startOffset int
polynomial crcPolynomial
}
func (c *crc32Field) saveOffset(in int) {
@@ -19,14 +29,24 @@ func (c *crc32Field) reserveLength() int {
return 4
}
func newCRC32Field(polynomial crcPolynomial) *crc32Field {
return &crc32Field{polynomial: polynomial}
}
func (c *crc32Field) run(curOffset int, buf []byte) error {
crc, err := c.crc(curOffset, buf)
if err != nil {
return err
}
binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
return nil
}
func (c *crc32Field) check(curOffset int, buf []byte) error {
crc, err := c.crc(curOffset, buf)
if err != nil {
return err
}
expected := binary.BigEndian.Uint32(buf[c.startOffset:])
if crc != expected {
@@ -35,3 +55,15 @@ func (c *crc32Field) check(curOffset int, buf []byte) error {
return nil
}
func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) {
var tab *crc32.Table
switch c.polynomial {
case crcIEEE:
tab = crc32.IEEETable
case crcCastagnoli:
tab = castagnoliTable
default:
return 0, PacketDecodingError{"invalid CRC type"}
}
return crc32.Checksum(buf[c.startOffset+4:curOffset], tab), nil
}
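The polynomial switch above exists because the two Kafka on-wire formats checksum with different CRC-32 variants: the legacy message set uses the IEEE polynomial, while the v2 record batch format uses CRC-32C (Castagnoli). A minimal, self-contained illustration using only the standard library (the input bytes are made up):

package main

import (
    "fmt"
    "hash/crc32"
)

func main() {
    data := []byte("kafka record batch")

    // CRC-32 with the IEEE polynomial, as used by the legacy message set.
    ieee := crc32.ChecksumIEEE(data)

    // CRC-32C (Castagnoli), as used by the v2 record batch format.
    castagnoli := crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))

    fmt.Printf("IEEE: %#08x  Castagnoli: %#08x\n", ieee, castagnoli)
}

Note that crc32Field computes the checksum starting at startOffset+4, so the four bytes reserved for the CRC itself are never covered by it.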

View File

@@ -0,0 +1,121 @@
package sarama
import "time"
type CreatePartitionsRequest struct {
TopicPartitions map[string]*TopicPartition
Timeout time.Duration
ValidateOnly bool
}
func (c *CreatePartitionsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil {
return err
}
for topic, partition := range c.TopicPartitions {
if err := pe.putString(topic); err != nil {
return err
}
if err := partition.encode(pe); err != nil {
return err
}
}
pe.putInt32(int32(c.Timeout / time.Millisecond))
pe.putBool(c.ValidateOnly)
return nil
}
func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.TopicPartitions = make(map[string]*TopicPartition, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
c.TopicPartitions[topic] = new(TopicPartition)
if err := c.TopicPartitions[topic].decode(pd, version); err != nil {
return err
}
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
c.Timeout = time.Duration(timeout) * time.Millisecond
if c.ValidateOnly, err = pd.getBool(); err != nil {
return err
}
return nil
}
func (r *CreatePartitionsRequest) key() int16 {
return 37
}
func (r *CreatePartitionsRequest) version() int16 {
return 0
}
func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion {
return V1_0_0_0
}
type TopicPartition struct {
Count int32
Assignment [][]int32
}
func (t *TopicPartition) encode(pe packetEncoder) error {
pe.putInt32(t.Count)
if len(t.Assignment) == 0 {
pe.putInt32(-1)
return nil
}
if err := pe.putArrayLength(len(t.Assignment)); err != nil {
return err
}
for _, assign := range t.Assignment {
if err := pe.putInt32Array(assign); err != nil {
return err
}
}
return nil
}
func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) {
if t.Count, err = pd.getInt32(); err != nil {
return err
}
n, err := pd.getInt32()
if err != nil {
return err
}
if n <= 0 {
return nil
}
t.Assignment = make([][]int32, n)
for i := 0; i < int(n); i++ {
if t.Assignment[i], err = pd.getInt32Array(); err != nil {
return err
}
}
return nil
}
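Putting the request together: each TopicPartition carries the desired total partition Count, and Assignment optionally pins every newly created partition to an explicit replica list (an empty Assignment encodes as -1, meaning broker-side placement). A hypothetical usage sketch; the topic name, counts, and broker IDs are invented for illustration:

req := &CreatePartitionsRequest{
    TopicPartitions: map[string]*TopicPartition{
        "my_topic": {
            Count:      6,                                 // grow the topic to 6 partitions in total
            Assignment: [][]int32{{0, 1}, {1, 2}, {2, 0}}, // one replica list per *new* partition
        },
    },
    Timeout:      30 * time.Second,
    ValidateOnly: false, // true would dry-run the request on the broker
}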

View File

@@ -0,0 +1,50 @@
package sarama
import (
"testing"
"time"
)
var (
createPartitionRequestNoAssignment = []byte{
0, 0, 0, 1, // one topic
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, 0, 3, // 3 partitions
255, 255, 255, 255, // no assignments
0, 0, 0, 100, // timeout
0, // validate only = false
}
createPartitionRequestAssignment = []byte{
0, 0, 0, 1,
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, 0, 3, // 3 partitions
0, 0, 0, 2,
0, 0, 0, 2,
0, 0, 0, 2, 0, 0, 0, 3,
0, 0, 0, 2,
0, 0, 0, 3, 0, 0, 0, 1,
0, 0, 0, 100,
1, // validate only = true
}
)
func TestCreatePartitionsRequest(t *testing.T) {
req := &CreatePartitionsRequest{
TopicPartitions: map[string]*TopicPartition{
"topic": &TopicPartition{
Count: 3,
},
},
Timeout: 100 * time.Millisecond,
}
buf := testRequestEncode(t, "no assignment", req, createPartitionRequestNoAssignment)
testRequestDecode(t, "no assignment", req, buf)
req.ValidateOnly = true
req.TopicPartitions["topic"].Assignment = [][]int32{{2, 3}, {3, 1}}
buf = testRequestEncode(t, "assignment", req, createPartitionRequestAssignment)
testRequestDecode(t, "assignment", req, buf)
}

View File

@@ -0,0 +1,94 @@
package sarama
import "time"
type CreatePartitionsResponse struct {
ThrottleTime time.Duration
TopicPartitionErrors map[string]*TopicPartitionError
}
func (c *CreatePartitionsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil {
return err
}
for topic, partitionError := range c.TopicPartitionErrors {
if err := pe.putString(topic); err != nil {
return err
}
if err := partitionError.encode(pe); err != nil {
return err
}
}
return nil
}
func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
c.TopicPartitionErrors[topic] = new(TopicPartitionError)
if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (r *CreatePartitionsResponse) key() int16 {
return 37
}
func (r *CreatePartitionsResponse) version() int16 {
return 0
}
func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion {
return V1_0_0_0
}
type TopicPartitionError struct {
Err KError
ErrMsg *string
}
func (t *TopicPartitionError) encode(pe packetEncoder) error {
pe.putInt16(int16(t.Err))
if err := pe.putNullableString(t.ErrMsg); err != nil {
return err
}
return nil
}
func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
t.Err = KError(kerr)
if t.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
return nil
}

View File

@@ -0,0 +1,52 @@
package sarama
import (
"reflect"
"testing"
"time"
)
var (
createPartitionResponseSuccess = []byte{
0, 0, 0, 100, // throttleTimeMs
0, 0, 0, 1,
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, // no error
255, 255, // no error message
}
createPartitionResponseFail = []byte{
0, 0, 0, 100, // throttleTimeMs
0, 0, 0, 1,
0, 5, 't', 'o', 'p', 'i', 'c',
0, 37, // partition error
0, 5, 'e', 'r', 'r', 'o', 'r',
}
)
func TestCreatePartitionsResponse(t *testing.T) {
resp := &CreatePartitionsResponse{
ThrottleTime: 100 * time.Millisecond,
TopicPartitionErrors: map[string]*TopicPartitionError{
"topic": &TopicPartitionError{},
},
}
testResponse(t, "success", resp, createPartitionResponseSuccess)
decodedresp := new(CreatePartitionsResponse)
testVersionDecodable(t, "success", decodedresp, createPartitionResponseSuccess, 0)
if !reflect.DeepEqual(decodedresp, resp) {
t.Errorf("Decoding error: expected %v but got %v", decodedresp, resp)
}
errMsg := "error"
resp.TopicPartitionErrors["topic"].Err = ErrInvalidPartitions
resp.TopicPartitionErrors["topic"].ErrMsg = &errMsg
testResponse(t, "with errors", resp, createPartitionResponseFail)
decodedresp = new(CreatePartitionsResponse)
testVersionDecodable(t, "with errors", decodedresp, createPartitionResponseFail, 0)
if !reflect.DeepEqual(decodedresp, resp) {
t.Errorf("Decoding error: expected %v but got %v", decodedresp, resp)
}
}

View File

@@ -2,13 +2,9 @@ name: sarama
up:
- go:
version: '1.9'
commands:
test:
run: make test
desc: 'run unit tests'
packages:
- git@github.com:Shopify/dev-shopify.git

View File

@@ -71,52 +71,68 @@ type KError int16
// Numeric error codes returned by the Kafka server.
const (
ErrNoError KError = 0
ErrUnknown KError = -1
ErrOffsetOutOfRange KError = 1
ErrInvalidMessage KError = 2
ErrUnknownTopicOrPartition KError = 3
ErrInvalidMessageSize KError = 4
ErrLeaderNotAvailable KError = 5
ErrNotLeaderForPartition KError = 6
ErrRequestTimedOut KError = 7
ErrBrokerNotAvailable KError = 8
ErrReplicaNotAvailable KError = 9
ErrMessageSizeTooLarge KError = 10
ErrStaleControllerEpochCode KError = 11
ErrOffsetMetadataTooLarge KError = 12
ErrNetworkException KError = 13
ErrOffsetsLoadInProgress KError = 14
ErrConsumerCoordinatorNotAvailable KError = 15
ErrNotCoordinatorForConsumer KError = 16
ErrInvalidTopic KError = 17
ErrMessageSetSizeTooLarge KError = 18
ErrNotEnoughReplicas KError = 19
ErrNotEnoughReplicasAfterAppend KError = 20
ErrInvalidRequiredAcks KError = 21
ErrIllegalGeneration KError = 22
ErrInconsistentGroupProtocol KError = 23
ErrInvalidGroupId KError = 24
ErrUnknownMemberId KError = 25
ErrInvalidSessionTimeout KError = 26
ErrRebalanceInProgress KError = 27
ErrInvalidCommitOffsetSize KError = 28
ErrTopicAuthorizationFailed KError = 29
ErrGroupAuthorizationFailed KError = 30
ErrClusterAuthorizationFailed KError = 31
ErrInvalidTimestamp KError = 32
ErrUnsupportedSASLMechanism KError = 33
ErrIllegalSASLState KError = 34
ErrUnsupportedVersion KError = 35
ErrTopicAlreadyExists KError = 36
ErrInvalidPartitions KError = 37
ErrInvalidReplicationFactor KError = 38
ErrInvalidReplicaAssignment KError = 39
ErrInvalidConfig KError = 40
ErrNotController KError = 41
ErrInvalidRequest KError = 42
ErrUnsupportedForMessageFormat KError = 43
ErrPolicyViolation KError = 44
ErrOutOfOrderSequenceNumber KError = 45
ErrDuplicateSequenceNumber KError = 46
ErrInvalidProducerEpoch KError = 47
ErrInvalidTxnState KError = 48
ErrInvalidProducerIDMapping KError = 49
ErrInvalidTransactionTimeout KError = 50
ErrConcurrentTransactions KError = 51
ErrTransactionCoordinatorFenced KError = 52
ErrTransactionalIDAuthorizationFailed KError = 53
ErrSecurityDisabled KError = 54
ErrOperationNotAttempted KError = 55
ErrKafkaStorageError KError = 56
ErrLogDirNotFound KError = 57
ErrSASLAuthenticationFailed KError = 58
ErrUnknownProducerID KError = 59
ErrReassignmentInProgress KError = 60
)
func (err KError) Error() string {
@@ -215,6 +231,38 @@ func (err KError) Error() string {
return "kafka server: The requested operation is not supported by the message format version."
case ErrPolicyViolation:
return "kafka server: Request parameters do not satisfy the configured policy."
case ErrOutOfOrderSequenceNumber:
return "kafka server: The broker received an out of order sequence number."
case ErrDuplicateSequenceNumber:
return "kafka server: The broker received a duplicate sequence number."
case ErrInvalidProducerEpoch:
return "kafka server: Producer attempted an operation with an old epoch."
case ErrInvalidTxnState:
return "kafka server: The producer attempted a transactional operation in an invalid state."
case ErrInvalidProducerIDMapping:
return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id."
case ErrInvalidTransactionTimeout:
return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)."
case ErrConcurrentTransactions:
return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing."
case ErrTransactionCoordinatorFenced:
return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer."
case ErrTransactionalIDAuthorizationFailed:
return "kafka server: Transactional ID authorization failed."
case ErrSecurityDisabled:
return "kafka server: Security features are disabled."
case ErrOperationNotAttempted:
return "kafka server: The broker did not attempt to execute this operation."
case ErrKafkaStorageError:
return "kafka server: Disk error when trying to access log file on the disk."
case ErrLogDirNotFound:
return "kafka server: The specified log directory is not found in the broker config."
case ErrSASLAuthenticationFailed:
return "kafka server: SASL Authentication failed."
case ErrUnknownProducerID:
return "kafka server: The broker could not locate the producer metadata associated with the Producer ID."
case ErrReassignmentInProgress:
return "kafka server: A partition reassignment is in progress."
}
return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)

View File

@@ -29,16 +29,27 @@ type FetchRequest struct {
MinBytes int32
MaxBytes int32
Version int16
Isolation IsolationLevel
blocks map[string]map[int32]*fetchRequestBlock
}
type IsolationLevel int8
const (
ReadUncommitted IsolationLevel = 0
ReadCommitted IsolationLevel = 1
)
func (r *FetchRequest) encode(pe packetEncoder) (err error) {
pe.putInt32(-1) // replica ID is always -1 for clients
pe.putInt32(r.MaxWaitTime)
pe.putInt32(r.MinBytes)
if r.Version >= 3 {
pe.putInt32(r.MaxBytes)
}
if r.Version >= 4 {
pe.putInt8(int8(r.Isolation))
}
err = pe.putArrayLength(len(r.blocks))
if err != nil {
return err
@@ -74,11 +85,18 @@ func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
if r.MinBytes, err = pd.getInt32(); err != nil {
return err
}
if r.Version >= 3 {
if r.MaxBytes, err = pd.getInt32(); err != nil {
return err
}
}
if r.Version >= 4 {
isolation, err := pd.getInt8()
if err != nil {
return err
}
r.Isolation = IsolationLevel(isolation)
}
topicCount, err := pd.getArrayLength()
if err != nil {
return err
@@ -128,6 +146,8 @@ func (r *FetchRequest) requiredVersion() KafkaVersion {
return V0_10_0_0
case 3:
return V0_10_1_0
case 4:
return V0_11_0_0
default:
return minVersion
}
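The two v4 additions interact with the older fields: MaxBytes (v3+) caps the total response size, while Isolation decides whether records from open or aborted transactions are visible; ReadCommitted restricts the fetch to the last stable offset. A minimal sketch of building such a request (topic, offsets, and sizes are made up):

req := &FetchRequest{
    Version:     4,
    MaxWaitTime: 500, // milliseconds
    MinBytes:    1,
    MaxBytes:    1 << 20,
    Isolation:   ReadCommitted,
}
// topic, partition, fetch offset, per-partition max bytes
req.AddBlock("my_topic", 0, 1234, 64*1024)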

View File

@@ -17,6 +17,15 @@ var (
0x00, 0x05, 't', 'o', 'p', 'i', 'c',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x56}
fetchRequestOneBlockV4 = []byte{
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFF,
0x01,
0x00, 0x00, 0x00, 0x01,
0x00, 0x05, 't', 'o', 'p', 'i', 'c',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x56}
)
func TestFetchRequest(t *testing.T) {
@@ -31,4 +40,9 @@ func TestFetchRequest(t *testing.T) {
request.MinBytes = 0
request.AddBlock("topic", 0x12, 0x34, 0x56)
testRequest(t, "one block", request, fetchRequestOneBlock)
request.Version = 4
request.MaxBytes = 0xFF
request.Isolation = ReadCommitted
testRequest(t, "one block v4", request, fetchRequestOneBlockV4)
}

View File

@@ -2,13 +2,39 @@ package sarama
import "time"
type AbortedTransaction struct {
ProducerID int64
FirstOffset int64
}
func (t *AbortedTransaction) decode(pd packetDecoder) (err error) {
if t.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if t.FirstOffset, err = pd.getInt64(); err != nil {
return err
}
return nil
}
func (t *AbortedTransaction) encode(pe packetEncoder) (err error) {
pe.putInt64(t.ProducerID)
pe.putInt64(t.FirstOffset)
return nil
}
type FetchResponseBlock struct {
Err KError
HighWaterMarkOffset int64
LastStableOffset int64
AbortedTransactions []*AbortedTransaction
Records Records
}
func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
@@ -20,27 +46,68 @@ func (b *FetchResponseBlock) decode(pd packetDecoder) (err error) {
return err
}
if version >= 4 {
b.LastStableOffset, err = pd.getInt64()
if err != nil {
return err
}
numTransact, err := pd.getArrayLength()
if err != nil {
return err
}
if numTransact >= 0 {
b.AbortedTransactions = make([]*AbortedTransaction, numTransact)
}
for i := 0; i < numTransact; i++ {
transact := new(AbortedTransaction)
if err = transact.decode(pd); err != nil {
return err
}
b.AbortedTransactions[i] = transact
}
}
recordsSize, err := pd.getInt32()
if err != nil {
return err
}
recordsDecoder, err := pd.getSubset(int(recordsSize))
if err != nil {
return err
}
err = (&b.MsgSet).decode(msgSetDecoder)
if recordsSize > 0 {
if err = b.Records.decode(recordsDecoder); err != nil {
return err
}
}
return nil
}
func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
pe.putInt16(int16(b.Err))
pe.putInt64(b.HighWaterMarkOffset)
if version >= 4 {
pe.putInt64(b.LastStableOffset)
if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil {
return err
}
for _, transact := range b.AbortedTransactions {
if err = transact.encode(pe); err != nil {
return err
}
}
}
pe.push(&lengthField{})
err = b.Records.encode(pe)
if err != nil {
return err
}
@@ -90,7 +157,7 @@ func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
}
block := new(FetchResponseBlock)
err = block.decode(pd, version)
if err != nil {
return err
}
@@ -124,7 +191,7 @@ func (r *FetchResponse) encode(pe packetEncoder) (err error) {
for id, block := range partitions {
pe.putInt32(id)
err = block.encode(pe, r.Version)
if err != nil {
return err
}
@@ -148,6 +215,10 @@ func (r *FetchResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
case 2:
return V0_10_0_0
case 3:
return V0_10_1_0
case 4:
return V0_11_0_0
default:
return minVersion
}
@@ -182,7 +253,7 @@ func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
frb.Err = err
}
func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock {
if r.Blocks == nil {
r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
}
@@ -196,6 +267,11 @@ func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Enc
frb = new(FetchResponseBlock)
partitions[partition] = frb
}
return frb
}
func encodeKV(key, value Encoder) ([]byte, []byte) {
var kb []byte
var vb []byte
if key != nil {
@@ -204,7 +280,36 @@ func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Enc
if value != nil {
vb, _ = value.Encode()
}
return kb, vb
}
func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
frb := r.getOrCreateBlock(topic, partition)
kb, vb := encodeKV(key, value)
msg := &Message{Key: kb, Value: vb}
msgBlock := &MessageBlock{Msg: msg, Offset: offset}
set := frb.Records.msgSet
if set == nil {
set = &MessageSet{}
frb.Records = newLegacyRecords(set)
}
set.Messages = append(set.Messages, msgBlock)
}
func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) {
frb := r.getOrCreateBlock(topic, partition)
kb, vb := encodeKV(key, value)
rec := &Record{Key: kb, Value: vb, OffsetDelta: offset}
batch := frb.Records.recordBatch
if batch == nil {
batch = &RecordBatch{Version: 2}
frb.Records = newDefaultRecords(batch)
}
batch.addRecord(rec)
}
func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) {
frb := r.getOrCreateBlock(topic, partition)
frb.LastStableOffset = offset
}
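AddMessage and AddRecord populate the same block through different legs of the Records union: the former lazily creates a legacy MessageSet, the latter a v2 RecordBatch, and SetLastStableOffset only matters for v4 responses. A short sketch of building a mock response either way (topic and offsets are invented; StringEncoder is sarama's existing Encoder for plain strings):

// Legacy path: wire format < v4, data lands in a MessageSet.
legacy := &FetchResponse{}
legacy.AddMessage("my_topic", 0, nil, StringEncoder("hello"), 5)

// Record-batch path: wire format v4 (Kafka 0.11+), data lands in a RecordBatch.
modern := &FetchResponse{Version: 4}
modern.AddRecord("my_topic", 0, nil, StringEncoder("hello"), 5)
modern.SetLastStableOffset("my_topic", 0, 5)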

View File

@@ -26,6 +26,63 @@ var (
0x00,
0xFF, 0xFF, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x02, 0x00, 0xEE}
oneRecordFetchResponse = []byte{
0x00, 0x00, 0x00, 0x00, // ThrottleTime
0x00, 0x00, 0x00, 0x01, // Number of Topics
0x00, 0x05, 't', 'o', 'p', 'i', 'c', // Topic
0x00, 0x00, 0x00, 0x01, // Number of Partitions
0x00, 0x00, 0x00, 0x05, // Partition
0x00, 0x01, // Error
0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // High Watermark Offset
0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // Last Stable Offset
0x00, 0x00, 0x00, 0x00, // Number of Aborted Transactions
0x00, 0x00, 0x00, 0x52, // Records length
// recordBatch
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x46,
0x00, 0x00, 0x00, 0x00,
0x02,
0xDB, 0x47, 0x14, 0xC9,
0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01,
// record
0x28,
0x00,
0x0A,
0x00,
0x08, 0x01, 0x02, 0x03, 0x04,
0x06, 0x05, 0x06, 0x07,
0x02,
0x06, 0x08, 0x09, 0x0A,
0x04, 0x0B, 0x0C}
oneMessageFetchResponseV4 = []byte{
0x00, 0x00, 0x00, 0x00, // ThrottleTime
0x00, 0x00, 0x00, 0x01, // Number of Topics
0x00, 0x05, 't', 'o', 'p', 'i', 'c', // Topic
0x00, 0x00, 0x00, 0x01, // Number of Partitions
0x00, 0x00, 0x00, 0x05, // Partition
0x00, 0x01, // Error
0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // High Watermark Offset
0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // Last Stable Offset
0x00, 0x00, 0x00, 0x00, // Number of Aborted Transactions
0x00, 0x00, 0x00, 0x1C,
// messageSet
0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00,
0x00, 0x00, 0x00, 0x10,
// message
0x23, 0x96, 0x4a, 0xf7, // CRC
0x00,
0x00,
0xFF, 0xFF, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x02, 0x00, 0xEE}
)
func TestEmptyFetchResponse(t *testing.T) {
@@ -60,14 +117,121 @@ func TestOneMessageFetchResponse(t *testing.T) {
if block.HighWaterMarkOffset != 0x10101010 {
t.Error("Decoding didn't produce correct high water mark offset.")
}
partial, err := block.Records.isPartial()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if partial {
t.Error("Decoding detected a partial trailing message where there wasn't one.")
}
n, err := block.Records.numRecords()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if n != 1 {
t.Fatal("Decoding produced incorrect number of messages.")
}
msgBlock := block.Records.msgSet.Messages[0]
if msgBlock.Offset != 0x550000 {
t.Error("Decoding produced incorrect message offset.")
}
msg := msgBlock.Msg
if msg.Codec != CompressionNone {
t.Error("Decoding produced incorrect message compression.")
}
if msg.Key != nil {
t.Error("Decoding produced message key where there was none.")
}
if !bytes.Equal(msg.Value, []byte{0x00, 0xEE}) {
t.Error("Decoding produced incorrect message value.")
}
}
func TestOneRecordFetchResponse(t *testing.T) {
response := FetchResponse{}
testVersionDecodable(t, "one record", &response, oneRecordFetchResponse, 4)
if len(response.Blocks) != 1 {
t.Fatal("Decoding produced incorrect number of topic blocks.")
}
if len(response.Blocks["topic"]) != 1 {
t.Fatal("Decoding produced incorrect number of partition blocks for topic.")
}
block := response.GetBlock("topic", 5)
if block == nil {
t.Fatal("GetBlock didn't return block.")
}
if block.Err != ErrOffsetOutOfRange {
t.Error("Decoding didn't produce correct error code.")
}
if block.HighWaterMarkOffset != 0x10101010 {
t.Error("Decoding didn't produce correct high water mark offset.")
}
partial, err := block.Records.isPartial()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if partial {
t.Error("Decoding detected a partial trailing record where there wasn't one.")
}
n, err := block.Records.numRecords()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if n != 1 {
t.Fatal("Decoding produced incorrect number of records.")
}
rec := block.Records.recordBatch.Records[0]
if !bytes.Equal(rec.Key, []byte{0x01, 0x02, 0x03, 0x04}) {
t.Error("Decoding produced incorrect record key.")
}
if !bytes.Equal(rec.Value, []byte{0x05, 0x06, 0x07}) {
t.Error("Decoding produced incorrect record value.")
}
}
func TestOneMessageFetchResponseV4(t *testing.T) {
response := FetchResponse{}
testVersionDecodable(t, "one message v4", &response, oneMessageFetchResponseV4, 4)
if len(response.Blocks) != 1 {
t.Fatal("Decoding produced incorrect number of topic blocks.")
}
if len(response.Blocks["topic"]) != 1 {
t.Fatal("Decoding produced incorrect number of partition blocks for topic.")
}
block := response.GetBlock("topic", 5)
if block == nil {
t.Fatal("GetBlock didn't return block.")
}
if block.Err != ErrOffsetOutOfRange {
t.Error("Decoding didn't produce correct error code.")
}
if block.HighWaterMarkOffset != 0x10101010 {
t.Error("Decoding didn't produce correct high water mark offset.")
}
partial, err := block.Records.isPartial()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if partial {
t.Error("Decoding detected a partial trailing record where there wasn't one.")
}
n, err := block.Records.numRecords()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if n != 1 {
t.Fatal("Decoding produced incorrect number of records.")
}
msgBlock := block.Records.msgSet.Messages[0]
if msgBlock.Offset != 0x550000 {
t.Error("Decoding produced incorrect message offset.")
}

View File

@@ -27,3 +27,43 @@ func (l *lengthField) check(curOffset int, buf []byte) error {
return nil
}
type varintLengthField struct {
startOffset int
length int64
}
func (l *varintLengthField) decode(pd packetDecoder) error {
var err error
l.length, err = pd.getVarint()
return err
}
func (l *varintLengthField) saveOffset(in int) {
l.startOffset = in
}
func (l *varintLengthField) adjustLength(currOffset int) int {
oldFieldSize := l.reserveLength()
l.length = int64(currOffset - l.startOffset - oldFieldSize)
return l.reserveLength() - oldFieldSize
}
func (l *varintLengthField) reserveLength() int {
var tmp [binary.MaxVarintLen64]byte
return binary.PutVarint(tmp[:], l.length)
}
func (l *varintLengthField) run(curOffset int, buf []byte) error {
binary.PutVarint(buf[l.startOffset:], l.length)
return nil
}
func (l *varintLengthField) check(curOffset int, buf []byte) error {
if int64(curOffset-l.startOffset-l.reserveLength()) != l.length {
return PacketDecodingError{"length field invalid"}
}
return nil
}
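Unlike the fixed four-byte lengthField, a varint occupies a value-dependent number of bytes, which is why adjustLength has to re-measure the reserved space once the enclosed length is finally known. A self-contained round trip with the standard library shows that size dependence:

package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    buf := make([]byte, binary.MaxVarintLen64)

    // The encoded width grows with the value, so the space reserved for a
    // varint length field can only be finalized after the body is measured.
    for _, v := range []int64{1, 300, 1 << 20} {
        n := binary.PutVarint(buf, v)
        decoded, read := binary.Varint(buf[:n])
        fmt.Printf("value %d -> %d byte(s), decodes to %d (%d read)\n", v, n, decoded, read)
    }
}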

View File

@@ -37,7 +37,7 @@ type Message struct {
}
func (m *Message) encode(pe packetEncoder) error {
pe.push(&crc32Field{})
pe.push(newCRC32Field(crcIEEE))
pe.putInt8(m.Version)
@@ -45,15 +45,9 @@ func (m *Message) encode(pe packetEncoder) error {
pe.putInt8(attributes)
if m.Version >= 1 {
if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil {
return err
}
}
err := pe.putBytes(m.Key)
@@ -112,7 +106,7 @@ func (m *Message) encode(pe packetEncoder) error {
}
func (m *Message) decode(pd packetDecoder) (err error) {
err = pd.push(&crc32Field{})
err = pd.push(newCRC32Field(crcIEEE))
if err != nil {
return err
}
@@ -133,19 +127,9 @@ func (m *Message) decode(pd packetDecoder) (err error) {
m.Codec = CompressionCodec(attribute & compressionCodecMask)
if m.Version == 1 {
if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil {
return err
}
}
m.Key, err = pd.getBytes()

View File

@@ -107,7 +107,7 @@ func TestMessageEncoding(t *testing.T) {
message.Value = []byte{}
message.Codec = CompressionGZIP
if runtime.Version() == "go1.8" || strings.HasPrefix(runtime.Version(), "go1.8.") {
if strings.HasPrefix(runtime.Version(), "go1.8") || strings.HasPrefix(runtime.Version(), "go1.9") {
testEncodable(t, "empty gzip", &message, emptyGzipMessage18)
} else {
testEncodable(t, "empty gzip", &message, emptyGzipMessage)

View File

@@ -122,6 +122,7 @@ func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder {
type MockOffsetResponse struct {
offsets map[string]map[int32]map[int64]int64
t TestReporter
version int16
}
func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
@@ -131,6 +132,11 @@ func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
}
}
func (mor *MockOffsetResponse) SetVersion(version int16) *MockOffsetResponse {
mor.version = version
return mor
}
func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse {
partitions := mor.offsets[topic]
if partitions == nil {
@@ -148,7 +154,7 @@ func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, of
func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder {
offsetRequest := reqBody.(*OffsetRequest)
offsetResponse := &OffsetResponse{}
offsetResponse := &OffsetResponse{Version: mor.version}
for topic, partitions := range offsetRequest.blocks {
for partition, block := range partitions {
offset := mor.getOffset(topic, partition, block.time)
@@ -402,7 +408,7 @@ func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KE
func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*ProduceRequest)
res := &ProduceResponse{}
for topic, partitions := range req.records {
for partition := range partitions {
res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
}

View File

@@ -340,7 +340,7 @@ func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) {
pom.lock.Lock()
defer pom.lock.Unlock()
if offset < pom.offset {
if offset <= pom.offset {
pom.offset = offset
pom.metadata = metadata
pom.dirty = true

View File

@@ -9,11 +9,16 @@ type packetDecoder interface {
getInt16() (int16, error)
getInt32() (int32, error)
getInt64() (int64, error)
getVarint() (int64, error)
getArrayLength() (int, error)
getBool() (bool, error)
// Collections
getBytes() ([]byte, error)
getVarintBytes() ([]byte, error)
getRawBytes(length int) ([]byte, error)
getString() (string, error)
getNullableString() (*string, error)
getInt32Array() ([]int32, error)
getInt64Array() ([]int64, error)
getStringArray() ([]string, error)
@@ -21,6 +26,7 @@ type packetDecoder interface {
// Subsets
remaining() int
getSubset(length int) (packetDecoder, error)
peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset
// Stacks, see PushDecoder
push(in pushDecoder) error
@@ -43,3 +49,12 @@ type pushDecoder interface {
// of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
check(curOffset int, buf []byte) error
}
// dynamicPushDecoder extends the interface of pushDecoder for use cases where the length of the
// field itself is unknown until its value has been decoded (for instance varint-encoded length
// fields).
// During push, the dynamicPushDecoder.decode() method is called instead of reserveLength().
type dynamicPushDecoder interface {
pushDecoder
decoder
}

View File

@@ -11,12 +11,16 @@ type packetEncoder interface {
putInt16(in int16)
putInt32(in int32)
putInt64(in int64)
putVarint(in int64)
putArrayLength(in int) error
putBool(in bool)
// Collections
putBytes(in []byte) error
putVarintBytes(in []byte) error
putRawBytes(in []byte) error
putString(in string) error
putNullableString(in *string) error
putStringArray(in []string) error
putInt32Array(in []int32) error
putInt64Array(in []int64) error
@@ -48,3 +52,14 @@ type pushEncoder interface {
// of data to the saved offset, based on the data between the saved offset and curOffset.
run(curOffset int, buf []byte) error
}
// dynamicPushEncoder extends the interface of pushEncoder for use cases where the length of the
// field itself is unknown until its value has been computed (for instance varint-encoded length
// fields).
type dynamicPushEncoder interface {
pushEncoder
// Called during pop() to adjust the length of the field.
// It should return the difference in bytes between the last computed length and current length.
adjustLength(currOffset int) int
}

View File

@@ -1,6 +1,7 @@
package sarama
import (
"encoding/binary"
"fmt"
"math"
@@ -8,6 +9,7 @@ import (
)
type prepEncoder struct {
stack []pushEncoder
length int
}
@@ -29,6 +31,11 @@ func (pe *prepEncoder) putInt64(in int64) {
pe.length += 8
}
func (pe *prepEncoder) putVarint(in int64) {
var buf [binary.MaxVarintLen64]byte
pe.length += binary.PutVarint(buf[:], in)
}
func (pe *prepEncoder) putArrayLength(in int) error {
if in > math.MaxInt32 {
return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
@@ -37,6 +44,10 @@ func (pe *prepEncoder) putArrayLength(in int) error {
return nil
}
func (pe *prepEncoder) putBool(in bool) {
pe.length++
}
// arrays
func (pe *prepEncoder) putBytes(in []byte) error {
@@ -44,11 +55,16 @@ func (pe *prepEncoder) putBytes(in []byte) error {
if in == nil {
return nil
}
if len(in) > math.MaxInt32 {
return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
return pe.putRawBytes(in)
}
func (pe *prepEncoder) putVarintBytes(in []byte) error {
if in == nil {
pe.putVarint(-1)
return nil
}
pe.putVarint(int64(len(in)))
return pe.putRawBytes(in)
}
func (pe *prepEncoder) putRawBytes(in []byte) error {
@@ -59,6 +75,14 @@ func (pe *prepEncoder) putRawBytes(in []byte) error {
return nil
}
func (pe *prepEncoder) putNullableString(in *string) error {
if in == nil {
pe.length += 2
return nil
}
return pe.putString(*in)
}
func (pe *prepEncoder) putString(in string) error {
pe.length += 2
if len(in) > math.MaxInt16 {
@@ -108,10 +132,18 @@ func (pe *prepEncoder) offset() int {
// stackable
func (pe *prepEncoder) push(in pushEncoder) {
in.saveOffset(pe.length)
pe.length += in.reserveLength()
pe.stack = append(pe.stack, in)
}
func (pe *prepEncoder) pop() error {
in := pe.stack[len(pe.stack)-1]
pe.stack = pe.stack[:len(pe.stack)-1]
if dpe, ok := in.(dynamicPushEncoder); ok {
pe.length += dpe.adjustLength(pe.length)
}
return nil
}

View File

@@ -21,19 +21,56 @@ const (
)
type ProduceRequest struct {
TransactionalID *string
RequiredAcks RequiredAcks
Timeout int32
Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11
records map[string]map[int32]Records
}
func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram,
topicCompressionRatioMetric metrics.Histogram) int64 {
var topicRecordCount int64
for _, messageBlock := range msgSet.Messages {
// Is this a fake "message" wrapping real messages?
if messageBlock.Msg.Set != nil {
topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
} else {
// A single uncompressed message
topicRecordCount++
}
// Better be safe than sorry when computing the compression ratio
if messageBlock.Msg.compressedSize != 0 {
compressionRatio := float64(len(messageBlock.Msg.Value)) /
float64(messageBlock.Msg.compressedSize)
// Histograms do not support decimal values, so multiply the ratio by 100 for better precision
intCompressionRatio := int64(100 * compressionRatio)
compressionRatioMetric.Update(intCompressionRatio)
topicCompressionRatioMetric.Update(intCompressionRatio)
}
}
return topicRecordCount
}
func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram,
topicCompressionRatioMetric metrics.Histogram) int64 {
if recordBatch.compressedRecords != nil {
compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100)
compressionRatioMetric.Update(compressionRatio)
topicCompressionRatioMetric.Update(compressionRatio)
}
return int64(len(recordBatch.Records))
}
func (r *ProduceRequest) encode(pe packetEncoder) error {
if r.Version >= 3 {
if err := pe.putNullableString(r.TransactionalID); err != nil {
return err
}
}
pe.putInt16(int16(r.RequiredAcks))
pe.putInt32(r.Timeout)
metricRegistry := pe.metricRegistry()
var batchSizeMetric metrics.Histogram
var compressionRatioMetric metrics.Histogram
@@ -41,9 +78,14 @@ func (r *ProduceRequest) encode(pe packetEncoder) error {
batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
}
totalRecordCount := int64(0)
err := pe.putArrayLength(len(r.records))
if err != nil {
return err
}
for topic, partitions := range r.records {
err = pe.putString(topic)
if err != nil {
return err
@@ -57,11 +99,11 @@ func (r *ProduceRequest) encode(pe packetEncoder) error {
if metricRegistry != nil {
topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
}
for id, records := range partitions {
startOffset := pe.offset()
pe.putInt32(id)
pe.push(&lengthField{})
err = records.encode(pe)
if err != nil {
return err
}
@@ -70,23 +112,10 @@ func (r *ProduceRequest) encode(pe packetEncoder) error {
return err
}
if metricRegistry != nil {
if r.Version >= 3 {
topicRecordCount += updateBatchMetrics(records.recordBatch, compressionRatioMetric, topicCompressionRatioMetric)
} else {
topicRecordCount += updateMsgSetMetrics(records.msgSet, compressionRatioMetric, topicCompressionRatioMetric)
}
batchSize := int64(pe.offset() - startOffset)
batchSizeMetric.Update(batchSize)
@@ -108,6 +137,15 @@ func (r *ProduceRequest) encode(pe packetEncoder) error {
}
func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
r.Version = version
if version >= 3 {
id, err := pd.getNullableString()
if err != nil {
return err
}
r.TransactionalID = id
}
requiredAcks, err := pd.getInt16()
if err != nil {
return err
@@ -123,7 +161,8 @@ func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
if topicCount == 0 {
return nil
}
r.records = make(map[string]map[int32]Records)
for i := 0; i < topicCount; i++ {
topic, err := pd.getString()
if err != nil {
@@ -133,28 +172,29 @@ func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
if err != nil {
return err
}
r.records[topic] = make(map[int32]Records)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
size, err := pd.getInt32()
if err != nil {
return err
}
recordsDecoder, err := pd.getSubset(int(size))
if err != nil {
return err
}
msgSet := &MessageSet{}
err = msgSet.decode(msgSetDecoder)
if err != nil {
var records Records
if err := records.decode(recordsDecoder); err != nil {
return err
}
r.records[topic][partition] = records
}
}
return nil
}
@@ -172,38 +212,41 @@ func (r *ProduceRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
case 2:
return V0_10_0_0
case 3:
return V0_11_0_0
default:
return minVersion
}
}
func (r *ProduceRequest) ensureRecords(topic string, partition int32) {
if r.records == nil {
r.records = make(map[string]map[int32]Records)
}
if r.records[topic] == nil {
r.records[topic] = make(map[int32]Records)
}
}
func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
r.ensureRecords(topic, partition)
set := r.records[topic][partition].msgSet
if set == nil {
set = new(MessageSet)
r.records[topic][partition] = newLegacyRecords(set)
}
set.addMessage(msg)
}
func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
r.ensureRecords(topic, partition)
r.records[topic][partition] = newLegacyRecords(set)
}
func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) {
r.ensureRecords(topic, partition)
r.records[topic][partition] = newDefaultRecords(batch)
}

View File

@@ -2,6 +2,7 @@ package sarama
import (
"testing"
"time"
)
var (
@@ -32,6 +33,41 @@ var (
0x00,
0xFF, 0xFF, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x02, 0x00, 0xEE}
produceRequestOneRecord = []byte{
0xFF, 0xFF, // Transaction ID
0x01, 0x23, // Required Acks
0x00, 0x00, 0x04, 0x44, // Timeout
0x00, 0x00, 0x00, 0x01, // Number of Topics
0x00, 0x05, 't', 'o', 'p', 'i', 'c', // Topic
0x00, 0x00, 0x00, 0x01, // Number of Partitions
0x00, 0x00, 0x00, 0xAD, // Partition
0x00, 0x00, 0x00, 0x52, // Records length
// recordBatch
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x46,
0x00, 0x00, 0x00, 0x00,
0x02,
0x54, 0x79, 0x61, 0xFD,
0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x01, 0x58, 0x8D, 0xCD, 0x59, 0x38,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01,
// record
0x28,
0x00,
0x0A,
0x00,
0x08, 0x01, 0x02, 0x03, 0x04,
0x06, 0x05, 0x06, 0x07,
0x02,
0x06, 0x08, 0x09, 0x0A,
0x04, 0x0B, 0x0C,
}
)
func TestProduceRequest(t *testing.T) {
@@ -44,4 +80,24 @@ func TestProduceRequest(t *testing.T) {
request.AddMessage("topic", 0xAD, &Message{Codec: CompressionNone, Key: nil, Value: []byte{0x00, 0xEE}})
testRequest(t, "one message", request, produceRequestOneMessage)
request.Version = 3
batch := &RecordBatch{
Version: 2,
FirstTimestamp: time.Unix(1479847795, 0),
MaxTimestamp: time.Unix(0, 0),
Records: []*Record{{
TimestampDelta: 5 * time.Millisecond,
Key: []byte{0x01, 0x02, 0x03, 0x04},
Value: []byte{0x05, 0x06, 0x07},
Headers: []*RecordHeader{{
Key: []byte{0x08, 0x09, 0x0A},
Value: []byte{0x0B, 0x0C},
}},
}},
}
request.AddBatch("topic", 0xAD, batch)
packet := testRequestEncode(t, "one record", request, produceRequestOneRecord)
batch.Records[0].length.startOffset = 0
testRequestDecode(t, "one record", request, packet)
}

View File

@@ -1,6 +1,9 @@
package sarama
import "time"
import (
"fmt"
"time"
)
type ProduceResponseBlock struct {
Err KError
@@ -32,6 +35,23 @@ func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err erro
return nil
}
func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) {
pe.putInt16(int16(b.Err))
pe.putInt64(b.Offset)
if version >= 2 {
timestamp := int64(-1)
if !b.Timestamp.Before(time.Unix(0, 0)) {
timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond)
} else if !b.Timestamp.IsZero() {
return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", b.Timestamp)}
}
pe.putInt64(timestamp)
}
return nil
}
type ProduceResponse struct {
Blocks map[string]map[int32]*ProduceResponseBlock
Version int16
@@ -103,8 +123,10 @@ func (r *ProduceResponse) encode(pe packetEncoder) error {
}
for id, prb := range partitions {
pe.putInt32(id)
err = prb.encode(pe, r.Version)
if err != nil {
return err
}
}
}
if r.Version >= 1 {
@@ -127,6 +149,8 @@ func (r *ProduceResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
case 2:
return V0_10_0_0
case 3:
return V0_11_0_0
default:
return minVersion
}

View File

@@ -1,67 +1,128 @@
package sarama
import "testing"
var (
produceResponseNoBlocks = []byte{
0x00, 0x00, 0x00, 0x00}
produceResponseManyBlocks = []byte{
0x00, 0x00, 0x00, 0x02,
0x00, 0x03, 'f', 'o', 'o',
0x00, 0x00, 0x00, 0x00,
0x00, 0x03, 'b', 'a', 'r',
0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x01,
0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,
0x00, 0x00, 0x00, 0x02,
0x00, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
import (
"fmt"
"testing"
"time"
)
var (
produceResponseNoBlocksV0 = []byte{
0x00, 0x00, 0x00, 0x00}
produceResponseManyBlocksVersions = [][]byte{
{
0x00, 0x00, 0x00, 0x01,
0x00, 0x03, 'f', 'o', 'o',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x01, // Partition 1
0x00, 0x02, // ErrInvalidMessage
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, // Offset 255
}, {
0x00, 0x00, 0x00, 0x01,
0x00, 0x03, 'f', 'o', 'o',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x01, // Partition 1
0x00, 0x02, // ErrInvalidMessage
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, // Offset 255
0x00, 0x00, 0x00, 0x64, // 100 ms throttle time
}, {
0x00, 0x00, 0x00, 0x01,
0x00, 0x03, 'f', 'o', 'o',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x01, // Partition 1
0x00, 0x02, // ErrInvalidMessage
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, // Offset 255
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xE8, // Timestamp January 1st 1970 at 00:00:01,000 UTC (LogAppendTime was used)
0x00, 0x00, 0x00, 0x64, // 100 ms throttle time
},
}
)
func TestProduceResponseDecode(t *testing.T) {
response := ProduceResponse{}
testVersionDecodable(t, "no blocks", &response, produceResponseNoBlocks, 0)
testVersionDecodable(t, "no blocks", &response, produceResponseNoBlocksV0, 0)
if len(response.Blocks) != 0 {
t.Error("Decoding produced", len(response.Blocks), "topics where there were none")
}
testVersionDecodable(t, "many blocks", &response, produceResponseManyBlocks, 0)
if len(response.Blocks) != 2 {
t.Error("Decoding produced", len(response.Blocks), "topics where there were 2")
}
if len(response.Blocks["foo"]) != 0 {
t.Error("Decoding produced", len(response.Blocks["foo"]), "partitions for 'foo' where there were none")
}
if len(response.Blocks["bar"]) != 2 {
t.Error("Decoding produced", len(response.Blocks["bar"]), "partitions for 'bar' where there were two")
}
block := response.GetBlock("bar", 1)
if block == nil {
t.Error("Decoding did not produce a block for bar/1")
} else {
if block.Err != ErrNoError {
t.Error("Decoding failed for bar/1/Err, got:", int16(block.Err))
for v, produceResponseManyBlocks := range produceResponseManyBlocksVersions {
t.Logf("Decoding produceResponseManyBlocks version %d", v)
testVersionDecodable(t, "many blocks", &response, produceResponseManyBlocks, int16(v))
if len(response.Blocks) != 1 {
t.Error("Decoding produced", len(response.Blocks), "topics where there was 1")
}
if len(response.Blocks["foo"]) != 1 {
t.Error("Decoding produced", len(response.Blocks["foo"]), "partitions for 'foo' where there was one")
}
block := response.GetBlock("foo", 1)
if block == nil {
t.Error("Decoding did not produce a block for foo/1")
} else {
if block.Err != ErrInvalidMessage {
t.Error("Decoding failed for foo/2/Err, got:", int16(block.Err))
}
if block.Offset != 255 {
t.Error("Decoding failed for foo/1/Offset, got:", block.Offset)
}
if v >= 2 {
if block.Timestamp != time.Unix(1, 0) {
t.Error("Decoding failed for foo/2/Timestamp, got:", block.Timestamp)
}
}
}
if v >= 1 {
if expected := 100 * time.Millisecond; response.ThrottleTime != expected {
t.Error("Failed decoding produced throttle time, expected:", expected, ", got:", response.ThrottleTime)
}
}
}
}
func TestProduceResponseEncode(t *testing.T) {
response := ProduceResponse{}
response.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
testEncodable(t, "empty", &response, produceResponseNoBlocksV0)
response.Blocks["foo"] = make(map[int32]*ProduceResponseBlock)
response.Blocks["foo"][1] = &ProduceResponseBlock{
Err: ErrInvalidMessage,
Offset: 255,
Timestamp: time.Unix(1, 0),
}
response.ThrottleTime = 100 * time.Millisecond
for v, produceResponseManyBlocks := range produceResponseManyBlocksVersions {
response.Version = int16(v)
testEncodable(t, fmt.Sprintf("many blocks version %d", v), &response, produceResponseManyBlocks)
}
}
func TestProduceResponseEncodeInvalidTimestamp(t *testing.T) {
response := ProduceResponse{}
response.Version = 2
response.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
response.Blocks["t"] = make(map[int32]*ProduceResponseBlock)
response.Blocks["t"][0] = &ProduceResponseBlock{
Err: ErrNoError,
Offset: 0,
// Use a timestamp before Unix time
Timestamp: time.Unix(0, 0).Add(-1 * time.Millisecond),
}
response.ThrottleTime = 100 * time.Millisecond
_, err := encode(&response, nil)
if err == nil {
t.Error("Expecting error, got nil")
}
if _, ok := err.(PacketEncodingError); !ok {
t.Error("Expecting PacketEncodingError, got:", err)
}
}

View File

@@ -1,11 +1,14 @@
package sarama
import "time"
import (
"encoding/binary"
"time"
)
type partitionSet struct {
msgs []*ProducerMessage
recordsToSend Records
bufferBytes int
}
type produceSet struct {
@@ -39,31 +42,64 @@ func (ps *produceSet) add(msg *ProducerMessage) error {
}
}
timestamp := msg.Timestamp
if msg.Timestamp.IsZero() {
timestamp = time.Now()
}
partitions := ps.msgs[msg.Topic]
if partitions == nil {
partitions = make(map[int32]*partitionSet)
ps.msgs[msg.Topic] = partitions
}
var size int
set := partitions[msg.Partition]
if set == nil {
set = &partitionSet{setToSend: new(MessageSet)}
if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
batch := &RecordBatch{
FirstTimestamp: timestamp,
Version: 2,
ProducerID: -1, /* No producer id */
Codec: ps.parent.conf.Producer.Compression,
}
set = &partitionSet{recordsToSend: newDefaultRecords(batch)}
size = recordBatchOverhead
} else {
set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))}
}
partitions[msg.Partition] = set
}
set.msgs = append(set.msgs, msg)
msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
if msg.Timestamp.IsZero() {
msgToSend.Timestamp = time.Now()
} else {
msgToSend.Timestamp = msg.Timestamp
if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
// We are being conservative here to avoid having to prep encode the record
size += maximumRecordOverhead
rec := &Record{
Key: key,
Value: val,
TimestampDelta: timestamp.Sub(set.recordsToSend.recordBatch.FirstTimestamp),
}
size += len(key) + len(val)
if len(msg.Headers) > 0 {
rec.Headers = make([]*RecordHeader, len(msg.Headers))
for i := range msg.Headers {
rec.Headers[i] = &msg.Headers[i]
size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32
}
}
set.recordsToSend.recordBatch.addRecord(rec)
} else {
msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
msgToSend.Timestamp = timestamp
msgToSend.Version = 1
}
set.recordsToSend.msgSet.addMessage(msgToSend)
size = producerMessageOverhead + len(key) + len(val)
}
set.bufferBytes += size
ps.bufferBytes += size
ps.bufferCount++
@@ -79,17 +115,24 @@ func (ps *produceSet) buildRequest() *ProduceRequest {
if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
req.Version = 2
}
if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
req.Version = 3
}
for topic, partitionSet := range ps.msgs {
for partition, set := range partitionSet {
if req.Version >= 3 {
req.AddBatch(topic, partition, set.recordsToSend.recordBatch)
continue
}
if ps.parent.conf.Producer.Compression == CompressionNone {
req.AddSet(topic, partition, set.recordsToSend.msgSet)
} else {
// When compression is enabled, the entire set for each partition is compressed
// and sent as the payload of a single fake "message" with the appropriate codec
// set and no key. When the server sees a message with a compression codec, it
// decompresses the payload and treats the result as its message set.
payload, err := encode(set.recordsToSend.msgSet, ps.parent.conf.MetricRegistry)
if err != nil {
Logger.Println(err) // if this happens, it's basically our fault.
panic(err)
@@ -98,11 +141,11 @@ func (ps *produceSet) buildRequest() *ProduceRequest {
Codec: ps.parent.conf.Producer.Compression,
Key: nil,
Value: payload,
Set: set.recordsToSend.msgSet, // Provide the underlying message set for accurate metrics
}
if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
compMsg.Version = 1
compMsg.Timestamp = set.recordsToSend.msgSet.Messages[0].Msg.Timestamp
}
req.AddMessage(topic, partition, compMsg)
}
@@ -135,14 +178,19 @@ func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMe
}
func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
version := 1
if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
version = 2
}
switch {
// Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
case ps.bufferBytes+msg.byteSize() >= int(MaxRequestSize-(10*1024)):
case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)):
return true
// Would we overflow the size-limit of a compressed message-batch for this partition?
case ps.parent.conf.Producer.Compression != CompressionNone &&
ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize() >= ps.parent.conf.Producer.MaxMessageBytes:
ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes:
return true
// Would we overflow simply in number of messages?
case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:

View File

@@ -1,6 +1,7 @@
package sarama
import (
"fmt"
"testing"
"time"
)
@@ -137,7 +138,7 @@ func TestProduceSetRequestBuilding(t *testing.T) {
t.Error("Timeout not set properly")
}
if len(req.records) != 2 {
t.Error("Wrong number of topics in request")
}
}
@@ -166,7 +167,7 @@ func TestProduceSetCompressedRequestBuilding(t *testing.T) {
t.Error("Wrong request version")
}
for _, msgBlock := range req.msgSets["t1"][0].Messages {
for _, msgBlock := range req.records["t1"][0].msgSet.Messages {
msg := msgBlock.Msg
err := msg.decodeSet()
if err != nil {
@@ -183,3 +184,65 @@ func TestProduceSetCompressedRequestBuilding(t *testing.T) {
}
}
}
func TestProduceSetV3RequestBuilding(t *testing.T) {
parent, ps := makeProduceSet()
parent.conf.Producer.RequiredAcks = WaitForAll
parent.conf.Producer.Timeout = 10 * time.Second
parent.conf.Version = V0_11_0_0
now := time.Now()
msg := &ProducerMessage{
Topic: "t1",
Partition: 0,
Key: StringEncoder(TestMessage),
Value: StringEncoder(TestMessage),
Headers: []RecordHeader{
RecordHeader{
Key: []byte("header-1"),
Value: []byte("value-1"),
},
RecordHeader{
Key: []byte("header-2"),
Value: []byte("value-2"),
},
RecordHeader{
Key: []byte("header-3"),
Value: []byte("value-3"),
},
},
Timestamp: now,
}
for i := 0; i < 10; i++ {
safeAddMessage(t, ps, msg)
msg.Timestamp = msg.Timestamp.Add(time.Second)
}
req := ps.buildRequest()
if req.Version != 3 {
t.Error("Wrong request version")
}
batch := req.records["t1"][0].recordBatch
if batch.FirstTimestamp != now {
t.Errorf("Wrong first timestamp: %v", batch.FirstTimestamp)
}
for i := 0; i < 10; i++ {
rec := batch.Records[i]
if rec.TimestampDelta != time.Duration(i)*time.Second {
t.Errorf("Wrong timestamp delta: %v", rec.TimestampDelta)
}
for j, h := range batch.Records[i].Headers {
exp := fmt.Sprintf("header-%d", j+1)
if string(h.Key) != exp {
t.Errorf("Wrong header key, expected %v, got %v", exp, h.Key)
}
exp = fmt.Sprintf("value-%d", j+1)
if string(h.Value) != exp {
t.Errorf("Wrong header value, expected %v, got %v", exp, h.Value)
}
}
}
}

View File

@@ -7,8 +7,11 @@ import (
var errInvalidArrayLength = PacketDecodingError{"invalid array length"}
var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"}
var errInvalidByteSliceLengthType = PacketDecodingError{"invalid byteslice length type"}
var errInvalidStringLength = PacketDecodingError{"invalid string length"}
var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"}
var errVarintOverflow = PacketDecodingError{"varint overflow"}
var errInvalidBool = PacketDecodingError{"invalid bool"}
type realDecoder struct {
raw []byte
@@ -58,12 +61,26 @@ func (rd *realDecoder) getInt64() (int64, error) {
return tmp, nil
}
func (rd *realDecoder) getVarint() (int64, error) {
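// binary.Varint returns n == 0 when the buffer runs out mid-varint and a
// negative n (the negated count of bytes read) on 64-bit overflow; the
// branches below map those cases onto ErrInsufficientData and errVarintOverflow.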
tmp, n := binary.Varint(rd.raw[rd.off:])
if n == 0 {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
}
if n < 0 {
rd.off -= n
return -1, errVarintOverflow
}
rd.off += n
return tmp, nil
}
func (rd *realDecoder) getArrayLength() (int, error) {
if rd.remaining() < 4 {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
}
tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:])))
rd.off += 4
if tmp > rd.remaining() {
rd.off = len(rd.raw)
@@ -74,60 +91,84 @@ func (rd *realDecoder) getArrayLength() (int, error) {
return tmp, nil
}
func (rd *realDecoder) getBool() (bool, error) {
b, err := rd.getInt8()
if err != nil || b == 0 {
return false, err
}
if b != 1 {
return false, errInvalidBool
}
return true, nil
}
// collections
func (rd *realDecoder) getBytes() ([]byte, error) {
tmp, err := rd.getInt32()
if err != nil {
return nil, err
}
if tmp == -1 {
return nil, nil
}
return rd.getRawBytes(int(tmp))
}
func (rd *realDecoder) getVarintBytes() ([]byte, error) {
tmp, err := rd.getVarint()
if err != nil {
return nil, err
}
if tmp == -1 {
return nil, nil
}
return rd.getRawBytes(int(tmp))
}
func (rd *realDecoder) getStringLength() (int, error) {
length, err := rd.getInt16()
if err != nil {
return 0, err
}
n := int(length)
switch {
case n < -1:
return 0, errInvalidStringLength
case n > rd.remaining():
rd.off = len(rd.raw)
return 0, ErrInsufficientData
}
return n, nil
}
func (rd *realDecoder) getString() (string, error) {
n, err := rd.getStringLength()
if err != nil || n == -1 {
return "", err
}
tmpStr := string(rd.raw[rd.off : rd.off+n])
rd.off += n
return tmpStr, nil
}
func (rd *realDecoder) getNullableString() (*string, error) {
n, err := rd.getStringLength()
if err != nil || n == -1 {
return nil, err
}
tmpStr := string(rd.raw[rd.off : rd.off+n])
rd.off += n
return &tmpStr, err
}
func (rd *realDecoder) getInt32Array() ([]int32, error) {
if rd.remaining() < 4 {
rd.off = len(rd.raw)
@@ -221,8 +262,16 @@ func (rd *realDecoder) remaining() int {
}
func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
buf, err := rd.getRawBytes(length)
if err != nil {
return nil, err
}
return &realDecoder{raw: buf}, nil
}
func (rd *realDecoder) getRawBytes(length int) ([]byte, error) {
if length < 0 {
return nil, errInvalidByteSliceLength
} else if length > rd.remaining() {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
@@ -230,7 +279,15 @@ func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
start := rd.off
rd.off += length
return rd.raw[start:rd.off], nil
}
func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) {
if rd.remaining() < offset+length {
return nil, ErrInsufficientData
}
off := rd.off + offset
return &realDecoder{raw: rd.raw[off : off+length]}, nil
}
// stacks
@@ -238,10 +295,17 @@ func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
func (rd *realDecoder) push(in pushDecoder) error {
in.saveOffset(rd.off)
var reserve int
if dpd, ok := in.(dynamicPushDecoder); ok {
if err := dpd.decode(rd); err != nil {
return err
}
} else {
reserve = in.reserveLength()
if rd.remaining() < reserve {
rd.off = len(rd.raw)
return ErrInsufficientData
}
}
rd.stack = append(rd.stack, in)

View File

@@ -35,11 +35,23 @@ func (re *realEncoder) putInt64(in int64) {
re.off += 8
}
func (re *realEncoder) putVarint(in int64) {
re.off += binary.PutVarint(re.raw[re.off:], in)
}
func (re *realEncoder) putArrayLength(in int) error {
re.putInt32(int32(in))
return nil
}
func (re *realEncoder) putBool(in bool) {
if in {
re.putInt8(1)
return
}
re.putInt8(0)
}
// collection
func (re *realEncoder) putRawBytes(in []byte) error {
@@ -54,9 +66,16 @@ func (re *realEncoder) putBytes(in []byte) error {
return nil
}
re.putInt32(int32(len(in)))
return re.putRawBytes(in)
}
func (re *realEncoder) putVarintBytes(in []byte) error {
if in == nil {
re.putVarint(-1)
return nil
}
re.putVarint(int64(len(in)))
return re.putRawBytes(in)
}
func (re *realEncoder) putString(in string) error {
@@ -66,6 +85,14 @@ func (re *realEncoder) putString(in string) error {
return nil
}
func (re *realEncoder) putNullableString(in *string) error {
if in == nil {
re.putInt16(-1)
return nil
}
return re.putString(*in)
}
func (re *realEncoder) putStringArray(in []string) error {
err := re.putArrayLength(len(in))
if err != nil {

vendor/github.com/Shopify/sarama/record.go generated vendored Normal file
View File

@@ -0,0 +1,113 @@
package sarama
import (
"encoding/binary"
"time"
)
const (
controlMask = 0x20
maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1
)
type RecordHeader struct {
Key []byte
Value []byte
}
func (h *RecordHeader) encode(pe packetEncoder) error {
if err := pe.putVarintBytes(h.Key); err != nil {
return err
}
return pe.putVarintBytes(h.Value)
}
func (h *RecordHeader) decode(pd packetDecoder) (err error) {
if h.Key, err = pd.getVarintBytes(); err != nil {
return err
}
if h.Value, err = pd.getVarintBytes(); err != nil {
return err
}
return nil
}
type Record struct {
Attributes int8
TimestampDelta time.Duration
OffsetDelta int64
Key []byte
Value []byte
Headers []*RecordHeader
length varintLengthField
}
func (r *Record) encode(pe packetEncoder) error {
pe.push(&r.length)
pe.putInt8(r.Attributes)
pe.putVarint(int64(r.TimestampDelta / time.Millisecond))
pe.putVarint(r.OffsetDelta)
if err := pe.putVarintBytes(r.Key); err != nil {
return err
}
if err := pe.putVarintBytes(r.Value); err != nil {
return err
}
pe.putVarint(int64(len(r.Headers)))
for _, h := range r.Headers {
if err := h.encode(pe); err != nil {
return err
}
}
return pe.pop()
}
func (r *Record) decode(pd packetDecoder) (err error) {
if err = pd.push(&r.length); err != nil {
return err
}
if r.Attributes, err = pd.getInt8(); err != nil {
return err
}
timestamp, err := pd.getVarint()
if err != nil {
return err
}
r.TimestampDelta = time.Duration(timestamp) * time.Millisecond
if r.OffsetDelta, err = pd.getVarint(); err != nil {
return err
}
if r.Key, err = pd.getVarintBytes(); err != nil {
return err
}
if r.Value, err = pd.getVarintBytes(); err != nil {
return err
}
numHeaders, err := pd.getVarint()
if err != nil {
return err
}
if numHeaders >= 0 {
r.Headers = make([]*RecordHeader, numHeaders)
}
for i := int64(0); i < numHeaders; i++ {
hdr := new(RecordHeader)
if err := hdr.decode(pd); err != nil {
return err
}
r.Headers[i] = hdr
}
return pd.pop()
}

vendor/github.com/Shopify/sarama/record_batch.go generated vendored Normal file
View File

@@ -0,0 +1,265 @@
package sarama
import (
"bytes"
"compress/gzip"
"fmt"
"io/ioutil"
"time"
"github.com/eapache/go-xerial-snappy"
"github.com/pierrec/lz4"
)
const recordBatchOverhead = 49
type recordsArray []*Record
func (e recordsArray) encode(pe packetEncoder) error {
for _, r := range e {
if err := r.encode(pe); err != nil {
return err
}
}
return nil
}
func (e recordsArray) decode(pd packetDecoder) error {
for i := range e {
rec := &Record{}
if err := rec.decode(pd); err != nil {
return err
}
e[i] = rec
}
return nil
}
type RecordBatch struct {
FirstOffset int64
PartitionLeaderEpoch int32
Version int8
Codec CompressionCodec
Control bool
LastOffsetDelta int32
FirstTimestamp time.Time
MaxTimestamp time.Time
ProducerID int64
ProducerEpoch int16
FirstSequence int32
Records []*Record
PartialTrailingRecord bool
compressedRecords []byte
recordsLen int // uncompressed records size
}
func (b *RecordBatch) encode(pe packetEncoder) error {
if b.Version != 2 {
return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)}
}
pe.putInt64(b.FirstOffset)
pe.push(&lengthField{})
pe.putInt32(b.PartitionLeaderEpoch)
pe.putInt8(b.Version)
pe.push(newCRC32Field(crcCastagnoli))
pe.putInt16(b.computeAttributes())
pe.putInt32(b.LastOffsetDelta)
if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil {
return err
}
if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil {
return err
}
pe.putInt64(b.ProducerID)
pe.putInt16(b.ProducerEpoch)
pe.putInt32(b.FirstSequence)
if err := pe.putArrayLength(len(b.Records)); err != nil {
return err
}
if b.compressedRecords == nil {
if err := b.encodeRecords(pe); err != nil {
return err
}
}
if err := pe.putRawBytes(b.compressedRecords); err != nil {
return err
}
if err := pe.pop(); err != nil {
return err
}
return pe.pop()
}
func (b *RecordBatch) decode(pd packetDecoder) (err error) {
if b.FirstOffset, err = pd.getInt64(); err != nil {
return err
}
batchLen, err := pd.getInt32()
if err != nil {
return err
}
if b.PartitionLeaderEpoch, err = pd.getInt32(); err != nil {
return err
}
if b.Version, err = pd.getInt8(); err != nil {
return err
}
if err = pd.push(&crc32Field{polynomial: crcCastagnoli}); err != nil {
return err
}
attributes, err := pd.getInt16()
if err != nil {
return err
}
b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask)
b.Control = attributes&controlMask == controlMask
if b.LastOffsetDelta, err = pd.getInt32(); err != nil {
return err
}
if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil {
return err
}
if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil {
return err
}
if b.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if b.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
if b.FirstSequence, err = pd.getInt32(); err != nil {
return err
}
numRecs, err := pd.getArrayLength()
if err != nil {
return err
}
if numRecs >= 0 {
b.Records = make([]*Record, numRecs)
}
bufSize := int(batchLen) - recordBatchOverhead
recBuffer, err := pd.getRawBytes(bufSize)
if err != nil {
if err == ErrInsufficientData {
b.PartialTrailingRecord = true
b.Records = nil
return nil
}
return err
}
if err = pd.pop(); err != nil {
return err
}
switch b.Codec {
case CompressionNone:
case CompressionGZIP:
reader, err := gzip.NewReader(bytes.NewReader(recBuffer))
if err != nil {
return err
}
if recBuffer, err = ioutil.ReadAll(reader); err != nil {
return err
}
case CompressionSnappy:
if recBuffer, err = snappy.Decode(recBuffer); err != nil {
return err
}
case CompressionLZ4:
reader := lz4.NewReader(bytes.NewReader(recBuffer))
if recBuffer, err = ioutil.ReadAll(reader); err != nil {
return err
}
default:
return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", b.Codec)}
}
b.recordsLen = len(recBuffer)
err = decode(recBuffer, recordsArray(b.Records))
if err == ErrInsufficientData {
b.PartialTrailingRecord = true
b.Records = nil
return nil
}
return err
}
func (b *RecordBatch) encodeRecords(pe packetEncoder) error {
var raw []byte
if b.Codec != CompressionNone {
var err error
if raw, err = encode(recordsArray(b.Records), nil); err != nil {
return err
}
b.recordsLen = len(raw)
}
switch b.Codec {
case CompressionNone:
offset := pe.offset()
if err := recordsArray(b.Records).encode(pe); err != nil {
return err
}
b.recordsLen = pe.offset() - offset
case CompressionGZIP:
var buf bytes.Buffer
writer := gzip.NewWriter(&buf)
if _, err := writer.Write(raw); err != nil {
return err
}
if err := writer.Close(); err != nil {
return err
}
b.compressedRecords = buf.Bytes()
case CompressionSnappy:
b.compressedRecords = snappy.Encode(raw)
case CompressionLZ4:
var buf bytes.Buffer
writer := lz4.NewWriter(&buf)
if _, err := writer.Write(raw); err != nil {
return err
}
if err := writer.Close(); err != nil {
return err
}
b.compressedRecords = buf.Bytes()
default:
return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)}
}
return nil
}
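// Record batch attributes (v2): the low three bits carry the compression
// codec and bit 0x20 flags a control batch; the remaining bits are unused here.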
func (b *RecordBatch) computeAttributes() int16 {
attr := int16(b.Codec) & int16(compressionCodecMask)
if b.Control {
attr |= controlMask
}
return attr
}
func (b *RecordBatch) addRecord(r *Record) {
b.Records = append(b.Records, r)
}

vendor/github.com/Shopify/sarama/record_test.go generated vendored Normal file
View File

@@ -0,0 +1,284 @@
package sarama
import (
"reflect"
"runtime"
"strconv"
"strings"
"testing"
"time"
"github.com/davecgh/go-spew/spew"
)
var recordBatchTestCases = []struct {
name string
batch RecordBatch
encoded []byte
oldGoEncoded []byte // used in case of gzipped content for go versions prior to 1.8
}{
{
name: "empty record",
batch: RecordBatch{
Version: 2,
FirstTimestamp: time.Unix(0, 0),
MaxTimestamp: time.Unix(0, 0),
Records: []*Record{},
},
encoded: []byte{
0, 0, 0, 0, 0, 0, 0, 0, // First Offset
0, 0, 0, 49, // Length
0, 0, 0, 0, // Partition Leader Epoch
2, // Version
89, 95, 183, 221, // CRC
0, 0, // Attributes
0, 0, 0, 0, // Last Offset Delta
0, 0, 0, 0, 0, 0, 0, 0, // First Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Producer ID
0, 0, // Producer Epoch
0, 0, 0, 0, // First Sequence
0, 0, 0, 0, // Number of Records
},
},
{
name: "control batch",
batch: RecordBatch{
Version: 2,
Control: true,
FirstTimestamp: time.Unix(0, 0),
MaxTimestamp: time.Unix(0, 0),
Records: []*Record{},
},
encoded: []byte{
0, 0, 0, 0, 0, 0, 0, 0, // First Offset
0, 0, 0, 49, // Length
0, 0, 0, 0, // Partition Leader Epoch
2, // Version
81, 46, 67, 217, // CRC
0, 32, // Attributes
0, 0, 0, 0, // Last Offset Delta
0, 0, 0, 0, 0, 0, 0, 0, // First Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Producer ID
0, 0, // Producer Epoch
0, 0, 0, 0, // First Sequence
0, 0, 0, 0, // Number of Records
},
},
{
name: "uncompressed record",
batch: RecordBatch{
Version: 2,
FirstTimestamp: time.Unix(1479847795, 0),
MaxTimestamp: time.Unix(0, 0),
Records: []*Record{{
TimestampDelta: 5 * time.Millisecond,
Key: []byte{1, 2, 3, 4},
Value: []byte{5, 6, 7},
Headers: []*RecordHeader{{
Key: []byte{8, 9, 10},
Value: []byte{11, 12},
}},
}},
recordsLen: 21,
},
encoded: []byte{
0, 0, 0, 0, 0, 0, 0, 0, // First Offset
0, 0, 0, 70, // Length
0, 0, 0, 0, // Partition Leader Epoch
2, // Version
84, 121, 97, 253, // CRC
0, 0, // Attributes
0, 0, 0, 0, // Last Offset Delta
0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Producer ID
0, 0, // Producer Epoch
0, 0, 0, 0, // First Sequence
0, 0, 0, 1, // Number of Records
40, // Record Length
0, // Attributes
10, // Timestamp Delta
0, // Offset Delta
8, // Key Length
1, 2, 3, 4,
6, // Value Length
5, 6, 7,
2, // Number of Headers
6, // Header Key Length
8, 9, 10, // Header Key
4, // Header Value Length
11, 12, // Header Value
},
},
{
name: "gzipped record",
batch: RecordBatch{
Version: 2,
Codec: CompressionGZIP,
FirstTimestamp: time.Unix(1479847795, 0),
MaxTimestamp: time.Unix(0, 0),
Records: []*Record{{
TimestampDelta: 5 * time.Millisecond,
Key: []byte{1, 2, 3, 4},
Value: []byte{5, 6, 7},
Headers: []*RecordHeader{{
Key: []byte{8, 9, 10},
Value: []byte{11, 12},
}},
}},
recordsLen: 21,
},
encoded: []byte{
0, 0, 0, 0, 0, 0, 0, 0, // First Offset
0, 0, 0, 94, // Length
0, 0, 0, 0, // Partition Leader Epoch
2, // Version
159, 236, 182, 189, // CRC
0, 1, // Attributes
0, 0, 0, 0, // Last Offset Delta
0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Producer ID
0, 0, // Producer Epoch
0, 0, 0, 0, // First Sequence
0, 0, 0, 1, // Number of Records
31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 210, 96, 224, 98, 224, 96, 100, 98, 102, 97, 99, 101,
99, 103, 98, 227, 224, 228, 98, 225, 230, 1, 4, 0, 0, 255, 255, 173, 201, 88, 103, 21, 0, 0, 0,
},
oldGoEncoded: []byte{
0, 0, 0, 0, 0, 0, 0, 0, // First Offset
0, 0, 0, 94, // Length
0, 0, 0, 0, // Partition Leader Epoch
2, // Version
0, 216, 14, 210, // CRC
0, 1, // Attributes
0, 0, 0, 0, // Last Offset Delta
0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Producer ID
0, 0, // Producer Epoch
0, 0, 0, 0, // First Sequence
0, 0, 0, 1, // Number of Records
31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 210, 96, 224, 98, 224, 96, 100, 98, 102, 97, 99, 101,
99, 103, 98, 227, 224, 228, 98, 225, 230, 1, 4, 0, 0, 255, 255, 173, 201, 88, 103, 21, 0, 0, 0,
},
},
{
name: "snappy compressed record",
batch: RecordBatch{
Version: 2,
Codec: CompressionSnappy,
FirstTimestamp: time.Unix(1479847795, 0),
MaxTimestamp: time.Unix(0, 0),
Records: []*Record{{
TimestampDelta: 5 * time.Millisecond,
Key: []byte{1, 2, 3, 4},
Value: []byte{5, 6, 7},
Headers: []*RecordHeader{{
Key: []byte{8, 9, 10},
Value: []byte{11, 12},
}},
}},
recordsLen: 21,
},
encoded: []byte{
0, 0, 0, 0, 0, 0, 0, 0, // First Offset
0, 0, 0, 72, // Length
0, 0, 0, 0, // Partition Leader Epoch
2, // Version
21, 0, 159, 97, // CRC
0, 2, // Attributes
0, 0, 0, 0, // Last Offset Delta
0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Producer ID
0, 0, // Producer Epoch
0, 0, 0, 0, // First Sequence
0, 0, 0, 1, // Number of Records
21, 80, 40, 0, 10, 0, 8, 1, 2, 3, 4, 6, 5, 6, 7, 2, 6, 8, 9, 10, 4, 11, 12,
},
},
{
name: "lz4 compressed record",
batch: RecordBatch{
Version: 2,
Codec: CompressionLZ4,
FirstTimestamp: time.Unix(1479847795, 0),
MaxTimestamp: time.Unix(0, 0),
Records: []*Record{{
TimestampDelta: 5 * time.Millisecond,
Key: []byte{1, 2, 3, 4},
Value: []byte{5, 6, 7},
Headers: []*RecordHeader{{
Key: []byte{8, 9, 10},
Value: []byte{11, 12},
}},
}},
recordsLen: 21,
},
encoded: []byte{
0, 0, 0, 0, 0, 0, 0, 0, // First Offset
0, 0, 0, 89, // Length
0, 0, 0, 0, // Partition Leader Epoch
2, // Version
169, 74, 119, 197, // CRC
0, 3, // Attributes
0, 0, 0, 0, // Last Offset Delta
0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Producer ID
0, 0, // Producer Epoch
0, 0, 0, 0, // First Sequence
0, 0, 0, 1, // Number of Records
4, 34, 77, 24, 100, 112, 185, 21, 0, 0, 128, 40, 0, 10, 0, 8, 1, 2, 3, 4, 6, 5, 6, 7, 2,
6, 8, 9, 10, 4, 11, 12, 0, 0, 0, 0, 12, 59, 239, 146,
},
},
}
func isOldGo(t *testing.T) bool {
v := strings.Split(runtime.Version()[2:], ".")
if len(v) < 2 {
t.Logf("Can't parse version: %s", runtime.Version())
return false
}
maj, err := strconv.Atoi(v[0])
if err != nil {
t.Logf("Can't parse version: %s", runtime.Version())
return false
}
min, err := strconv.Atoi(v[1])
if err != nil {
t.Logf("Can't parse version: %s", runtime.Version())
return false
}
return maj < 1 || (maj == 1 && min < 8)
}
func TestRecordBatchEncoding(t *testing.T) {
for _, tc := range recordBatchTestCases {
if tc.oldGoEncoded != nil && isOldGo(t) {
testEncodable(t, tc.name, &tc.batch, tc.oldGoEncoded)
} else {
testEncodable(t, tc.name, &tc.batch, tc.encoded)
}
}
}
func TestRecordBatchDecoding(t *testing.T) {
for _, tc := range recordBatchTestCases {
batch := RecordBatch{}
testDecodable(t, tc.name, &batch, tc.encoded)
for _, r := range batch.Records {
r.length = varintLengthField{}
}
for _, r := range tc.batch.Records {
r.length = varintLengthField{}
}
if !reflect.DeepEqual(batch, tc.batch) {
t.Errorf(spew.Sprintf("invalid decode of %s\ngot %+v\nwanted %+v", tc.name, batch, tc.batch))
}
}
}

vendor/github.com/Shopify/sarama/records.go generated vendored Normal file
View File

@@ -0,0 +1,167 @@
package sarama
import "fmt"
const (
unknownRecords = iota
legacyRecords
defaultRecords
magicOffset = 16
magicLength = 1
)
// Records implements a union type containing either a RecordBatch or a legacy MessageSet.
type Records struct {
recordsType int
msgSet *MessageSet
recordBatch *RecordBatch
}
func newLegacyRecords(msgSet *MessageSet) Records {
return Records{recordsType: legacyRecords, msgSet: msgSet}
}
func newDefaultRecords(batch *RecordBatch) Records {
return Records{recordsType: defaultRecords, recordBatch: batch}
}
// setTypeFromFields sets type of Records depending on which of msgSet or recordBatch is not nil.
// The first return value indicates whether both fields are nil (and the type is not set).
// If both fields are not nil, it returns an error.
func (r *Records) setTypeFromFields() (bool, error) {
if r.msgSet == nil && r.recordBatch == nil {
return true, nil
}
if r.msgSet != nil && r.recordBatch != nil {
return false, fmt.Errorf("both msgSet and recordBatch are set, but record type is unknown")
}
r.recordsType = defaultRecords
if r.msgSet != nil {
r.recordsType = legacyRecords
}
return false, nil
}
func (r *Records) encode(pe packetEncoder) error {
if r.recordsType == unknownRecords {
if empty, err := r.setTypeFromFields(); err != nil || empty {
return err
}
}
switch r.recordsType {
case legacyRecords:
if r.msgSet == nil {
return nil
}
return r.msgSet.encode(pe)
case defaultRecords:
if r.recordBatch == nil {
return nil
}
return r.recordBatch.encode(pe)
}
return fmt.Errorf("unknown records type: %v", r.recordsType)
}
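// On the wire both formats put the magic byte at absolute offset 16: a legacy
// message is offset(8) + size(4) + CRC(4) + magic, while a v2 batch is
// offset(8) + length(4) + partition leader epoch(4) + magic, so peeking a
// single byte there is enough to tell them apart.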
func (r *Records) setTypeFromMagic(pd packetDecoder) error {
dec, err := pd.peek(magicOffset, magicLength)
if err != nil {
return err
}
magic, err := dec.getInt8()
if err != nil {
return err
}
r.recordsType = defaultRecords
if magic < 2 {
r.recordsType = legacyRecords
}
return nil
}
func (r *Records) decode(pd packetDecoder) error {
if r.recordsType == unknownRecords {
if err := r.setTypeFromMagic(pd); err != nil {
return err
}
}
switch r.recordsType {
case legacyRecords:
r.msgSet = &MessageSet{}
return r.msgSet.decode(pd)
case defaultRecords:
r.recordBatch = &RecordBatch{}
return r.recordBatch.decode(pd)
}
return fmt.Errorf("unknown records type: %v", r.recordsType)
}
func (r *Records) numRecords() (int, error) {
if r.recordsType == unknownRecords {
if empty, err := r.setTypeFromFields(); err != nil || empty {
return 0, err
}
}
switch r.recordsType {
case legacyRecords:
if r.msgSet == nil {
return 0, nil
}
return len(r.msgSet.Messages), nil
case defaultRecords:
if r.recordBatch == nil {
return 0, nil
}
return len(r.recordBatch.Records), nil
}
return 0, fmt.Errorf("unknown records type: %v", r.recordsType)
}
func (r *Records) isPartial() (bool, error) {
if r.recordsType == unknownRecords {
if empty, err := r.setTypeFromFields(); err != nil || empty {
return false, err
}
}
switch r.recordsType {
case unknownRecords:
return false, nil
case legacyRecords:
if r.msgSet == nil {
return false, nil
}
return r.msgSet.PartialTrailingMessage, nil
case defaultRecords:
if r.recordBatch == nil {
return false, nil
}
return r.recordBatch.PartialTrailingRecord, nil
}
return false, fmt.Errorf("unknown records type: %v", r.recordsType)
}
func (r *Records) isControl() (bool, error) {
if r.recordsType == unknownRecords {
if empty, err := r.setTypeFromFields(); err != nil || empty {
return false, err
}
}
switch r.recordsType {
case legacyRecords:
return false, nil
case defaultRecords:
if r.recordBatch == nil {
return false, nil
}
return r.recordBatch.Control, nil
}
return false, fmt.Errorf("unknown records type: %v", r.recordsType)
}

vendor/github.com/Shopify/sarama/records_test.go generated vendored Normal file
View File

@@ -0,0 +1,143 @@
package sarama
import (
"bytes"
"reflect"
"testing"
)
func TestLegacyRecords(t *testing.T) {
set := &MessageSet{
Messages: []*MessageBlock{
{
Msg: &Message{
Version: 1,
},
},
},
}
r := newLegacyRecords(set)
exp, err := encode(set, nil)
if err != nil {
t.Fatal(err)
}
buf, err := encode(&r, nil)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf, exp) {
t.Errorf("Wrong encoding for legacy records, wanted %v, got %v", exp, buf)
}
set = &MessageSet{}
r = Records{}
err = decode(exp, set)
if err != nil {
t.Fatal(err)
}
err = decode(buf, &r)
if err != nil {
t.Fatal(err)
}
if r.recordsType != legacyRecords {
t.Fatalf("Wrong records type %v, expected %v", r.recordsType, legacyRecords)
}
if !reflect.DeepEqual(set, r.msgSet) {
t.Errorf("Wrong decoding for legacy records, wanted %#+v, got %#+v", set, r.msgSet)
}
n, err := r.numRecords()
if err != nil {
t.Fatal(err)
}
if n != 1 {
t.Errorf("Wrong number of records, wanted 1, got %d", n)
}
p, err := r.isPartial()
if err != nil {
t.Fatal(err)
}
if p {
t.Errorf("MessageSet shouldn't have a partial trailing message")
}
c, err := r.isControl()
if err != nil {
t.Fatal(err)
}
if c {
t.Errorf("MessageSet can't be a control batch")
}
}
func TestDefaultRecords(t *testing.T) {
batch := &RecordBatch{
Version: 2,
Records: []*Record{
{
Value: []byte{1},
},
},
}
r := newDefaultRecords(batch)
exp, err := encode(batch, nil)
if err != nil {
t.Fatal(err)
}
buf, err := encode(&r, nil)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf, exp) {
t.Errorf("Wrong encoding for default records, wanted %v, got %v", exp, buf)
}
batch = &RecordBatch{}
r = Records{}
err = decode(exp, batch)
if err != nil {
t.Fatal(err)
}
err = decode(buf, &r)
if err != nil {
t.Fatal(err)
}
if r.recordsType != defaultRecords {
t.Fatalf("Wrong records type %v, expected %v", r.recordsType, defaultRecords)
}
if !reflect.DeepEqual(batch, r.recordBatch) {
t.Errorf("Wrong decoding for default records, wanted %#+v, got %#+v", batch, r.recordBatch)
}
n, err := r.numRecords()
if err != nil {
t.Fatal(err)
}
if n != 1 {
t.Errorf("Wrong number of records, wanted 1, got %d", n)
}
p, err := r.isPartial()
if err != nil {
t.Fatal(err)
}
if p {
t.Errorf("RecordBatch shouldn't have a partial trailing record")
}
c, err := r.isControl()
if err != nil {
t.Fatal(err)
}
if c {
t.Errorf("RecordBatch shouldn't be a control batch")
}
}

View File

@@ -114,6 +114,8 @@ func allocateBody(key, version int16) protocolBody {
return &SaslHandshakeRequest{}
case 18:
return &ApiVersionsRequest{}
case 37:
return &CreatePartitionsRequest{}
}
return nil
}

vendor/github.com/Shopify/sarama/timestamp.go generated vendored Normal file
View File

@@ -0,0 +1,40 @@
package sarama
import (
"fmt"
"time"
)
type Timestamp struct {
*time.Time
}
func (t Timestamp) encode(pe packetEncoder) error {
timestamp := int64(-1)
if !t.Before(time.Unix(0, 0)) {
timestamp = t.UnixNano() / int64(time.Millisecond)
} else if !t.IsZero() {
return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", t)}
}
pe.putInt64(timestamp)
return nil
}
func (t Timestamp) decode(pd packetDecoder) error {
millis, err := pd.getInt64()
if err != nil {
return err
}
// negative timestamps are invalid, in these cases we should return
// a zero time
timestamp := time.Time{}
if millis >= 0 {
timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
}
*t.Time = timestamp
return nil
}
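As a quick sanity check of the millisecond round trip implemented above, here is a tiny self-contained snippet (plain Go, independent of this decoder):
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Unix(1479847795, 5*int64(time.Millisecond)) // 5ms past the second
	millis := t.UnixNano() / int64(time.Millisecond)      // what encode() writes
	back := time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
	fmt.Println(t.Equal(back)) // true: millisecond precision survives the trip
}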

View File

@@ -2,7 +2,9 @@ package sarama
import (
"bufio"
"fmt"
"net"
"regexp"
)
type none struct{}
@@ -146,5 +148,37 @@ var (
V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
V0_11_0_0 = newKafkaVersion(0, 11, 0, 0)
V1_0_0_0 = newKafkaVersion(1, 0, 0, 0)
minVersion = V0_8_2_0
)
func ParseKafkaVersion(s string) (KafkaVersion, error) {
var major, minor, veryMinor, patch uint
var err error
if s[0] == '0' {
err = scanKafkaVersion(s, `^0\.\d+\.\d+\.\d+$`, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch})
} else {
err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
}
if err != nil {
return minVersion, err
}
return newKafkaVersion(major, minor, veryMinor, patch), nil
}
func scanKafkaVersion(s string, pattern string, format string, v [3]*uint) error {
if !regexp.MustCompile(pattern).MatchString(s) {
return fmt.Errorf("invalid version `%s`", s)
}
_, err := fmt.Sscanf(s, format, v[0], v[1], v[2])
return err
}
func (v KafkaVersion) String() string {
if v.version[0] == 0 {
return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3])
} else {
return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2])
}
}
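A short usage sketch for the parser added above, assuming the package is imported under its canonical github.com/Shopify/sarama path:
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	v, err := sarama.ParseKafkaVersion("0.11.0.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(v)                             // 0.11.0.0
	fmt.Println(v.IsAtLeast(sarama.V0_10_0_0)) // true
}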

View File

@@ -19,3 +19,23 @@ func TestVersionCompare(t *testing.T) {
t.Error("0.8.2.1 >= 0.10.0.0")
}
}
func TestVersionParsing(t *testing.T) {
validVersions := []string{"0.8.2.0", "0.8.2.1", "0.9.0.0", "0.10.2.0", "1.0.0"}
for _, s := range validVersions {
v, err := ParseKafkaVersion(s)
if err != nil {
t.Errorf("could not parse valid version %s: %s", s, err)
}
if v.String() != s {
t.Errorf("version %s != %s", v.String(), s)
}
}
invalidVersions := []string{"0.8.2-4", "0.8.20", "1.19.0.0", "1.0.x"}
for _, s := range invalidVersions {
if _, err := ParseKafkaVersion(s); err == nil {
t.Errorf("invalid version %s parsed without error", s)
}
}
}

View File

@@ -6,7 +6,7 @@ TOXIPROXY_VERSION=2.0.0
mkdir -p ${KAFKA_INSTALL_ROOT}
if [ ! -f ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_VERSION}.tgz ]; then
wget --quiet http://apache.mirror.gtcomm.net/kafka/${KAFKA_VERSION}/kafka_2.11-${KAFKA_VERSION}.tgz -O ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_VERSION}.tgz
fi
if [ ! -f ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION} ]; then
wget --quiet https://github.com/Shopify/toxiproxy/releases/download/v${TOXIPROXY_VERSION}/toxiproxy-server-linux-amd64 -O ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION}

View File

@@ -320,6 +320,7 @@ project.lock.json
/test/haxe/bin
/test/hs/TestClient
/test/hs/TestServer
/test/php/php_ext_dir/
/test/py.twisted/_trial_temp/
/test/rb/Gemfile.lock
/test/netcore/**/.vs
@@ -327,6 +328,7 @@ project.lock.json
/test/netcore/**/obj
/test/netcore/**/gen-*
/test/netcore/Thrift
/test/php/php_ext_dir/
/test/rs/Cargo.lock
/test/rs/src/thrift_test.rs
/test/rs/bin/

View File

@@ -19,133 +19,178 @@
# build Apache Thrift on Travis CI - https://travis-ci.org/
#
# Docker Integration
# see: build/docker/README.md
#
sudo: required
dist: trusty
services:
- docker
install:
- if [[ `uname` == "Linux" ]]; then build/docker/refresh.sh; fi
stages:
# - osx # up front for now (for testing)
- docker # docker images
- thrift # thrift build jobs
env:
global:
- TEST_NAME=""
- SCRIPT="cmake.sh"
- BUILD_ARG=""
- BUILD_ENV="-e CC=clang -e CXX=clang++"
- DISTRO=ubuntu-xenial
- BUILD_LIBS="CPP C_GLIB HASKELL JAVA PYTHON TESTING TUTORIALS" # only meaningful for CMake builds
- TRAVIS_BUILD_STAGE=test
# DOCKER_REPO (this works for all builds as a source for docker images - you can override for fork builds in your Travis settings)
- DOCKER_REPO="thrift/thrift-build"
# DOCKER_USER (provide in your Travis settings if you want to build and update docker images once, instead of on every job)
# DOCKER_PASS (same)
jobs:
include:
# ------------------------- phase: osx --------------------------
# - stage: osx
# os: osx
# osx_image: xcode9
# script: build/docker/scripts/autotools.sh
- TEST_NAME="Cross Language Tests (Header, JSON Protocols)"
SCRIPT="cross-test.sh"
BUILD_ARG="-'(header|json)'"
BUILD_ENV="-e CC=clang -e CXX=clang++ -e THRIFT_CROSSTEST_CONCURRENCY=4"
# ========================= stage: docker =========================
- stage: docker
script: true
env:
- JOB="Docker Build ubuntu-trusty 14.04"
- DISTRO=ubuntu-trusty
- TRAVIS_BUILD_STAGE=docker
- script: true
env:
- JOB="Docker Build ubuntu-xenial 16.04"
- DISTRO=ubuntu-xenial
- TRAVIS_BUILD_STAGE=docker
- script: true
env:
- JOB="Docker Build ubuntu-artful 17.10"
- DISTRO=ubuntu-artful
- TRAVIS_BUILD_STAGE=docker
- TEST_NAME="Cross Language Tests (Compact and Multiplexed Protocols)"
SCRIPT="cross-test.sh"
BUILD_ARG="-'(compact|multiplexed)'"
BUILD_ENV="-e CC=clang -e CXX=clang++ -e THRIFT_CROSSTEST_CONCURRENCY=4"
# ========================= stage: thrift =======================
# ------------------------- phase: cross ------------------------
# apache/thrift official PR builds can exceed 50 minutes per job so combine all cross tests
- stage: thrift
script: build/docker/run.sh
if: repo = apache/thrift
env:
- JOB="Cross Language Tests"
- SCRIPT="cross-test.sh"
- BUILD_ARG=""
- BUILD_ENV="-e CC=clang -e CXX=clang++ -e THRIFT_CROSSTEST_CONCURRENCY=4"
# fork based PR builds cannot exceed 50 minutes per job
- stage: thrift
script: build/docker/run.sh
if: repo != apache/thrift
env:
- JOB="Cross Language Tests (Binary Protocol)"
- SCRIPT="cross-test.sh"
- BUILD_ARG="-'(binary)'"
- BUILD_ENV="-e CC=clang -e CXX=clang++ -e THRIFT_CROSSTEST_CONCURRENCY=4"
- TEST_NAME="Autotools (Ubuntu Xenial)"
SCRIPT="autotools.sh"
BUILD_ENV="-e CC=gcc -e CXX=g++"
BUILD_ARG="--enable-plugin" # --without-java --without-lua --without-nodejs --without-perl --without-php --without-php_extension --without-python --without-py3 --without-ruby --without-rust"
- stage: thrift
script: build/docker/run.sh
if: repo != apache/thrift
env:
- JOB="Cross Language Tests (Header, JSON Protocols)"
- SCRIPT="cross-test.sh"
- BUILD_ARG="-'(header|json)'"
- BUILD_ENV="-e CC=clang -e CXX=clang++ -e THRIFT_CROSSTEST_CONCURRENCY=4"
- stage: thrift
script: build/docker/run.sh
if: repo != apache/thrift
env:
- JOB="Cross Language Tests (Compact and Multiplexed Protocols)"
- SCRIPT="cross-test.sh"
- BUILD_ARG="-'(compact|multiplexed)'"
- BUILD_ENV="-e CC=clang -e CXX=clang++ -e THRIFT_CROSSTEST_CONCURRENCY=4"
- TEST_NAME="CMake (Ubuntu Xenial)"
# ------------------------- phase: sca --------------------------
# QA jobs for code analytics and metrics
- stage: thrift
script: build/docker/run.sh
env:
- JOB="Static Code Analysis"
- SCRIPT="sca.sh"
- DISTRO=ubuntu-artful
# C and C++ undefined behavior.
# A binary crashes if undefined behavior occurs and produces a stack trace.
# python is disabled, see: THRIFT-4360
- script: build/docker/run.sh
env:
- JOB="UBSan"
- SCRIPT="ubsan.sh"
- DISTRO=ubuntu-artful
- BUILD_ARG="--without-python --without-py3"
# ------------------------- phase: cmake ------------------------
- script: build/docker/run.sh
env:
- JOB="CMake (Ubuntu Xenial)"
- script: build/docker/run.sh
env:
- JOB="C++98 (Boost Thread)"
- SCRIPT="cmake.sh"
- BUILD_LIBS="CPP TESTING TUTORIALS"
- BUILD_ARG="-DCMAKE_CXX_STANDARD=98 -DCMAKE_CXX_STANDARD_REQUIRED=ON -DCMAKE_CXX_EXTENSIONS=OFF --DWITH_BOOSTTHREADS=ON -DWITH_PYTHON=OFF -DWITH_C_GLIB=OFF -DWITH_JAVA=OFF -DWITH_HASKELL=OFF"
- BUILD_ENV=""
- TEST_NAME="C++ (Std Thread) and Plugin"
BUILD_LIBS="CPP TESTING TUTORIALS"
BUILD_ARG="-DWITH_PLUGIN=ON -DWITH_STDTHREADS=ON -DWITH_PYTHON=OFF -DWITH_C_GLIB=OFF -DWITH_JAVA=OFF -DWITH_HASKELL=OFF"
- script: build/docker/run.sh
env:
- JOB="C++ (Std Thread) and Plugin"
- SCRIPT="cmake.sh"
- BUILD_LIBS="CPP TESTING TUTORIALS"
- BUILD_ARG="-DWITH_PLUGIN=ON -DWITH_STDTHREADS=ON -DWITH_PYTHON=OFF -DWITH_C_GLIB=OFF -DWITH_JAVA=OFF -DWITH_HASKELL=OFF"
- BUILD_ENV="-e CC=clang -e CXX=clang++"
# ------------------------- phase: autotools --------------------
# TODO: Remove them once migrated to CMake
- script: build/docker/run.sh
env:
- JOB="Autotools (Ubuntu Artful)"
- DISTRO=ubuntu-artful
- SCRIPT="autotools.sh"
- BUILD_ENV="-e CC=gcc -e CXX=g++"
- TEST_NAME="Debian Packages"
SCRIPT="dpkg.sh"
- script: build/docker/run.sh
env:
- JOB="Autotools (Ubuntu Xenial)"
- DISTRO=ubuntu-xenial
- SCRIPT="autotools.sh"
- BUILD_ENV="-e CC=gcc -e CXX=g++"
- script: build/docker/run.sh
env:
- JOB="Autotools (Ubuntu Trusty)"
- DISTRO=ubuntu-trusty
- SCRIPT="autotools.sh"
- BUILD_ENV="-e CC=gcc -e CXX=g++"
# ------------------------- phase: dist -------------------------
- script: build/docker/run.sh
env:
- JOB="make dist"
- SCRIPT="make-dist.sh"
- BUILD_ENV="-e CC=gcc -e CXX=g++"
- script: build/docker/run.sh
env:
- JOB="Debian Packages"
- SCRIPT="dpkg.sh"
- BUILD_ENV="-e CC=gcc -e CXX=g++"

View File

@@ -1,5 +1,295 @@
Apache Thrift Changelog
Thrift 0.11.0
--------------------------------------------------------------------------------
## Sub-task
* [THRIFT-2733] - Erlang coding standards
* [THRIFT-2740] - Perl coding standards
* [THRIFT-3610] - Streamline exception handling in Python server handler
* [THRIFT-3686] - Java processor should report internal error on uncaught exception
* [THRIFT-4049] - Skip() should throw TProtocolException.INVALID_DATA on unknown data types
* [THRIFT-4053] - Skip() should throw TProtocolException.INVALID_DATA on unknown data types
* [THRIFT-4136] - Align is_binary() method with is_string() to simplify those checks
* [THRIFT-4137] - Fix remaining undefined behavior invalid vptr casts in Thrift Compiler
* [THRIFT-4138] - Fix remaining undefined behavior invalid vptr casts in C++ library
* [THRIFT-4296] - Fix Ubuntu Xenial build environment for the python language
* [THRIFT-4298] - Fix Ubuntu Xenial build environment for the go 1.6 language
* [THRIFT-4299] - Fix Ubuntu Xenial build environment for the D language
* [THRIFT-4300] - Fix make cross in Ubuntu Xenial docker environment, once all language support issues are fixed
* [THRIFT-4302] - Fix Ubuntu Xenial make cross testing for lua and php7
* [THRIFT-4398] - Update EXTRA_DIST for "make dist"
## Bug
* [THRIFT-381] - Fail fast if configure detects C++ problems
* [THRIFT-1677] - MinGW support broken
* [THRIFT-1805] - Thrift should not swallow ALL exceptions
* [THRIFT-2026] - Fix TCompactProtocol 64 bit builds
* [THRIFT-2642] - Recursive structs don't work in python
* [THRIFT-2889] - stable release 0.9.2, erlang tutorial broken
* [THRIFT-2913] - Ruby Server Thrift::ThreadPoolServer should serve inside a thread
* [THRIFT-2998] - Node.js: Missing header from http request
* [THRIFT-3000] - .NET implementation has trouble with mixed IP modes
* [THRIFT-3281] - Travis CI build passed but the log says BUILD FAILED
* [THRIFT-3358] - Makefile:1362: *** missing separator. Stop.
* [THRIFT-3600] - Make TTwisted server send exception on unexpected handler error
* [THRIFT-3602] - Make Tornado server send exception on unexpected handler error
* [THRIFT-3657] - D TFileWriterTransport close should use non-priority send
* [THRIFT-3700] - Go Map has wrong default value when optional
* [THRIFT-3703] - Unions Field Count Does Not Consider Map/Set/List Fields
* [THRIFT-3730] - server log error twice
* [THRIFT-3778] - go client can not pass method parameter to server of other language if no field_id is given
* [THRIFT-3784] - thrift-maven-plugin generates invalid include directories for IDL in dependency JARs
* [THRIFT-3801] - Node Thrift client throws exception with multiplexer and responses that are bigger than a single buffer
* [THRIFT-3821] - TMemoryBuffer buffer may overflow when resizing
* [THRIFT-3832] - Thrift version 0.9.3 example on Windows, Visual Studio, linking errors during compiling
* [THRIFT-3847] - thrift/config.h includes a #define for VERSION which will likely conflict with existing user environment or code
* [THRIFT-3873] - Fix various build warnings when using Visual Studio
* [THRIFT-3891] - TNonblockingServer configured with more than one IO threads does not always return from serve() upon stop()
* [THRIFT-3892] - Thrift uses TLS SNI extension provided by OpenSSL library. Older version of OpenSSL(< 0.9.8f) may create problem because they do not support 'SSL_set_tlsext_host_name()'.
* [THRIFT-3895] - Build fails using Java 1.8 with Ant < 1.9
* [THRIFT-3896] - map<string,string> data with number string key cannot access that deserialized by php extension
* [THRIFT-3938] - Python TNonblockingServer does not work with SSL
* [THRIFT-3944] - TSSLSocket has dead code in checkHandshake
* [THRIFT-3946] - Java 1.5 compatibility broken for binary fields (java5 option)
* [THRIFT-3960] - Inherited services in Lua generator are not named correctly
* [THRIFT-3962] - Ant build.xml broken on Windows for Java library
* [THRIFT-3963] - Thrift.cabal filename does not match module name
* [THRIFT-3967] - gobject/gparam.h:166:33: warning: enumerator value for G_PARAM_DEPRECATED is not an integer constant expression
* [THRIFT-3968] - Deserializing empty string/binary fields
* [THRIFT-3974] - Using clang-3.8 and ThreadSanitizer on the concurrency_test claims bad PThread behavior
* [THRIFT-3984] - PHP7 extension causes segfault
* [THRIFT-4008] - broken ci due to upstream dependency versioning break
* [THRIFT-4009] - Use @implementer instead of implements in TTwisted.py
* [THRIFT-4010] - Q.fcall messing up with *this* pointer inside called function
* [THRIFT-4011] - Sets of Thrift structs generate Go code that can't be serialized to JSON
* [THRIFT-4012] - Python Twisted implementation uses implements, not compatible with Py3
* [THRIFT-4014] - align C# meta data in AssemblyInfo.cs
* [THRIFT-4015] - Fix wrongly spelled "Thirft"s
* [THRIFT-4016] - testInsanity() impl does not conform to test spec in ThriftTest.thrift
* [THRIFT-4023] - Skip unexpected field types on read/write
* [THRIFT-4024] - Skip() should throw on unknown data types
* [THRIFT-4026] - TSSLSocket doesn't work with Python < 2.7.9
* [THRIFT-4029] - Accelerated protocols do not build from thrift-py 0.10.0 on PyPI
* [THRIFT-4031] - Go plugin generates invalid code for lists of typedef'ed built-in types
* [THRIFT-4033] - Default build WITH_PLUGIN=ON for all builds results in packaging errors
* [THRIFT-4034] - CMake doesn't work to build compiler on MacOS
* [THRIFT-4036] - Add .NET Core environment/build support to the docker image
* [THRIFT-4038] - socket check: checking an unsigned number against >= 0 never fails
* [THRIFT-4042] - ExtractionError when using accelerated thrift in a multiprocess test
* [THRIFT-4043] - thrift perl debian package is placing files in the wrong place
* [THRIFT-4044] - Build job 17 failing on every pull request; hspec core (haskell) 2.4 issue
* [THRIFT-4046] - MinGW with gcc 6.2 does not compile on Windows
* [THRIFT-4060] - Thrift printTo ostream overload mechanism breaks down when types are nested
* [THRIFT-4062] - Remove debug print from TServiceClient
* [THRIFT-4065] - Document Perl ForkingServer signal restriction imposed by THRIFT-3848 and remove unnecessary code
* [THRIFT-4068] - A code comment in Java ServerSocket is wrong around accept()
* [THRIFT-4073] - enum files are still being generated with unused imports
* [THRIFT-4076] - Appveyor builds failing because ant 1.9.8 was removed from apache servers
* [THRIFT-4077] - AI_ADDRCONFIG redefined after recent change to PlatformSocket header
* [THRIFT-4079] - Generated perl code that returns structures from included thrift files is missing a necessary use clause
* [THRIFT-4087] - Spurious exception destroying TThreadedServer because of incorrect join() call
* [THRIFT-4102] - TBufferedTransport performance issue since 0.10.0
* [THRIFT-4106] - concurrency_test fails randomly
* [THRIFT-4108] - c_glib thrift ssl has multiple bugs and deprecated functions
* [THRIFT-4109] - Configure Script uses string comparison for versions
* [THRIFT-4129] - C++ TNonblockingServer fd leak when failing to dispatch new connections
* [THRIFT-4131] - Javascript with WebSocket handles oneway methods wrong
* [THRIFT-4134] - Fix remaining undefined behavior invalid vptr casts
* [THRIFT-4140] - Use of non-thread-safe function gmtime()
* [THRIFT-4141] - Installation of haxe in docker files refers to a redirect link and fails
* [THRIFT-4147] - Rust: protocol should accept transports with non-static lifetime
* [THRIFT-4148] - [maven-thrift-plugin] compile error while import a thrift in dependency jar file.
* [THRIFT-4149] - System.out pollutes log files
* [THRIFT-4154] - PHP close() of a TSocket needs to close any type of socket
* [THRIFT-4158] - minor issue in README-MSYS2.md
* [THRIFT-4159] - Building tests fails on MSYS2 (MinGW64) due to a (small?) linker error
* [THRIFT-4160] - TNonblocking server fix use of closed/freed connections
* [THRIFT-4161] - TNonBlocking server using uninitialized event in error paths
* [THRIFT-4162] - TNonBlocking handling of TSockets in error state is incorrect after fd is closed
* [THRIFT-4164] - Core in TSSLSocket cleanupOpenSSL when destroying a mutex used by openssl
* [THRIFT-4165] - C++ build has many warnings under c++03 due to recent changes, cmake needs better platform-independent language level control
* [THRIFT-4166] - Recent fix to remove boost::lexical_cast usage broke VS2010
* [THRIFT-4167] - Missing compile flag
* [THRIFT-4170] - Support lua 5.1 or earlier properly for object length determination
* [THRIFT-4172] - node.js tutorial client does not import assert, connection issues are not handled properly
* [THRIFT-4177] - Java compiler produces deep copy constructor that could make shallow copy instead
* [THRIFT-4184] - Building on Appveyor: invalid escape sequence \L
* [THRIFT-4185] - fb303 counter encoding fix
* [THRIFT-4189] - Framed/buffered transport Dispose() does not dispose the nested transport
* [THRIFT-4193] - Lower the default maxReadBufferBytes for non-blocking servers
* [THRIFT-4195] - Compilation to GO produces broken code
* [THRIFT-4196] - Cannot generate recursive Rust types
* [THRIFT-4204] - typo in compact spec
* [THRIFT-4206] - Strings in container fields are not decoded properly with py:dynamic and py:utf8strings
* [THRIFT-4208] - C# NamedPipesServer not really working in some scenarios
* [THRIFT-4211] - Fix GError glib management under Thrift
* [THRIFT-4212] - c_glib flush tries to close SSL even if socket is invalid
* [THRIFT-4213] - Travis build fails at curl -sSL https://www.npmjs.com/install.sh | sh
* [THRIFT-4215] - Golang TTransportFactory Pattern Squelches Errors
* [THRIFT-4216] - Golang Http Clients Do Not Respect User Options
* [THRIFT-4218] - Set TCP_NODELAY for PHP client socket
* [THRIFT-4219] - Golang HTTP clients created with Nil buffer
* [THRIFT-4231] - TJSONProtocol throws unexpected non-Thrift-exception on null strings
* [THRIFT-4232] - ./configure does bad ant version check
* [THRIFT-4234] - Travis build fails cross language tests with "Unsupported security protocol type"
* [THRIFT-4237] - Go TServerSocket Race Conditions
* [THRIFT-4240] - Go TSimpleServer does not close properly
* [THRIFT-4243] - Go TSimpleServer race on wait in Stop() method
* [THRIFT-4245] - Golang TFramedTransport's writeBuffer increases if writes to transport failed
* [THRIFT-4246] - Sequence number mismatch on multiplexed clients
* [THRIFT-4247] - Compile fails with openssl 1.1
* [THRIFT-4248] - Compile fails - strncpy, memcmp, memset not declared in src/thrift/transport/TSSLSocket.cpp
* [THRIFT-4251] - Java Epoll Selector Bug
* [THRIFT-4257] - Typescript async callbacks do not provide the correct types
* [THRIFT-4258] - Boost/std thread wrapping faultiness
* [THRIFT-4260] - Go context generation issue. Context is parameter in Interface not in implementation
* [THRIFT-4261] - Go context generation issue: breaking change in generated code regarding thrift.TProcessorFunction interface
* [THRIFT-4262] - Invalid binding to InterlockedCompareExchange64() with 64-bit targets
* [THRIFT-4263] - Fix use after free bug for thrown exceptions
* [THRIFT-4266] - Erlang library throws during skipping fields of composite type (maps, lists, structs, sets)
* [THRIFT-4268] - Erlang library emits debugging output in transport layer
* [THRIFT-4273] - erlang:now/0: Deprecated BIF.
* [THRIFT-4274] - Python feature tests for SSL/TLS failing
* [THRIFT-4279] - Wrong path in include directive in generated Thrift sources
* [THRIFT-4283] - TNamedPipeServer race condition in interrupt
* [THRIFT-4284] - File contains a NBSP: lib/nodejs/lib/thrift/web_server.js
* [THRIFT-4290] - C# nullable option generates invalid code for non-required enum field with default value
* [THRIFT-4292] - TimerManager::remove() is not implemented
* [THRIFT-4307] - Make ssl-open timeout effective in golang client
* [THRIFT-4312] - Erlang client cannot connect to Python server: exception error: econnrefused
* [THRIFT-4313] - Program code of the Erlang tutorial files contain syntax errors
* [THRIFT-4316] - TByteBuffer.java will read too much data if a previous read returns fewer bytes than requested
* [THRIFT-4319] - command line switch for "evhttp" incorrectly resolved to anon pipes
* [THRIFT-4323] - range check errors or NPE in edge cases
* [THRIFT-4324] - field names can conflict with local vars in generated code
* [THRIFT-4328] - Travis CI builds are timing out (job 1) and haxe builds are failing since 9/11
* [THRIFT-4329] - c_glib Doesn't have a multiplexed processor
* [THRIFT-4331] - C++: TSSLSockets bug in handling huge messages, bug in handling polling
* [THRIFT-4332] - Binary protocol has memory leaks
* [THRIFT-4334] - Perl indentation incorrect when defaulting field attribute to a struct
* [THRIFT-4339] - Thrift Framed Transport in Erlang crashes server when client disconnects
* [THRIFT-4340] - Erlang fix a crash on client close
* [THRIFT-4355] - Javascript indentation incorrect when defaulting field attribute to a struct
* [THRIFT-4356] - thrift_protocol call Transport cause Segmentation fault
* [THRIFT-4359] - Haxe compiler looks like it is producing incorrect code for map or set key that is binary type
* [THRIFT-4362] - Missing size-check can lead to huge memory allocation
* [THRIFT-4364] - Website contributing guide erroneously recommends submitting patches in JIRA
* [THRIFT-4365] - Perl generated code uses indirect object syntax, which occasionally causes compilation errors.
* [THRIFT-4367] - python TProcessor.process is missing "self"
* [THRIFT-4370] - Ubuntu Artful cppcheck and flake8 are more stringent and causing SCA build job failures
* [THRIFT-4372] - Pipe write operations across a network are limited to 65,535 bytes per write.
* [THRIFT-4374] - cannot load thrift_protocol due to undefined symbol: _ZTVN10__cxxabiv120__si_class_type_infoE
* [THRIFT-4376] - Coverity high impact issue resolution
* [THRIFT-4377] - haxe. socket handles leak in TSimpleServer
* [THRIFT-4381] - Wrong isset bitfield value after transmission
* [THRIFT-4385] - Go remote client -u flag is broken
* [THRIFT-4392] - compiler/..../plugin.thrift structs mis-ordered blows up ocaml generator
* [THRIFT-4395] - Unable to build in the ubuntu-xenial docker image: clap 2.28 requires Rust 1.20
* [THRIFT-4396] - inconsistent (or plain wrong) version numbers in master/trunk
## Documentation
* [THRIFT-4157] - outdated readme about Haxe installation on Linux
## Improvement
* [THRIFT-105] - make a thrift_spec for a structures with negative tags
* [THRIFT-281] - Cocoa library code needs comments, badly
* [THRIFT-775] - performance improvements for Perl
* [THRIFT-2221] - Generate c++ code with std::shared_ptr instead of boost::shared_ptr.
* [THRIFT-2364] - OCaml: Use Oasis exclusively for build process
* [THRIFT-2504] - TMultiplexedProcessor should allow registering default processor called if no service name is present
* [THRIFT-3207] - Enable build with OpenSSL 1.1.0 series
* [THRIFT-3272] - Perl SSL Authentication Support
* [THRIFT-3357] - Generate EnumSet/EnumMap where elements/keys are enums
* [THRIFT-3369] - Implement SSL/TLS support on C with c_glib
* [THRIFT-3467] - Go Maps for Thrift Sets Should Have Values of Type struct{}
* [THRIFT-3580] - THeader for Haskell
* [THRIFT-3627] - Missing basic code style consistency of JavaScript.
* [THRIFT-3706] - There's no support for Multiplexed protocol on c_glib library
* [THRIFT-3766] - Add getUnderlyingTransport() to TZlibTransport
* [THRIFT-3776] - Go code from multiple thrift files with the same namespace
* [THRIFT-3823] - Escape documentation while generating non escaped documentation
* [THRIFT-3854] - allow users to clear read buffers
* [THRIFT-3859] - Unix Domain Socket Support in Objective-C
* [THRIFT-3921] - C++ code should print enums as strings
* [THRIFT-3926] - There should be an error emitted when http status code is not 200
* [THRIFT-4007] - Micro-optimization of TTransport.py
* [THRIFT-4040] - Add real cause of TNonblockingServerSocket error to exception
* [THRIFT-4064] - Update node library dependencies
* [THRIFT-4069] - All perl packages should have proper namespace, version syntax, and use proper thrift exceptions
* [THRIFT-4071] - Consolidate the Travis CI jobs where possible to put less stress on the Apache Foundation's allocation of CI build slaves
* [THRIFT-4072] - Add the possibility to send custom headers in TCurlClient
* [THRIFT-4075] - Better MinGW support for headers-only boost (without thread library)
* [THRIFT-4081] - Provide a MinGW 64-bit Appveyor CI build for better pull request validation
* [THRIFT-4084] - Improve SSL security in thrift by adding a make cross client that checks to make sure SSLv3 protocol cannot be negotiated
* [THRIFT-4095] - Add multiplexed protocol to Travis CI for make cross
* [THRIFT-4099] - Auto-derive Hash for generated Rust structs
* [THRIFT-4110] - The debian build files do not produce a "-dbg" package for debug symbols of libthrift0
* [THRIFT-4114] - Space after '///' in doc comments
* [THRIFT-4126] - Validate objects in php extension
* [THRIFT-4130] - Ensure Apache Http connection is released back to pool after use
* [THRIFT-4151] - Thrift Mutex Contention Profiling (pthreads) should be disabled by default
* [THRIFT-4176] - Implement a threaded and threadpool server type for Rust
* [THRIFT-4183] - Named pipe client blocks forever on Open() when there is no server at the other end
* [THRIFT-4190] - improve C# TThreadPoolServer defaults
* [THRIFT-4197] - Implement transparent gzip compression for HTTP transport
* [THRIFT-4198] - Ruby should log Thrift internal errors to global logger
* [THRIFT-4203] - thrift server stop gracefully
* [THRIFT-4205] - c_glib is not linking against glib + gobject
* [THRIFT-4209] - warning CS0414 in T[TLS]ServerSocket.cs
* [THRIFT-4210] - include Thrift.45.csproj into CI runs
* [THRIFT-4217] - HttpClient should support gzip and deflate
* [THRIFT-4222] - Support Unix Domain Sockets in Golang TServerSocket
* [THRIFT-4233] - Make THsHaServer.invoker available (get method only) in inherited classes
* [THRIFT-4236] - Support context in go generated code.
* [THRIFT-4238] - JSON generator: make annotation-aware
* [THRIFT-4269] - Don't append '.' to Erlang namespace if it ends in '_'.
* [THRIFT-4270] - Generate Erlang mapping functions for const maps and lists
* [THRIFT-4275] - Add support for zope.interface only, apart from twisted support.
* [THRIFT-4285] - Pull generated send/recv into library to allow behaviour to be customised
* [THRIFT-4287] - Add c++ compiler "no_skeleton" flag option
* [THRIFT-4288] - Implement logging levels properly for node.js
* [THRIFT-4295] - Refresh the Docker image file suite for Ubuntu, Debian, and CentOS
* [THRIFT-4305] - Emit ddoc for generated items
* [THRIFT-4306] - Thrift imports not replicated to D service output
* [THRIFT-4315] - Add default message for TApplicationException
* [THRIFT-4318] - Delphi performance improvements
* [THRIFT-4325] - Simplify automake cross compilation by relying on one global THRIFT compiler path
* [THRIFT-4327] - Improve TimerManager API to allow removing specific task
* [THRIFT-4330] - Allow unused crates in Rust files
* [THRIFT-4333] - Erlang tutorial examples are using a different port (9999)
* [THRIFT-4343] - Change CI builds to use node.js 8.x LTS once available
* [THRIFT-4345] - Create a docker build environment that uses the minimum supported language levels
* [THRIFT-4346] - Allow Zlib transport factory to wrap other transports
* [THRIFT-4348] - Perl HTTP Client custom HTTP headers
* [THRIFT-4350] - Update netcore build for dotnet 2.0 sdk and make cross validation
* [THRIFT-4351] - Use Travis CI Build Stages to optimize the CI build
* [THRIFT-4353] - cannot read via thrift_protocol at server side
* [THRIFT-4378] - add set stopTimeoutUnit method to TThreadPoolServer
## New Feature
* [THRIFT-750] - C++ Compiler Virtual Function Option
* [THRIFT-2945] - Implement support for Rust language
* [THRIFT-3857] - thrift js:node compiler support an object as parameter not an instance of struct
* [THRIFT-3933] - Port official C# .NET library for Thrift to C# .NET Core library
* [THRIFT-4039] - Update of Apache Thrift .Net Core lib
* [THRIFT-4113] - Provide a buffer transport for reading/writing in memory byte stream
## Question
* [THRIFT-2956] - autoconf - possibly undefined macro - AC_PROG_BISON
* [THRIFT-4223] - Add support to the isServing() method for the C++ library
## Task
* [THRIFT-3622] - Fix deprecated uses of std::auto_ptr
* [THRIFT-4028] - Please remove System.out.format from the source code
* [THRIFT-4186] - Build and test rust client in Travis
## Test
* [THRIFT-4264] - PHP - Support both shared & static linking of sockets library
## Wish
* [THRIFT-4344] - Define and maintain the minimum language level for all languages in one place
Thrift 0.10.0
--------------------------------------------------------------------------------
## Bug


@@ -1,49 +1,99 @@
## How to contribute
1. Help to review and verify existing patches
1. Make sure your issue is not already in the [Jira issue tracker](http://issues.apache.org/jira/browse/THRIFT)
1. If not, create a ticket describing the change you're proposing in the [Jira issue tracker](http://issues.apache.org/jira/browse/THRIFT)
1. Contribute your patch using one of the two methods below
# How to Contribute #
### Contributing via a patch
1. Check out the latest version of the source code
* git clone https://git-wip-us.apache.org/repos/asf/thrift.git thrift
1. Modify the source to include the improvement/bugfix
* Remember to provide *tests* for all submitted changes
* When bugfixing: add a test that will isolate the bug *before* applying the change that fixes it
* Verify that you follow [Thrift Coding Standards](/docs/coding_standards) (you can run 'make style', which ensures proper format for some languages)
1. Create a patch from the project root directory (e.g. you@dev:~/thrift $ ):
* git diff > ../thrift-XXX-my-new-feature.patch
1. Attach the newly generated patch to the issue
1. Wait for other contributors or committers to review your new addition
1. Wait for a committer to commit your patch
### Contributing via GitHub pull requests
1. Create a fork of http://github.com/apache/thrift
1. Create a branch for your changes (best practice is issue as branch name, e.g. THRIFT-9999)
1. Modify the source to include the improvement/bugfix
* Remember to provide *tests* for all submitted changes
* When bugfixing: add a test that will isolate the bug *before* applying the change that fixes it
* Verify that you follow [Thrift Coding Standards](/docs/coding_standards) (you can run 'make style', which ensures proper format for some languages)
* Verify that your change works on other platforms by adding a GitHub service hook to [Travis CI](http://docs.travis-ci.com/user/getting-started/#Step-one%3A-Sign-in) and [AppVeyor](http://www.appveyor.com/docs)
1. Commit and push changes to your branch (please use issue name and description as commit title, e.g. THRIFT-9999 make it perfect)
1. Issue a pull request with the Jira ticket number you are working on in its name
1. Wait for other contributors or committers to review your new addition
1. Wait for a committer to commit your patch
### More info
Plenty of information on why and how to contribute is available on the Apache Software Foundation (ASF) web site. In particular, we recommend the following:
Thank you for your interest in contributing to the Apache Thrift project! Information on why and how to contribute is available on the Apache Software Foundation (ASF) web site. In particular, we recommend the following to become acquainted with Apache Contributions:
* [Contributors Tech Guide](http://www.apache.org/dev/contributors)
* [Get involved!](http://www.apache.org/foundation/getinvolved.html)
* [Legal aspects on Submission of Contributions (Patches)](http://www.apache.org/licenses/LICENSE-2.0.html#contributions)
## If you want to build the project locally ##
For Windows systems, see our detailed instructions on the [CMake README](/build/cmake/README.md).
For Windows Native C++ builds, see our detailed instructions on the [WinCPP README](/build/wincpp/README.md).
For unix systems, see our detailed instructions on the [Docker README](/build/docker/README.md).
## If you want to review open issues... ##
1. Review the [GitHub Pull Request Backlog](https://github.com/apache/thrift/pulls). Code reviews are open to all.
2. Review the [Jira issue tracker](http://issues.apache.org/jira/browse/THRIFT). You can search for tickets relating to languages you are interested in or currently use with Thrift; for example, a Jira search (Issues -> Search For Issues) query of ``project = THRIFT AND component in ("Erlang - Library") and status not in (resolved, closed)`` will locate all the open Erlang Library issues.
## If you discovered a defect... ##
1. Check to see if the issue is already in the [Jira issue tracker](http://issues.apache.org/jira/browse/THRIFT).
1. If not, create a ticket describing the change you're proposing in the Jira issue tracker.
1. Contribute your code changes using the GitHub pull request method:
## Contributing via GitHub pull requests ##
This is the preferred method of submitting changes. When you submit a pull request through GitHub, it activates the continuous integration (CI) build systems at AppVeyor and Travis, which build your changes on a variety of Windows and Linux configurations and run all the test suites. A condensed command sketch of this workflow follows the list below.
1. Create a fork in your GitHub account of http://github.com/apache/thrift
1. Clone the fork to your development system.
1. Create a branch for your changes (best practice is issue as branch name, e.g. THRIFT-9999).
1. Modify the source to include the improvement/bugfix, and:
* Remember to provide *tests* for all submitted changes!
* Use test-driven development (TDD): add a test that will isolate the bug *before* applying a change that fixes it.
* Verify that you follow [Thrift Coding Standards](/docs/coding_standards) (you can run 'make style', which ensures proper format for some languages).
* [*optional*] Verify that your change works on other platforms by adding a GitHub service hook to [Travis CI](http://docs.travis-ci.com/user/getting-started/#Step-one%3A-Sign-in) and [AppVeyor](http://www.appveyor.com/docs). You can use this technique to run the Thrift CI jobs in your account to check your changes before they are made public. Every GitHub pull request into Thrift will run the full CI build and test suite on your changes.
1. Squash your changes to a single commit. This is very important as it makes the process of applying your commit upstream much easier.
1. Commit and push changes to your branch (please use issue name and description as commit title, e.g. "THRIFT-9999: make it perfect").
1. Use GitHub to create a pull request going from your branch to apache:master. Ensure that the Jira ticket number is at the beginning of the title of your pull request, same as the commit title.
1. Wait for other contributors or committers to review your new addition, and for a CI build to complete.
1. Wait for a committer to commit your patch. You can nudge the committers if necessary by sending a message to the [Apache Thrift mailing list](https://thrift.apache.org/mailing).
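As a condensed sketch, the steps above map to the following commands; the fork URL, branch name, and commit message are illustrative placeholders:

    git clone https://github.com/YOUR-USERNAME/thrift.git
    cd thrift
    git remote add upstream https://github.com/apache/thrift.git
    git checkout -b THRIFT-9999
    # ... modify sources, add tests (git add any new files), run 'make style' ...
    git commit -am "THRIFT-9999: make it perfect"
    git push -u origin THRIFT-9999

Then open the pull request on GitHub from YOUR-USERNAME:THRIFT-9999 into apache:master.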
## Contributing via Patch ##
Some changes do not require a build, for example in documentation. For changes that are not code or build related, you can submit a patch on Jira for review. To create a patch from changes in your local directory:
git diff > ../THRIFT-NNNN.patch
then wait for contributors or committers to review your changes, and then for a committer to apply your patch.
## GitHub recipes for Pull Requests ##
Sometimes committers may ask you to take actions in your pull requests. Here are some recipes that will help you accomplish those requests. These examples assume you are working on Jira issue THRIFT-9999. You should also be familiar with the [upstream](https://help.github.com/articles/syncing-a-fork/) repository concept.
### Squash your changes ###
If you have not submitted a pull request yet, or if you have not yet rebased your existing pull request, you can squash all your commits down to a single commit. This makes life easier for the committers. If your pull request on GitHub has more than one commit, you should do this.
1. Use the command ``git log`` to identify how many commits you made since you began.
2. Use the command ``git rebase -i HEAD~N`` where N is the number of commits.
3. Leave "pull" in the first line.
4. Change all other lines from "pull" to "fixup".
5. All your changes are now in a single commit.
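For example, squashing three commits with ``git rebase -i HEAD~3`` opens a todo list in your editor; the hashes and messages below are illustrative:

    pick a1b2c3d THRIFT-9999: add the new feature
    pick d4e5f6a address review comments
    pick b7c8d9e fix typo

Change it to:

    pick a1b2c3d THRIFT-9999: add the new feature
    fixup d4e5f6a address review comments
    fixup b7c8d9e fix typo

On save and exit, git folds the second and third commits into the first, keeping the first commit's message.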
If you already have a pull request outstanding, you will need to do a "force push" to overwrite it since you changed your commit history:
git push -u origin THRIFT-9999 --force
A more detailed walkthrough of a squash can be found at [Git Ready](http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html).
### Rebase your pull request ###
If your pull request has a conflict with master, it needs to be rebased:
git checkout THRIFT-9999
git rebase upstream master
(resolve any conflicts, make sure it builds)
git push -u origin THRIFT-9999 --force
### Fix a bad merge ###
If your pull request contains commits that are not yours, then you should use the following technique to fix the bad merge in your branch:
git checkout master
git pull upstream master
git checkout -b THRIFT-9999-take-2
git cherry-pick ...
(pick only your commits from your original pull request in ascending chronological order)
squash your changes to a single commit if there is more than one (see above)
git push -u origin THRIFT-9999-take-2:THRIFT-9999
This procedure applies only your commits, in order, onto the current master; you then squash them into a single commit and force push your local THRIFT-9999-take-2 into the remote THRIFT-9999 that backs your pull request, replacing all of its commits with the new one.


@@ -1,61 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Goal: provide a thrift-compiler Docker image
#
# Usage:
# docker run -v "${PWD}:/data" thrift/thrift-compiler -gen cpp -o /data/ /data/test/ThriftTest.thrift
#
# Further details on Docker for Thrift are available under build/docker/
#
# TODO: push to apache/thrift-compiler instead of thrift/thrift-compiler
FROM debian:jessie
MAINTAINER Apache Thrift <dev@thrift.apache.org>
ENV DEBIAN_FRONTEND noninteractive
ADD . /thrift
RUN buildDeps=" \
flex \
bison \
g++ \
make \
cmake \
curl \
"; \
apt-get update && apt-get install -y --no-install-recommends $buildDeps \
&& mkdir /tmp/cmake-build && cd /tmp/cmake-build \
&& cmake \
-DBUILD_COMPILER=ON \
-DBUILD_LIBRARIES=OFF \
-DBUILD_TESTING=OFF \
-DBUILD_EXAMPLES=OFF \
/thrift \
&& cmake --build . --config Release \
&& make install \
&& curl -k -sSL "https://storage.googleapis.com/golang/go1.5.2.linux-amd64.tar.gz" -o /tmp/go.tar.gz \
&& tar xzf /tmp/go.tar.gz -C /tmp \
&& cp /tmp/go/bin/gofmt /usr/bin/gofmt \
&& apt-get purge -y --auto-remove $buildDeps \
&& apt-get clean \
&& rm -rf /tmp/* \
&& rm -rf /var/lib/apt/lists/*
ENTRYPOINT ["thrift"]

vendor/github.com/apache/thrift/LANGUAGES.md (generated, vendored, new file)

@@ -0,0 +1,305 @@
# Apache Thrift Language Support #
Last Modified: 2017-10-05<br>
Version: 0.11.0+
Thrift supports many programming languages and has an impressive test suite that exercises most of the languages, protocols, and transports, representing a matrix of thousands of possible combinations. Each language typically has a minimum required version as well as support libraries, some mandatory and some optional. All of this information is provided below to help you assess whether you can use Apache Thrift with your project. Obviously this is a complex matrix to maintain and may not be correct in all cases; if you spot an error, please inform the developers using the mailing list.
Apache Thrift has a choice of two build systems. The `autoconf` build system is the most complete and is used to build all supported languages. The `cmake` build system has been designated by the project to replace `autoconf`; however, this transition will take quite some time to complete.
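As a rough sketch, the two builds are typically driven as follows from a source checkout (assuming your platform's prerequisites are installed; options and paths vary by configuration):

    # autoconf: the most complete build
    ./bootstrap.sh
    ./configure
    make

    # cmake: the designated replacement, still in transition
    mkdir cmake-build && cd cmake-build
    cmake ..
    make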
The Language/Library Levels indicate the minimum and maximum versions that are used in the [continuous integration environments](build/docker/README.md) (AppVeyor, Travis) for Apache Thrift. Note that while a language may contain support for protocols, transports, and servers, the extent to which each is tested as part of the overall build process varies. The definitive integration test for the project is called the "cross" test, which executes a test matrix with clients and servers communicating across languages.
<table style="font-size: 65%; padding: 1px;">
<thead>
<tr>
<th rowspan=2>Language</th>
<th colspan=2 align=center>Build Systems</th>
<th colspan=2 align=center>Lang/Lib Levels</th>
<th colspan=6 align=center>Low-Level Transports</th>
<th colspan=3 align=center>Transport Wrappers</th>
<th colspan=4 align=center>Protocols</th>
<th colspan=5 align=center>Servers</th>
<th rowspan=2>Open Issues</th>
</tr>
<tr>
<!-- Build Systems ---------><th>autoconf</th><th>cmake</th>
<!-- Lang/Lib Levels -------><th>Min</th><th>Max</th>
<!-- Low-Level Transports --><th><a href="https://en.wikipedia.org/wiki/Unix_domain_socket">Domain</a></th><th>&nbsp;File&nbsp;</th><th>Memory</th><th>&nbsp;Pipe&nbsp;</th><th>Socket</th><th>&nbsp;TLS&nbsp;</th>
<!-- Transport Wrappers ----><th>Framed</th><th>&nbsp;http&nbsp;</th><th>&nbsp;zlib&nbsp;</th>
<!-- Protocols -------------><th><a href="doc/specs/thrift-binary-protocol.md">Binary</a></th><th><a href="doc/specs/thrift-compact-protocol.md">Compact</a></th><th>&nbsp;JSON&nbsp;</th><th>Multiplex</th>
<!-- Servers ---------------><th>Forking</th><th>Nonblocking</th><th>Simple</th><th>Threaded</th><th>ThreadPool</th>
</tr>
</thead>
<tbody>
<tr align=center>
<td align=left><a href="lib/as3/README.md">ActionScript</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Language Levels -------><td colspan=2>ActionScript 3</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Servers ---------------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12313722">ActionScript</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/c_glib/README.md">C (glib)</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Language Levels -------><td>2.40.2</td><td>2.54.0</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Servers ---------------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12313854">C (glib)</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/cpp/README.md">C++</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Language Levels -------><td colspan=2>C++98, gcc </td>
<!-- Low-Level Transports --><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Servers ---------------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12312313">C++</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/csharp/README.md">C#</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Language Levels -------><td>.NET&nbsp;3.5 / mono&nbsp;3.2.8.0</td><td>.NET&nbsp;4.6.1 / mono&nbsp;4.6.2.7</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Servers ---------------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12312362">C# (.NET)</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/cocoa/README.md">Cocoa</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Language Levels -------><td colspan=2>unknown</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Servers ---------------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12312398">Cocoa</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/d/README.md">D</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Language Levels -------><td>2.070.2</td><td>2.076.0</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Servers ---------------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12317904">D</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/dart/README.md">Dart</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Language Levels -------><td>1.20.1</td><td>1.24.2</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Servers ---------------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12328006">Dart</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/delphi/README.md">Delphi</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Language Levels -------><td>2010</td><td>unknown</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Servers ---------------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12316601">Delphi</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/netcore/README.md">.NET Core</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Language Levels -------><td>2.0.0</td><td>2.0.3</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Servers ---------------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12331176">.NET Core</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/erl/README.md">Erlang</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Language Levels -------><td>R16B03</td><td>20.0.4</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Servers ---------------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12312390">Erlang</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/go/README.md">Go</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Language Levels -------><td>1.2.1</td><td>1.8.3</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Servers ---------------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12314307">Go</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/hs/README.md">Haskell</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Language Levels -------><td>7.6.3</td><td>8.0.2</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Servers ---------------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12312704">Haskell</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/haxe/README.md">Haxe</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Language Levels -------><td colspan=2>3.2.1</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Servers ---------------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12324347">Haxe</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/java/README.md">Java (SE)</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Language Levels -------><td>1.7.0_151</td><td>1.8.0_144</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Servers ---------------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12312314">Java SE</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/javame/README.md">Java (ME)</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Language Levels -------><td colspan=2>unknown</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Servers ---------------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12313759">Java ME</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/js/README.md">Javascript</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Language Levels -------><td colspan=2>unknown</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Protocols -------------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Servers ---------------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12313418">Javascript</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/lua/README.md">Lua</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Language Levels -------><td>5.1.5</td><td>5.2.4</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Servers ---------------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12322659">Lua</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/nodejs/README.md">node.js</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Language Levels -------><td>4.2.6</td><td>8.9.1</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Servers ---------------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12314320">node.js</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/ocaml/README.md">OCaml</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Language Levels -------><td>4.02.3</td><td>4.04.0</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Servers ---------------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12313660">OCaml</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/perl/README.md">Perl</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Language Levels -------><td>5.18.2</td><td>5.26.0</td>
<!-- Low-Level Transports --><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Servers ---------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12312312">Perl</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/php/README.md">PHP</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Language Levels -------><td>5.5.9</td><td>7.1.8</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Servers ---------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12312431">PHP</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/py/README.md">Python</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Language Levels -------><td>2.7.6, 3.4.3</td><td>2.7.14, 3.6.3</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Servers ---------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12312315">Python</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/rb/README.md">Ruby</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Language Levels -------><td>1.9.3p484</td><td>2.3.3p222</td>
<!-- Low-Level Transports --><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Servers ---------------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12312316">Ruby</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/rs/README.md">Rust</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Language Levels -------><td>1.15.1</td><td>1.18.0</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Servers ---------------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12331420">Rust</a></td>
</tr>
<tr align=center>
<td align=left><a href="lib/st/README.md">Smalltalk</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Language Levels -------><td colspan=2>unknown</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Servers ---------------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<td align=left><a href="https://issues.apache.org/jira/browse/THRIFT/component/12313861">Smalltalk</a></td>
</tr>
</tbody>
<tfoot>
<tr>
<th rowspan=2>Language</th>
<!-- Build Systems ---------><th>autoconf</th><th>cmake</th>
<!-- Lang/Lib Levels -------><th>Min</th><th>Max</th>
<!-- Low-Level Transports --><th><a href="https://en.wikipedia.org/wiki/Unix_domain_socket">Domain</a></th></th><th>&nbsp;File&nbsp;</th><th>Memory</th><th>&nbsp;Pipe&nbsp;</th><th>Socket</th><th>&nbsp;TLS&nbsp;</th>
<!-- Transport Wrappers ----><th>Framed</th><th>&nbsp;http&nbsp;</th><th>&nbsp;zlib&nbsp;</th>
<!-- Protocols -------------><th><a href="doc/specs/thrift-binary-protocol.md">Binary</a></th><th><a href="doc/specs/thrift-compact-protocol.md">Compact</a></th><th>&nbsp;JSON&nbsp;</th><th>Multiplex</th>
<!-- Servers ---------------><th>Forking</th><th>Nonblocking</th><th>Simple</th><th>Threaded</th><th>ThreadPool</th>
<th rowspan=2>Open Issues</th>
</tr>
<tr>
<th colspan=2 align=center>Build Systems</th>
<th colspan=2 align=center>Lang/Lib Levels</th>
<th colspan=6 align=center>Low-Level Transports</th>
<th colspan=3 align=center>Transport Wrappers</th>
<th colspan=4 align=center>Protocols</th>
<th colspan=5 align=center>Servers</th>
</tr>
</tfoot>
</table>


@@ -43,7 +43,7 @@ dist-hook:
find $(distdir) -type d \( -iname ".svn" -or -iname ".git" \) | xargs rm -rf
print-version:
@echo $(VERSION)
@echo $(PACKAGE_VERSION)
.PHONY: precross cross
precross-%: all
@@ -54,7 +54,7 @@ empty :=
space := $(empty) $(empty)
comma := ,
CROSS_LANGS = @MAYBE_CPP@ @MAYBE_C_GLIB@ @MAYBE_D@ @MAYBE_JAVA@ @MAYBE_CSHARP@ @MAYBE_PYTHON@ @MAYBE_PY3@ @MAYBE_RUBY@ @MAYBE_HASKELL@ @MAYBE_PERL@ @MAYBE_PHP@ @MAYBE_GO@ @MAYBE_NODEJS@ @MAYBE_DART@ @MAYBE_ERLANG@ @MAYBE_LUA@ @MAYBE_RS@
CROSS_LANGS = @MAYBE_CPP@ @MAYBE_C_GLIB@ @MAYBE_D@ @MAYBE_JAVA@ @MAYBE_CSHARP@ @MAYBE_PYTHON@ @MAYBE_PY3@ @MAYBE_RUBY@ @MAYBE_HASKELL@ @MAYBE_PERL@ @MAYBE_PHP@ @MAYBE_GO@ @MAYBE_NODEJS@ @MAYBE_DART@ @MAYBE_ERLANG@ @MAYBE_LUA@ @MAYBE_RS@ @MAYBE_DOTNETCORE@
CROSS_LANGS_COMMA_SEPARATED = $(subst $(space),$(comma),$(CROSS_LANGS))
if WITH_PY3
@@ -111,9 +111,13 @@ EXTRA_DIST = \
.clang-format \
.editorconfig \
.travis.yml \
.rustfmt.toml \
.dockerignore \
appveyor.yml \
bower.json \
build \
bootstrap.sh \
cleanup.sh \
CMakeLists.txt \
composer.json \
contrib \
@@ -123,7 +127,7 @@ EXTRA_DIST = \
doap.rdf \
package.json \
sonar-project.properties \
Dockerfile \
LANGUAGES.md \
LICENSE \
CHANGES \
NOTICE \


@@ -1,5 +1,5 @@
Apache Thrift
Copyright 2006-2010 The Apache Software Foundation.
Copyright 2006-2017 The Apache Software Foundation.
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
The Apache Software Foundation (http://www.apache.org/).


@@ -1,7 +1,7 @@
Apache Thrift
=============
Last Modified: 2014-03-16
Last Modified: 2017-11-10
License
=======
@@ -33,15 +33,22 @@ level processing. The code generation system takes a simple definition
language as its input and generates code across programming languages that
uses the abstracted stack to build interoperable RPC clients and servers.
![Apache Thrift Layered Architecture](doc/images/thrift-layers.png)
Thrift makes it easy for programs written in different programming
languages to share data and call remote procedures. With support
for [over 20 programming languages](LANGUAGES.md), chances are Thrift
supports the ones that you currently use.
Thrift is specifically designed to support non-atomic version changes
across client and server code.
For more details on Thrift's design and implementation, take a gander at
the Thrift whitepaper included in this distribution or at the README.md files
the Thrift whitepaper included in this distribution or at the README.md file
in your particular subdirectory of interest.
Hierarchy
=========
Project Hierarchy
=================
thrift/
@@ -60,6 +67,7 @@ thrift/
php/
py/
rb/
...
test/
@@ -162,3 +170,9 @@ To run the cross-language test suite, please run:
This will run a set of tests that use different language clients and
servers.
Development
===========
To build the same way Travis CI builds the project, you should use docker.
We have [comprehensive building instructions for docker](build/docker/README.md).

View File

@@ -1,6 +1,6 @@
Pod::Spec.new do |s|
s.name = "Thrift"
s.version = "1.0.0"
s.version = "0.11.0"
s.summary = "Apache Thrift is a lightweight, language-independent software stack with an associated code generation mechanism for RPC."
s.description = <<-DESC
The Apache Thrift software framework, for scalable cross-language services development, combines a software stack with a code generation engine to build services that work efficiently and seamlessly between C++, Java, Python, PHP, Ruby, Erlang, Perl, Haskell, C#, Cocoa, JavaScript, Node.js, Smalltalk, OCaml and Delphi and other languages.
@@ -13,6 +13,6 @@ The Apache Thrift software framework, for scalable cross-language services devel
s.osx.deployment_target = '10.8'
s.ios.framework = 'CFNetwork'
s.osx.framework = 'CoreServices'
s.source = { :git => "https://github.com/apache/thrift.git", :tag => "thrift-1.0.0" }
s.source = { :git => "https://github.com/apache/thrift.git", :tag => "thrift-0.11.0" }
s.source_files = 'lib/cocoa/src/**/*.{h,m,swift}'
end

View File

@@ -1,5 +1,5 @@
# ===========================================================================
# http://www.gnu.org/software/autoconf-archive/ax_check_openssl.html
# https://www.gnu.org/software/autoconf-archive/ax_check_openssl.html
# ===========================================================================
#
# SYNOPSIS
@@ -32,7 +32,7 @@
# and this notice are preserved. This file is offered as-is, without any
# warranty.
#serial 8
#serial 10
AU_ALIAS([CHECK_SSL], [AX_CHECK_OPENSSL])
AC_DEFUN([AX_CHECK_OPENSSL], [
@@ -51,7 +51,7 @@ AC_DEFUN([AX_CHECK_OPENSSL], [
], [
# if pkg-config is installed and openssl has installed a .pc file,
# then use that information and don't search ssldirs
AC_PATH_PROG([PKG_CONFIG], [pkg-config])
AC_CHECK_TOOL([PKG_CONFIG], [pkg-config])
if test x"$PKG_CONFIG" != x""; then
OPENSSL_LDFLAGS=`$PKG_CONFIG openssl --libs-only-L 2>/dev/null`
if test $? = 0; then

View File

@@ -0,0 +1,177 @@
# ===========================================================================
# https://www.gnu.org/software/autoconf-archive/ax_compare_version.html
# ===========================================================================
#
# SYNOPSIS
#
# AX_COMPARE_VERSION(VERSION_A, OP, VERSION_B, [ACTION-IF-TRUE], [ACTION-IF-FALSE])
#
# DESCRIPTION
#
# This macro compares two version strings. Due to the varying number of
# minor-version numbers that can exist, and the fact that string
# comparisons are not compatible with numeric comparisons, this is not
# necessarily trivial to do in an autoconf script. This macro makes doing
# these comparisons easy.
#
# The six basic comparisons are available, as well as checking equality
# limited to a certain number of minor-version levels.
#
# The operator OP determines what type of comparison to do, and can be one
# of:
#
# eq - equal (test A == B)
# ne - not equal (test A != B)
# le - less than or equal (test A <= B)
# ge - greater than or equal (test A >= B)
# lt - less than (test A < B)
# gt - greater than (test A > B)
#
# Additionally, the eq and ne operator can have a number after it to limit
# the test to that number of minor versions.
#
# eq0 - equal up to the length of the shorter version
# ne0 - not equal up to the length of the shorter version
# eqN - equal up to N sub-version levels
# neN - not equal up to N sub-version levels
#
# When the condition is true, shell commands ACTION-IF-TRUE are run,
# otherwise shell commands ACTION-IF-FALSE are run. The environment
# variable 'ax_compare_version' is always set to either 'true' or 'false'
# as well.
#
# Examples:
#
# AX_COMPARE_VERSION([3.15.7],[lt],[3.15.8])
# AX_COMPARE_VERSION([3.15],[lt],[3.15.8])
#
# would both be true.
#
# AX_COMPARE_VERSION([3.15.7],[eq],[3.15.8])
# AX_COMPARE_VERSION([3.15],[gt],[3.15.8])
#
# would both be false.
#
# AX_COMPARE_VERSION([3.15.7],[eq2],[3.15.8])
#
# would be true because it is only comparing two minor versions.
#
# AX_COMPARE_VERSION([3.15.7],[eq0],[3.15])
#
# would be true because it is only comparing the lesser number of minor
# versions of the two values.
#
# Note: The characters that separate the version numbers do not matter. An
# empty string is the same as version 0. OP is evaluated by autoconf, not
# configure, so must be a string, not a variable.
#
# The author would like to acknowledge Guido Draheim whose advice about
# the m4_case and m4_ifvaln functions makes this macro only include the
# portions necessary to perform the specific comparison specified by the
# OP argument in the final configure script.
#
# LICENSE
#
# Copyright (c) 2008 Tim Toolan <toolan@ele.uri.edu>
#
# Copying and distribution of this file, with or without modification, are
# permitted in any medium without royalty provided the copyright notice
# and this notice are preserved. This file is offered as-is, without any
# warranty.
#serial 12
dnl #########################################################################
AC_DEFUN([AX_COMPARE_VERSION], [
AC_REQUIRE([AC_PROG_AWK])
# Used to indicate true or false condition
ax_compare_version=false
# Convert the two version strings to be compared into a format that
# allows a simple string comparison. The end result is that a version
# string of the form 1.12.5-r617 will be converted to the form
# 0001001200050617. In other words, each number is zero padded to four
# digits, and non digits are removed.
AS_VAR_PUSHDEF([A],[ax_compare_version_A])
A=`echo "$1" | sed -e 's/\([[0-9]]*\)/Z\1Z/g' \
-e 's/Z\([[0-9]]\)Z/Z0\1Z/g' \
-e 's/Z\([[0-9]][[0-9]]\)Z/Z0\1Z/g' \
-e 's/Z\([[0-9]][[0-9]][[0-9]]\)Z/Z0\1Z/g' \
-e 's/[[^0-9]]//g'`
AS_VAR_PUSHDEF([B],[ax_compare_version_B])
B=`echo "$3" | sed -e 's/\([[0-9]]*\)/Z\1Z/g' \
-e 's/Z\([[0-9]]\)Z/Z0\1Z/g' \
-e 's/Z\([[0-9]][[0-9]]\)Z/Z0\1Z/g' \
-e 's/Z\([[0-9]][[0-9]][[0-9]]\)Z/Z0\1Z/g' \
-e 's/[[^0-9]]//g'`
dnl # In the case of le, ge, lt, and gt, the strings are sorted as necessary
dnl # then the first line is used to determine if the condition is true.
dnl # The sed right after the echo is to remove any indented white space.
m4_case(m4_tolower($2),
[lt],[
ax_compare_version=`echo "x$A
x$B" | sed 's/^ *//' | sort -r | sed "s/x${A}/false/;s/x${B}/true/;1q"`
],
[gt],[
ax_compare_version=`echo "x$A
x$B" | sed 's/^ *//' | sort | sed "s/x${A}/false/;s/x${B}/true/;1q"`
],
[le],[
ax_compare_version=`echo "x$A
x$B" | sed 's/^ *//' | sort | sed "s/x${A}/true/;s/x${B}/false/;1q"`
],
[ge],[
ax_compare_version=`echo "x$A
x$B" | sed 's/^ *//' | sort -r | sed "s/x${A}/true/;s/x${B}/false/;1q"`
],[
dnl Split the operator from the subversion count if present.
m4_bmatch(m4_substr($2,2),
[0],[
# A count of zero means use the length of the shorter version.
# Determine the number of characters in A and B.
ax_compare_version_len_A=`echo "$A" | $AWK '{print(length)}'`
ax_compare_version_len_B=`echo "$B" | $AWK '{print(length)}'`
# Set A to no more than B's length and B to no more than A's length.
A=`echo "$A" | sed "s/\(.\{$ax_compare_version_len_B\}\).*/\1/"`
B=`echo "$B" | sed "s/\(.\{$ax_compare_version_len_A\}\).*/\1/"`
],
[[0-9]+],[
# A count greater than zero means use only that many subversions
A=`echo "$A" | sed "s/\(\([[0-9]]\{4\}\)\{m4_substr($2,2)\}\).*/\1/"`
B=`echo "$B" | sed "s/\(\([[0-9]]\{4\}\)\{m4_substr($2,2)\}\).*/\1/"`
],
[.+],[
AC_WARNING(
[illegal OP numeric parameter: $2])
],[])
# Pad zeros at end of numbers to make same length.
ax_compare_version_tmp_A="$A`echo $B | sed 's/./0/g'`"
B="$B`echo $A | sed 's/./0/g'`"
A="$ax_compare_version_tmp_A"
# Check for equality or inequality as necessary.
m4_case(m4_tolower(m4_substr($2,0,2)),
[eq],[
test "x$A" = "x$B" && ax_compare_version=true
],
[ne],[
test "x$A" != "x$B" && ax_compare_version=true
],[
AC_WARNING([illegal OP parameter: $2])
])
])
AS_VAR_POPDEF([A])dnl
AS_VAR_POPDEF([B])dnl
dnl # Execute ACTION-IF-TRUE / ACTION-IF-FALSE.
if test "$ax_compare_version" = "true" ; then
m4_ifvaln([$4],[$4],[:])dnl
m4_ifvaln([$5],[else $5])dnl
fi
]) dnl AX_COMPARE_VERSION
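The normalization step above can be exercised outside of m4; a minimal sketch, assuming GNU sed (the doubled `[[0-9]]` brackets are m4 quoting and become `[0-9]` in plain shell):

    # pad every number in a version string to four digits, then drop the non-digits
    echo "1.12.5-r617" | sed -e 's/\([0-9]*\)/Z\1Z/g' \
        -e 's/Z\([0-9]\)Z/Z0\1Z/g' \
        -e 's/Z\([0-9][0-9]\)Z/Z0\1Z/g' \
        -e 's/Z\([0-9][0-9][0-9]\)Z/Z0\1Z/g' \
        -e 's/[^0-9]//g'
    # prints 0001001200050617, matching the comment in the macro body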

View File

@@ -118,7 +118,7 @@ AC_DEFUN([AX_CHECK_JAVA_CLASS],
AC_DEFUN([AX_CHECK_ANT_VERSION],
[
AC_MSG_CHECKING(for ant version > $2)
ANT_VALID=`expr $($1 -version 2>/dev/null | sed -n 's/.*version \(@<:@0-9\.@:>@*\).*/\1/p') \>= $2`
ANT_VALID=`expr "x$(printf "$2\n$($1 -version 2>/dev/null | sed -n 's/.*version \(@<:@0-9\.@:>@*\).*/\1/p')" | sort -t '.' -k 1,1 -k 2,2 -k 3,3 -g | sed -n 1p)" = "x$2"`
if test "x$ANT_VALID" = "x1" ; then
AC_MSG_RESULT(yes)
else
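The rewritten check sorts the required and detected versions and tests whether the required one sorts first, presumably because the old `expr ... \>= ...` form compares dotted versions as plain strings (e.g. 1.10 would sort before 1.9). A standalone sketch of the same idea, assuming GNU sort (the `@<:@` / `@:>@` quadrigraphs above are autoconf escapes for `[` and `]`):

    required=1.7
    detected=$(ant -version 2>/dev/null | sed -n 's/.*version \([0-9.]*\).*/\1/p')
    smallest=$(printf '%s\n%s\n' "$required" "$detected" | sort -t '.' -k 1,1 -k 2,2 -k 3,3 -g | sed -n 1p)
    # the detected version is new enough exactly when the required one sorts first
    [ "x$smallest" = "x$required" ] && echo "ant is new enough"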

View File

@@ -1,5 +1,5 @@
# ===========================================================================
# http://www.gnu.org/software/autoconf-archive/ax_lua.html
# https://www.gnu.org/software/autoconf-archive/ax_lua.html
# ===========================================================================
#
# SYNOPSIS
@@ -166,7 +166,7 @@
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
# with this program. If not, see <https://www.gnu.org/licenses/>.
#
# As a special exception, the respective Autoconf Macro's copyright owner
# gives unlimited permission to copy, distribute and modify the configure
@@ -181,7 +181,7 @@
# modified version of the Autoconf Macro, you may extend this special
# exception to the GPL to apply to your modified version as well.
#serial 39
#serial 40
dnl =========================================================================
dnl AX_PROG_LUA([MINIMUM-VERSION], [TOO-BIG-VERSION],

View File

@@ -1,6 +1,6 @@
# ==============================================================================
# http://www.gnu.org/software/autoconf-archive/ax_prog_dotnetcore_version.html
# ==============================================================================
# ===============================================================================
# https://www.gnu.org/software/autoconf-archive/ax_prog_dotnetcore_version.html
# ===============================================================================
#
# SYNOPSIS
#
@@ -33,7 +33,7 @@
# and this notice are preserved. This file is offered as-is, without any
# warranty.
#serial 1
#serial 2
AC_DEFUN([AX_PROG_DOTNETCORE_VERSION],[
AC_REQUIRE([AC_PROG_SED])

View File

@@ -1,5 +1,5 @@
# ===========================================================================
# http://www.gnu.org/software/autoconf-archive/ax_prog_haxe_version.html
# https://www.gnu.org/software/autoconf-archive/ax_prog_haxe_version.html
# ===========================================================================
#
# SYNOPSIS
@@ -32,7 +32,7 @@
# and this notice are preserved. This file is offered as-is, without any
# warranty.
#serial 1
#serial 2
AC_DEFUN([AX_PROG_HAXE_VERSION],[
AC_REQUIRE([AC_PROG_SED])

View File

@@ -1,5 +1,5 @@
# ===========================================================================
# http://www.gnu.org/software/autoconf-archive/ax_prog_perl_modules.html
# https://www.gnu.org/software/autoconf-archive/ax_prog_perl_modules.html
# ===========================================================================
#
# SYNOPSIS
@@ -32,7 +32,7 @@
# and this notice are preserved. This file is offered as-is, without any
# warranty.
#serial 7
#serial 8
AU_ALIAS([AC_PROG_PERL_MODULES], [AX_PROG_PERL_MODULES])
AC_DEFUN([AX_PROG_PERL_MODULES],[dnl

View File

@@ -1,177 +0,0 @@
# ===========================================================================
# http://www.gnu.org/software/autoconf-archive/ax_compare_version.html
# ===========================================================================
#
# SYNOPSIS
#
# AX_COMPARE_VERSION(VERSION_A, OP, VERSION_B, [ACTION-IF-TRUE], [ACTION-IF-FALSE])
#
# DESCRIPTION
#
# This macro compares two version strings. Due to the various number of
# minor-version numbers that can exist, and the fact that string
# comparisons are not compatible with numeric comparisons, this is not
# necessarily trivial to do in a autoconf script. This macro makes doing
# these comparisons easy.
#
# The six basic comparisons are available, as well as checking equality
# limited to a certain number of minor-version levels.
#
# The operator OP determines what type of comparison to do, and can be one
# of:
#
# eq - equal (test A == B)
# ne - not equal (test A != B)
# le - less than or equal (test A <= B)
# ge - greater than or equal (test A >= B)
# lt - less than (test A < B)
# gt - greater than (test A > B)
#
# Additionally, the eq and ne operator can have a number after it to limit
# the test to that number of minor versions.
#
# eq0 - equal up to the length of the shorter version
# ne0 - not equal up to the length of the shorter version
# eqN - equal up to N sub-version levels
# neN - not equal up to N sub-version levels
#
# When the condition is true, shell commands ACTION-IF-TRUE are run,
# otherwise shell commands ACTION-IF-FALSE are run. The environment
# variable 'ax_compare_version' is always set to either 'true' or 'false'
# as well.
#
# Examples:
#
# AX_COMPARE_VERSION([3.15.7],[lt],[3.15.8])
# AX_COMPARE_VERSION([3.15],[lt],[3.15.8])
#
# would both be true.
#
# AX_COMPARE_VERSION([3.15.7],[eq],[3.15.8])
# AX_COMPARE_VERSION([3.15],[gt],[3.15.8])
#
# would both be false.
#
# AX_COMPARE_VERSION([3.15.7],[eq2],[3.15.8])
#
# would be true because it is only comparing two minor versions.
#
# AX_COMPARE_VERSION([3.15.7],[eq0],[3.15])
#
# would be true because it is only comparing the lesser number of minor
# versions of the two values.
#
# Note: The characters that separate the version numbers do not matter. An
# empty string is the same as version 0. OP is evaluated by autoconf, not
# configure, so must be a string, not a variable.
#
# The author would like to acknowledge Guido Draheim whose advice about
# the m4_case and m4_ifvaln functions make this macro only include the
# portions necessary to perform the specific comparison specified by the
# OP argument in the final configure script.
#
# LICENSE
#
# Copyright (c) 2008 Tim Toolan <toolan@ele.uri.edu>
#
# Copying and distribution of this file, with or without modification, are
# permitted in any medium without royalty provided the copyright notice
# and this notice are preserved. This file is offered as-is, without any
# warranty.
#serial 11
dnl #########################################################################
AC_DEFUN([AX_COMPARE_VERSION], [
AC_REQUIRE([AC_PROG_AWK])
# Used to indicate true or false condition
ax_compare_version=false
# Convert the two version strings to be compared into a format that
# allows a simple string comparison. The end result is that a version
# string of the form 1.12.5-r617 will be converted to the form
# 0001001200050617. In other words, each number is zero padded to four
# digits, and non digits are removed.
AS_VAR_PUSHDEF([A],[ax_compare_version_A])
A=`echo "$1" | sed -e 's/\([[0-9]]*\)/Z\1Z/g' \
-e 's/Z\([[0-9]]\)Z/Z0\1Z/g' \
-e 's/Z\([[0-9]][[0-9]]\)Z/Z0\1Z/g' \
-e 's/Z\([[0-9]][[0-9]][[0-9]]\)Z/Z0\1Z/g' \
-e 's/[[^0-9]]//g'`
AS_VAR_PUSHDEF([B],[ax_compare_version_B])
B=`echo "$3" | sed -e 's/\([[0-9]]*\)/Z\1Z/g' \
-e 's/Z\([[0-9]]\)Z/Z0\1Z/g' \
-e 's/Z\([[0-9]][[0-9]]\)Z/Z0\1Z/g' \
-e 's/Z\([[0-9]][[0-9]][[0-9]]\)Z/Z0\1Z/g' \
-e 's/[[^0-9]]//g'`
dnl # In the case of le, ge, lt, and gt, the strings are sorted as necessary
dnl # then the first line is used to determine if the condition is true.
dnl # The sed right after the echo is to remove any indented white space.
m4_case(m4_tolower($2),
[lt],[
ax_compare_version=`echo "x$A
x$B" | sed 's/^ *//' | sort -r | sed "s/x${A}/false/;s/x${B}/true/;1q"`
],
[gt],[
ax_compare_version=`echo "x$A
x$B" | sed 's/^ *//' | sort | sed "s/x${A}/false/;s/x${B}/true/;1q"`
],
[le],[
ax_compare_version=`echo "x$A
x$B" | sed 's/^ *//' | sort | sed "s/x${A}/true/;s/x${B}/false/;1q"`
],
[ge],[
ax_compare_version=`echo "x$A
x$B" | sed 's/^ *//' | sort -r | sed "s/x${A}/true/;s/x${B}/false/;1q"`
],[
dnl Split the operator from the subversion count if present.
m4_bmatch(m4_substr($2,2),
[0],[
# A count of zero means use the length of the shorter version.
# Determine the number of characters in A and B.
ax_compare_version_len_A=`echo "$A" | $AWK '{print(length)}'`
ax_compare_version_len_B=`echo "$B" | $AWK '{print(length)}'`
# Set A to no more than B's length and B to no more than A's length.
A=`echo "$A" | sed "s/\(.\{$ax_compare_version_len_B\}\).*/\1/"`
B=`echo "$B" | sed "s/\(.\{$ax_compare_version_len_A\}\).*/\1/"`
],
[[0-9]+],[
# A count greater than zero means use only that many subversions
A=`echo "$A" | sed "s/\(\([[0-9]]\{4\}\)\{m4_substr($2,2)\}\).*/\1/"`
B=`echo "$B" | sed "s/\(\([[0-9]]\{4\}\)\{m4_substr($2,2)\}\).*/\1/"`
],
[.+],[
AC_WARNING(
[illegal OP numeric parameter: $2])
],[])
# Pad zeros at end of numbers to make same length.
ax_compare_version_tmp_A="$A`echo $B | sed 's/./0/g'`"
B="$B`echo $A | sed 's/./0/g'`"
A="$ax_compare_version_tmp_A"
# Check for equality or inequality as necessary.
m4_case(m4_tolower(m4_substr($2,0,2)),
[eq],[
test "x$A" = "x$B" && ax_compare_version=true
],
[ne],[
test "x$A" != "x$B" && ax_compare_version=true
],[
AC_WARNING([illegal OP parameter: $2])
])
])
AS_VAR_POPDEF([A])dnl
AS_VAR_POPDEF([B])dnl
dnl # Execute ACTION-IF-TRUE / ACTION-IF-FALSE.
if test "$ax_compare_version" = "true" ; then
m4_ifvaln([$4],[$4],[:])dnl
m4_ifvaln([$5],[else $5])dnl
fi
]) dnl AX_COMPARE_VERSION

View File

@@ -19,7 +19,7 @@
# build Apache Thrift on AppVeyor - https://ci.appveyor.com
version: '1.0.0-dev.{build}'
version: '0.11.0.{build}'
shallow_clone: true

View File

@@ -46,9 +46,12 @@ if [ "$AUTOMAKE_VERSION" \< "1.13" ]; then
exit 1
fi
set -e
autoscan
$LIBTOOLIZE --copy --automake
aclocal -I ./aclocal
autoheader
sed '/undef VERSION/d' config.hin > config.hin2
mv config.hin2 config.hin
autoconf
automake --copy --add-missing --foreign
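These are the steps `bootstrap.sh` performs to regenerate the build system; a typical top-level build afterwards looks like the sequence used by `build/docker/scripts/sca.sh` later in this diff:

    ./bootstrap.sh
    ./configure --enable-tutorial=no
    make -j3 precross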

View File

@@ -35,7 +35,7 @@ set(CMAKE_INCLUDE_DIRECTORIES_PROJECT_BEFORE ON)
set(CMAKE_COLOR_MAKEFILE ON)
# Define the generic version of the libraries here
set(GENERIC_LIB_VERSION "0.1.0")
set(GENERIC_LIB_VERSION "0.11.0")
set(GENERIC_LIB_SOVERSION "0")
# Set the default build type to release with debug info

View File

@@ -1,81 +1,168 @@
# Apache Thrift Docker containers
Docker containers used to build and test Apache Thrift for a variety of platforms.
# Docker Integration #
## Available Containers
Because building Apache Thrift requires toolchains for many languages, docker containers are used to build and test the project on a variety of platforms, providing maximum test coverage.
### CentOS
* 7.3 (current)
## Travis CI Integration ##
The Travis CI scripts use the following environment variables and logic to determine their behavior.
### Environment Variables ###
| Variable | Default | Usage |
| -------- | ----- | ------- |
| `DISTRO` | `ubuntu-xenial` | Set by various build jobs in `.travis.yml` to run builds in different containers. Not intended to be set externally. |
| `DOCKER_REPO` | `thrift/thrift-build` | The name of the Docker Hub repository used to pull and push thrift build images. |
| `DOCKER_USER` | `<none>` | The Docker Hub account name containing the repository. |
| `DOCKER_PASS` | `<none>` | The Docker Hub account password to use when pushing new tags. |
For example, the default docker image that is used in builds if no overrides are specified would be: `thrift/thrift-build:ubuntu-xenial`
### Forks ###
If you have forked the Apache Thrift repository and you would like to use your own Docker Hub account to store thrift build images, you can use the Travis CI web interface to set the `DOCKER_USER`, `DOCKER_PASS`, and `DOCKER_REPO` variables in a secure manner. Your fork builds will then pull, push, and tag the docker images in your account.
### Logic ###
The Travis CI build runs in two stages. In the "docker" stage, the docker images are rebuilt for each of the three supported containers if they do not match the Dockerfile that was used to build the most recent tag; if a `DOCKER_PASS` environment variable is specified, these builds will attempt to log into Docker Hub and push the resulting tags. In the "test" stage, the actual build and test jobs run inside those images.
## Supported Containers ##
The Travis CI (continuous integration) builds use the Ubuntu Trusty, Xenial, and Artful images to maximize language level coverage.
### Ubuntu ###
* trusty (legacy)
* xenial (stable)
* artful (latest)
## Unsupported Containers ##
These containers may be in various states, and may not build everything.
### CentOS ###
* 7.3
* make check in lib/py may hang in test_sslsocket - root cause unknown
### Debian ###
### Debian
* jessie
* stretch (current)
* stretch
* make check in lib/cpp fails due to https://svn.boost.org/trac10/ticket/12507
### Ubuntu
* trusty
* xenial (current)
## Building like Travis CI does, locally ##
## Dependencies
* A working Docker environment. A Vagrantfile is provided which will setup an Ubuntu host and working Docker environment as well as build the Apache Thrift Docker container for testing and development.
We recommend you build locally the same way Travis CI does, so that when you submit your pull request you will run into fewer surprises. To make it a little easier, put the following into your `~/.bash_aliases` file:
## Usage
From the Apache Thrift code base root:
# Kill all running containers.
alias dockerkillall='docker kill $(docker ps -q)'
* Build
# Delete all stopped containers.
alias dockercleanc='printf "\n>>> Deleting stopped containers\n\n" && docker rm $(docker ps -a -q)'
docker build -t thrift build/docker/ubuntu-xenial
# Delete all untagged images.
alias dockercleani='printf "\n>>> Deleting untagged images\n\n" && docker rmi $(docker images -q -f dangling=true)'
or
# Delete all stopped containers and untagged images.
alias dockerclean='dockercleanc || true && dockercleani'
docker build -t thrift build/docker/centos-7.3
# Build a thrift docker image (run from top level of git repo): argument #1 is image type (ubuntu, centos, etc).
function dockerbuild
{
docker build -t $1 build/docker/$1
}
* Run
# Run a thrift docker image: argument #1 is image type (ubuntu, centos, etc).
function dockerrun
{
docker run -v $(pwd):/thrift/src -it $1 /bin/bash
}
docker run -v $(pwd):/thrift/src -it thrift /bin/bash
To pull down the image currently used to build (the same way Travis CI does it); if it is out of date in any way, a new one will be built for you:
## Core Tool Versions per Dockerfile
| Tool | centos-7.3 | debian-stretch | ubuntu-xenial |
|-----------|------------|----------------|---------------|
| ant | 1.9.2 | 1.9.9 | 1.9.6 |
| autoconf | 2.69 | 2.69 | 2.69 |
| automake | 1.13.4 | 1.15 | 1.15 |
| bison | 2.7 | 3.0.4 | 3.0.4 |
| boost | 1.53.0 | 1.62.0 | 1.58.0 |
| cmake | 3.6.3 | 3.7.2 | 3.5.1 |
| flex | 2.5.37 | 2.6.1 | 2.6.0 |
| glibc | 2.17 | 2.24 | 2.23 |
| libevent | 2.0.21 | 2.0.21 | 2.0.21 |
| libstdc++ | 4.8.5 | 6.3.0 | 5.4.0 |
| make | 3.82 | 4.1 | 4.1 |
| openssl | 1.0.1e | 1.1.0f | 1.0.2g |
thrift$ DOCKER_REPO=thrift/thrift-build DISTRO=ubuntu-xenial build/docker/refresh.sh
## Language Versions per Dockerfile
| Language | centos-7.3 | debian-stretch | ubuntu-xenial |
|-----------|------------|----------------|---------------|
| as3 | | | |
| C++-gcc | 4.8.5 | 6.3.0 | 5.4.0 |
| C++-clang | 3.4.2 | 3.8.1 | 3.8 |
| C# (mono) | 4.6.2 | 4.6.2.7 | 5.2.0.215 |
| c_glib | 2.46.2 | 2.50.3 | 2.48.2 |
| cocoa | | | |
| d | 2.076.0 | 2.075.1 | 2.075.1 |
| dart | 1.24.2 | 1.24.2 | 1.24.2 |
| delphi | | | |
| dotnet | | | |
| erlang | 20 | 19.2 | 18.3 |
| go | 1.9 | 1.7.4 | 1.6.2 |
| haskell | 7.6.3 | 8.0.1 | 7.10.3 |
| haxe | | 3.2.1 | 3.2.1 |
| java | 1.8.0_141 | 1.8.0_141 | 1.8.0_131 |
| js | | | |
| lua | 5.3.4 | 5.2.4 | 5.2.4 |
| nodejs | 6.11.1 | 8.4.0 | 8.4.0 |
| ocaml | 4.01.0 | 4.02.3 | 4.02.3 |
| perl | 5.16.3 | 5.24.1 | 5.22.1 |
| php | 5.4.16 | 7.0.19 | 7.0.22 |
| python2 | 2.7.5 | 2.7.13 | 2.7.12 |
| python3 | 3.4.5 | 3.5.3 | 3.5.2 |
| ruby | 2.0.0p648 | 2.3.3p222 | 2.3.1p112 |
| rust | 1.17.0 | 1.14.0 | 1.15.1 |
| smalltalk | | | |
| swift | | | |
To run all unit tests (just like Travis CI):
thrift$ dockerrun ubuntu-xenial
root@8caf56b0ce7b:/thrift/src# build/docker/scripts/autotools.sh
To run the cross tests (just like Travis CI):
thrift$ dockerrun ubuntu-xenial
root@8caf56b0ce7b:/thrift/src# build/docker/scripts/cross-test.sh
When you are done, clean up occasionally so that docker isn't using lots of extra disk space:
thrift$ dockerclean
You need to run the docker commands from the root of the git repository for them to work.
When you are done in the root docker shell you can `exit` to go back to your host shell. Once the unit tests and cross tests pass locally, submit the changes and squash the pull request to one commit to make it easier to merge. Now you are building like Travis CI does!
## Raw Commands for Building with Docker ##
If you do not want to use the same scripts Travis CI does, you can do it manually:
Build the image:
thrift$ docker build -t thrift build/docker/ubuntu-xenial
Open a command prompt in the image:
thrift$ docker run -v $(pwd):/thrift/src -it thrift /bin/bash
## Core Tool Versions per Dockerfile ##
Last updated: October 1, 2017
| Tool | ubuntu-trusty | ubuntu-xenial | ubuntu-artful | Notes |
| :-------- | :------------ | :------------ | :------------ | :---- |
| ant | 1.9.3 | 1.9.6 | 1.9.9 | |
| autoconf | 2.69 | 2.69 | 2.69 | |
| automake | 1.14.1 | 1.15 | 1.15 | |
| bison | 3.0.2 | 3.0.4 | 3.0.4 | |
| boost | 1.54.0 | 1.58.0 | 1.63.0 | artful: stock boost 1.62.0 has problems running unit tests |
| cmake | 3.2.2 | 3.5.1 | 3.9.1 | |
| cppcheck | 1.61 | 1.72 | 1.80 | |
| flex | 2.5.35 | 2.6.0 | 2.6.1 | |
| glibc | 2.19 | 2.23 | 2.26 | |
| libevent | 2.0.21 | 2.0.21 | 2.1 | |
| libstdc++ | 4.8.4 | 5.4.0 | 7.2.0 | |
| make | 3.81 | 4.1 | 4.1 | |
| openssl | 1.0.1f | 1.0.2g | 1.0.2g | |
| qt5 | 5.2.1 | 5.5.1 | 5.9.1 | |
## Compiler/Language Versions per Dockerfile ##
Last updated: October 1, 2017
| Language | ubuntu-trusty | ubuntu-xenial | ubuntu-artful | Notes |
| :-------- | :------------ | :------------ | :------------ | :---- |
| as3 | | | | Not in CI |
| C++ gcc | 4.8.4 | 5.4.0 | 7.2.0 | |
| C++ clang | 3.4 | 3.8 | 4.0 | |
| C# (mono) | 3.2.8.0 | 4.2.1 | 4.6.2.7 | |
| c_glib | 2.40.2 | 2.48.2 | 2.54.0 | |
| cocoa | | | | Not in CI |
| d | 2.070.2 | 2.073.2 | 2.076.0 | |
| dart | 1.20.1 | 1.24.2 | 1.24.2 | |
| delphi | | | | Not in CI |
| dotnet | | 2.0.3 | 2.0.3 | |
| erlang | R16B03 | 18.3 | 20.0.4 | |
| go | 1.2.1 | 1.6.2 | 1.8.3 | |
| haskell | 7.6.3 | 7.10.3 | 8.0.2 | |
| haxe | | 3.2.1 | 3.4.2 | disabled in trusty builds - cores on install v3.0.0, disabled in artful builds - see THRIFT-4352 |
| java | 1.7.0_151 | 1.8.0_131 | 1.8.0_151 | |
| js | | | | Unsure how to look for version info? |
| lua | 5.1.5 | 5.2.4 | 5.2.4 | Lua 5.3: see THRIFT-4386 |
| nodejs | | 4.2.6 | 8.9.1 | trusty has node.js 0.10.0 which is too old |
| ocaml | | 4.02.3 | 4.04.0 | |
| perl | 5.18.2 | 5.22.1 | 5.26.0 | |
| php | 5.5.9 | 7.0.22 | 7.1.8 | |
| python | 2.7.6 | 2.7.12 | 2.7.14 | |
| python3 | 3.4.3 | 3.5.2 | 3.6.3 | |
| ruby | 1.9.3p484 | 2.3.1p112 | 2.3.3p222 | |
| rust | 1.15.1 | 1.15.1 | 1.18.0 | |
| smalltalk | | | | Not in CI |
| swift | | | | Not in CI |

View File

@@ -1,42 +0,0 @@
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Download prebuilt docker image and compare Dockerfile hash values
set -ex
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DISTRO=$1
SRC_IMG=thrift/thrift-build:$DISTRO
function try_pull {
docker pull $SRC_IMG
cd ${SCRIPT_DIR}/$DISTRO
docker run $SRC_IMG bash -c 'cd .. && sha512sum Dockerfile' > .Dockerfile.sha512
sha512sum -c .Dockerfile.sha512
}
if try_pull; then
echo Dockerfile seems identical. No need to rebuild from scratch.
docker tag thrift/thrift-build:$DISTRO thrift-build:$DISTRO
else
echo Either Dockerfile has changed or pull failure. Need to build brand new one.
exit 1
fi

82
vendor/github.com/apache/thrift/build/docker/refresh.sh generated vendored Executable file
View File

@@ -0,0 +1,82 @@
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
# The build has two stages: "docker" and "test"
# The "docker" stage is meant to rebuild the docker images
# if needed. If we cannot push that result however then
# there is no reason to do anything.
# The "test" stage is an actual test job. Even if the docker
# image doesn't match what's in the repo, we still build
# the image so the build job can run properly.
#
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DOCKER_TAG=$DOCKER_REPO:$DISTRO
function dockerfile_changed {
# image may not exist yet, so we have to let it fail silently:
docker pull $DOCKER_TAG || true
# compare the checksum of the Dockerfile baked into the pulled image against
# the local one; returns success (0) when they match, i.e. nothing has changed
docker run $DOCKER_TAG bash -c 'cd .. && sha512sum Dockerfile' > .Dockerfile.sha512
sha512sum -c .Dockerfile.sha512
}
#
# If this build has no DOCKER_PASS and it is in the docker stage
# then there's no reason to do any processing because we cannot
# push the result if the Dockerfile changed.
#
if [[ "$TRAVIS_BUILD_STAGE" == "docker" ]] && [[ -z "$DOCKER_PASS" ]]; then
echo Detected docker stage build and no defined DOCKER_PASS, this build job will be skipped.
echo Subsequent jobs in the test stage may each rebuild the docker image.
exit 0
fi
pushd ${SCRIPT_DIR}/$DISTRO
if dockerfile_changed; then
echo Dockerfile has not changed. No need to rebuild.
exit 0
else
echo Dockerfile has changed.
fi
popd
#
# Dockerfile has changed - rebuild it for the current build job.
# If it is a "docker" stage build then we want to push it back
# to the DOCKER_REPO. If it is a "test" stage build then we do
# not. If nobody defined a DOCKER_PASS then it doesn't matter.
#
echo Rebuilding docker image $DISTRO
docker build --tag $DOCKER_TAG build/docker/$DISTRO
if [[ "$TRAVIS_BUILD_STAGE" == "docker" ]] && [[ ! -z "$DOCKER_USER" ]] && [[ ! -z "$DOCKER_PASS" ]]; then
echo Pushing docker image $DOCKER_TAG
docker login -u $DOCKER_USER -p $DOCKER_PASS
docker push $DOCKER_TAG
else
echo Not pushing docker image: either not a docker stage build job, or one of DOCKER_USER or DOCKER_PASS is undefined.
fi
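Outside of Travis, the same script can be driven by the environment variables documented in the docker README above; a minimal sketch using the documented defaults:

    # from the top of the thrift source tree
    DOCKER_REPO=thrift/thrift-build DISTRO=ubuntu-xenial build/docker/refresh.sh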

30
vendor/github.com/apache/thrift/build/docker/run.sh generated vendored Executable file
View File

@@ -0,0 +1,30 @@
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DOCKER_TAG=$DOCKER_REPO:$DISTRO
printenv | sort
docker run --net=host -e BUILD_LIBS="$BUILD_LIBS" $BUILD_ENV -v $(pwd):/thrift/src \
-it $DOCKER_TAG build/docker/scripts/$SCRIPT $BUILD_ARG
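The wrapper expects `DOCKER_REPO`, `DISTRO`, and `SCRIPT` in the environment, with `BUILD_LIBS`, `BUILD_ENV`, and `BUILD_ARG` as optional extras. A hypothetical invocation (the variable values here are assumptions based on the README and scripts above):

    # run the autotools test script inside the xenial build image
    DOCKER_REPO=thrift/thrift-build DISTRO=ubuntu-xenial SCRIPT=autotools.sh build/docker/run.sh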

62
vendor/github.com/apache/thrift/build/docker/scripts/sca.sh generated vendored Executable file
View File

@@ -0,0 +1,62 @@
#!/bin/sh
set -ev
#
# Generate thrift files so the static code analysis includes an analysis
# of the files the thrift compiler spits out. If running interactively
# set the NOBUILD environment variable to skip the boot/config/make phase.
#
if [[ -z "$NOBUILD" ]]; then
./bootstrap.sh
./configure --enable-tutorial=no
make -j3 precross
fi
#
# C/C++ static code analysis with cppcheck
# add --error-exitcode=1 to --enable=all as soon as everything is fixed
#
# Python code style check with flake8
#
# search for TODO etc within source tree
# some statistics about the code base
# some info about the build machine
# Compiler cppcheck (All)
cppcheck --force --quiet --inline-suppr --enable=all -j2 compiler/cpp/src
# C++ cppcheck (All)
cppcheck --force --quiet --inline-suppr --enable=all -j2 lib/cpp/src lib/cpp/test test/cpp tutorial/cpp
# C Glib cppcheck (All)
cppcheck --force --quiet --inline-suppr --enable=all -j2 lib/c_glib/src lib/c_glib/test test/c_glib/src tutorial/c_glib
# Silent error checks
# See THRIFT-4371 : flex generated code triggers "possible null pointer dereference" in yy_init_buffer
cppcheck --force --quiet --inline-suppr --suppress="*:thrift/thriftl.cc" --error-exitcode=1 -j2 compiler/cpp/src
cppcheck --force --quiet --inline-suppr --error-exitcode=1 -j2 lib/cpp/src lib/cpp/test test/cpp tutorial/cpp
cppcheck --force --quiet --inline-suppr --error-exitcode=1 -j2 lib/c_glib/src lib/c_glib/test test/c_glib/src tutorial/c_glib
# Python code style
flake8 --ignore=E501 lib/py
flake8 tutorial/py
# THRIFT-4371 : generated files are excluded because they haven't been scrubbed yet
flake8 --ignore=E501 --exclude="*/gen-py*/*" test/py
flake8 test/py.twisted
flake8 test/py.tornado
flake8 --ignore=E501 test/test.py
flake8 --ignore=E501,E722 test/crossrunner
flake8 test/features
# TODO etc
echo FIXMEs: `grep -r FIXME * | wc -l`
echo HACKs: `grep -r HACK * | wc -l`
echo TODOs: `grep -r TODO * | wc -l`
# LoC
sloccount .
# System Info
dpkg -l
uname -a
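To reproduce the static code analysis locally, the script can be run inside one of the build images, e.g. with the `dockerrun` helper from the docker README above (a sketch; the container prompt is illustrative):

    thrift$ dockerrun ubuntu-xenial
    root@container:/thrift/src# build/docker/scripts/sca.sh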

View File

@@ -1,27 +1,28 @@
#!/bin/sh
set -ex
# Wraps autotools.sh, but each binary crashes if it exhibits undefined behavior. See
# http://releases.llvm.org/3.8.0/tools/clang/docs/UndefinedBehaviorSanitizer.html
set -e
# Wraps autotools.sh, but each binary crashes if it exhibits undefined behavior.
# Set the undefined behavior flags. This crashes on all undefined behavior except for
# undefined casting, aka "vptr".
#
# TODO: fix undefined vptr behavior and turn this option back on.
export CFLAGS="-fsanitize=undefined -fno-sanitize-recover=undefined"
# Builds without optimization and with debugging symbols for making crash reports more
# readable.
export CFLAGS="${CFLAGS} -O0 -ggdb3"
export CFLAGS="-fsanitize=undefined -fno-sanitize-recover=undefined -O0 -ggdb3 -fno-omit-frame-pointer"
export CXXFLAGS="${CFLAGS}"
export LDFLAGS="-lubsan"
export UBSAN_OPTIONS=print_stacktrace=1
# llvm-symbolizer must be on PATH, but the above installation instals a binary called
# "llvm-symbolizer-3.8", not "llvm-symbolizer". This fixes that with a softlink in a new
# directory.
#
# work around https://svn.boost.org/trac10/ticket/11632 if present
#
sed -i 's/, stream_t(rdbuf()) /, stream_t(pbase_type::member.get())/g' /usr/include/boost/format/alt_sstream.hpp
# llvm-symbolizer must be on PATH to get a stack trace on error
CLANG_PATH="$(mktemp -d)"
trap "rm -rf ${CLANG_PATH}" EXIT
ln -s "$(whereis llvm-symbolizer-3.8 | rev | cut -d ' ' -f 1 | rev)" \
ln -s "$(whereis llvm-symbolizer-4.0 | rev | cut -d ' ' -f 1 | rev)" \
"${CLANG_PATH}/llvm-symbolizer"
export PATH="${CLANG_PATH}:${PATH}"
llvm-symbolizer -version

View File

@@ -0,0 +1,254 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Apache Thrift Docker build environment for Ubuntu Artful
# Using all stock Ubuntu Artful packaging except for:
# - cpp: stock boost 1.62 in artful has a nasty bug so we use stock boost 1.63
# - d: does not come with Ubuntu so we're installing the latest
# - d: deimos for libevent and openssl omitted - not compatible / build errors
# - haxe: see THRIFT-4352, but test/haxe cores during testing
# and hxcpp 3.4.64 is not compatible with artful
#
FROM buildpack-deps:artful-scm
MAINTAINER Apache Thrift <dev@thrift.apache.org>
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update && \
apt-get dist-upgrade -y && \
apt-get install -y --no-install-recommends \
apt \
apt-transport-https \
apt-utils \
curl \
dirmngr \
software-properties-common \
wget
# csharp (mono) - if we ever want a later version
# RUN echo "deb http://download.mono-project.com/repo/debian xenial main" | tee /etc/apt/sources.list.d/mono.list && \
# apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A6A19B38D3D831EF
# dotnet (core)
RUN curl https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > /etc/apt/trusted.gpg.d/microsoft.gpg && \
echo "deb [arch=amd64] https://packages.microsoft.com/repos/microsoft-ubuntu-artful-prod artful main" > /etc/apt/sources.list.d/dotnetdev.list
# node.js (this step runs apt-get update internally) - if we ever want a later version
RUN curl -sL https://deb.nodesource.com/setup_8.x | bash -
### install general dependencies
RUN apt-get install -y --no-install-recommends \
`# General dependencies` \
bash-completion \
bison \
build-essential \
clang \
cmake \
debhelper \
flex \
gdb \
llvm \
ninja-build \
pkg-config \
valgrind \
vim
ENV PATH /usr/lib/llvm-3.8/bin:$PATH
# boost-1.62 has a terrible bug in boost::test, see https://svn.boost.org/trac10/ticket/12507
RUN apt-get install -y --no-install-recommends \
`# C++ dependencies` \
libboost1.63-all-dev \
libevent-dev \
libssl-dev \
qt5-default \
qtbase5-dev \
qtbase5-dev-tools
RUN apt-get install -y --no-install-recommends \
`# csharp (mono) dependencies` \
mono-devel
RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys EBCF975E5BA24D5E && \
wget http://master.dl.sourceforge.net/project/d-apt/files/d-apt.list -O /etc/apt/sources.list.d/d-apt.list && \
wget -qO - https://dlang.org/d-keyring.gpg | apt-key add - && \
apt-get update && \
apt-get install -y --no-install-recommends \
`# D dependencies` \
dmd-bin \
libphobos2-dev \
dub \
dfmt \
dscanner \
libevent-dev \
libssl-dev \
xdg-utils
# libevent deimos doesn't seem to work so not enabling it:
# RUN mkdir -p /usr/include/dmd/druntime/import/deimos /usr/include/dmd/druntime/import/C && \
# curl -sSL https://github.com/D-Programming-Deimos/libevent/archive/master.tar.gz| tar xz && \
# mv libevent-master/deimos/* /usr/include/dmd/druntime/import/deimos/ && \
# mv libevent-master/C/* /usr/include/dmd/druntime/import/C/ && \
# rm -rf libevent-master
# openssl deimos doesn't work with openssl-1.0.2 so not enabling it:
# RUN curl -sSL https://github.com/D-Programming-Deimos/openssl/archive/master.tar.gz| tar xz && \
# mv openssl-master/deimos/* /usr/include/dmd/druntime/import/deimos/ && \
# mv openssl-master/C/* /usr/include/dmd/druntime/import/C/ && \
# rm -rf openssl-master
# dart cannot be downloaded by aptitude because of
# https://github.com/dart-lang/sdk/issues/30512
# RUN curl https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - && \
# curl https://storage.googleapis.com/download.dartlang.org/linux/debian/dart_stable.list > /etc/apt/sources.list.d/dart_stable.list && \
# apt-get update && \
# apt-get install -y --no-install-recommends \
# `# Dart dependencies` \
# dart
# so instead we do:
RUN wget https://storage.googleapis.com/dart-archive/channels/stable/release/latest/linux_packages/dart_1.24.2-1_amd64.deb && \
dpkg -i dart_1.24.2-1_amd64.deb && \
rm dart_1.24.2-1_amd64.deb
ENV PATH /usr/lib/dart/bin:$PATH
RUN apt-get install -y --no-install-recommends \
`# dotnet core dependencies` \
dotnet-sdk-2.0.3
RUN apt-get install -y --no-install-recommends \
`# Erlang dependencies` \
erlang-base \
erlang-eunit \
erlang-dev \
erlang-tools \
rebar
RUN apt-get install -y --no-install-recommends \
`# GlibC dependencies` \
libglib2.0-dev
RUN apt-get install -y --no-install-recommends \
`# golang (go) dependencies` \
golang-go \
golang-race-detector-runtime
RUN apt-get install -y --no-install-recommends \
`# Haskell dependencies` \
ghc \
cabal-install
# see THRIFT-4352, test/haxe cores on artful
# RUN apt-get install -y --no-install-recommends \
# `# Haxe dependencies` \
# haxe \
# neko \
# neko-dev
# RUN haxelib setup --always /usr/share/haxe/lib && \
# haxelib install --always hxcpp
RUN apt-get install -y --no-install-recommends \
`# Java dependencies` \
ant \
ant-optional \
openjdk-8-jdk \
maven
RUN apt-get install -y --no-install-recommends \
`# Lua dependencies` \
lua5.2 \
lua5.2-dev
# https://bugs.launchpad.net/ubuntu/+source/lua5.3/+bug/1707212
# lua5.3 does not install alternatives!
# need to update our luasocket code, lua doesn't have luaL_openlib any more
RUN apt-get install -y --no-install-recommends \
`# Node.js dependencies` \
nodejs
RUN apt-get install -y --no-install-recommends \
`# OCaml dependencies` \
ocaml \
opam && \
opam init --yes && \
opam install --yes oasis
RUN apt-get install -y --no-install-recommends \
`# Perl dependencies` \
libbit-vector-perl \
libclass-accessor-class-perl \
libcrypt-ssleay-perl \
libio-socket-ssl-perl \
libnet-ssleay-perl
RUN apt-get install -y --no-install-recommends \
`# Php dependencies` \
php \
php-cli \
php-dev \
php-pear \
re2c \
phpunit
RUN apt-get install -y --no-install-recommends \
`# Python dependencies` \
python-all \
python-all-dbg \
python-all-dev \
python-ipaddress \
python-pip \
python-setuptools \
python-six \
python-tornado \
python-twisted \
python-wheel \
python-zope.interface && \
pip install --upgrade backports.ssl_match_hostname
RUN apt-get install -y --no-install-recommends \
`# Python3 dependencies` \
python3-all \
python3-all-dbg \
python3-all-dev \
python3-pip \
python3-setuptools \
python3-six \
python3-tornado \
python3-twisted \
python3-wheel \
python3-zope.interface
RUN apt-get install -y --no-install-recommends \
`# Ruby dependencies` \
ruby \
ruby-dev \
ruby-bundler
RUN gem install bundler --no-ri --no-rdoc
RUN apt-get install -y --no-install-recommends \
`# Rust dependencies` \
cargo \
rustc
RUN apt-get install -y --no-install-recommends \
`# Static Code Analysis dependencies` \
cppcheck \
sloccount && \
pip install flake8
# Clean up
RUN rm -rf /var/cache/apt/* && \
rm -rf /var/lib/apt/lists/* && \
rm -rf /tmp/* && \
rm -rf /var/tmp/*
ENV THRIFT_ROOT /thrift
RUN mkdir -p $THRIFT_ROOT/src
COPY Dockerfile $THRIFT_ROOT/
WORKDIR $THRIFT_ROOT/src
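Building and entering this image locally follows the same pattern as the other containers in the docker README above; a sketch from the source root (the `thrift-artful` tag is an arbitrary local name):

    docker build -t thrift-artful build/docker/ubuntu-artful
    docker run -v $(pwd):/thrift/src -it thrift-artful /bin/bash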

View File

@@ -10,68 +10,122 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Apache Thrift Docker build environment for Ubuntu
#
# Known missing client libraries:
# - dotnetcore
# Apache Thrift Docker build environment for Ubuntu Trusty
# Using all stock Ubuntu Trusty packaging except for:
# - d: does not come with Ubuntu so we're installing 2.070.0
# - dart: does not come with Ubuntu so we're installing 1.20.1
# - dotnetcore, disabled because netcore is for 1.0.0-preview and 2.0.0 is out
# - haxe, disabled because the distro comes with 3.0.0 and it cores while installing
# - node.js, disabled because it is at 0.10.0 in the distro which is too old (need 4+)
# - ocaml, disabled because it fails to install properly
#
FROM buildpack-deps:trusty-scm
MAINTAINER Apache Thrift <dev@thrift.apache.org>
ENV DEBIAN_FRONTEND noninteractive
# Add apt sources
# CMAKE
RUN apt-get update && \
apt-get install -y --no-install-recommends software-properties-common && \
add-apt-repository -y ppa:george-edison55/cmake-3.x
# Erlang
RUN echo 'deb http://packages.erlang-solutions.com/debian trusty contrib' > /etc/apt/sources.list.d/erlang_solutions.list && \
curl -sSL https://packages.erlang-solutions.com/debian/erlang_solutions.asc | apt-key add -
# Dart
RUN curl https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - && \
curl https://storage.googleapis.com/download.dartlang.org/linux/debian/dart_stable.list > /etc/apt/sources.list.d/dart_stable.list && \
sed -i /etc/apt/sources.list.d/dart_stable.list -e 's/https:/http:/g'
# Consider using mirror nearby when building locally
# TODO: Provide option via --build-arg=...
# RUN sed -i /etc/apt/sources.list -e 's!http://archive.ubuntu.com/ubuntu/!http://your/mirror/!g'
RUN apt-get update && \
apt-get dist-upgrade -y && \
apt-get install -y --no-install-recommends \
apt \
apt-transport-https \
apt-utils \
curl \
dirmngr \
software-properties-common \
wget
RUN apt-get update && apt-get install -y --no-install-recommends \
`# General dependencies` \
bash-completion \
bison \
build-essential \
clang \
cmake \
debhelper \
flex \
gdb \
llvm \
ninja-build \
pkg-config \
`# Included in buildpack-deps` \
`# autoconf` \
`# automake` \
`# g++` \
`# git` \
`# libtool` \
`# make`
valgrind \
vim
ENV PATH /usr/lib/llvm-3.8/bin:$PATH
RUN apt-get install -y --no-install-recommends \
`# C++ dependencies` \
`# libevent and OpenSSL are needed by D too` \
libboost-dev \
libboost-filesystem-dev \
libboost-program-options-dev \
libboost-system-dev \
libboost-test-dev \
libboost-thread-dev \
libboost-all-dev \
libevent-dev \
libssl-dev \
qt5-default \
qtbase5-dev \
qtbase5-dev-tools
RUN apt-get install -y --no-install-recommends \
`# csharp (mono) dependencies` \
mono-devel
RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys EBCF975E5BA24D5E && \
wget http://master.dl.sourceforge.net/project/d-apt/files/d-apt.list -O /etc/apt/sources.list.d/d-apt.list && \
wget -qO - https://dlang.org/d-keyring.gpg | apt-key add - && \
apt-get update && \
apt-get install -y --no-install-recommends \
`# D dependencies` \
dmd-bin=2.070.2-0 \
libphobos2-dev=2.070.2-0 \
dub \
dfmt \
dscanner \
xdg-utils
# RUN mkdir -p /usr/include/dmd/druntime/import/deimos /usr/include/dmd/druntime/import/C && \
# curl -sSL https://github.com/D-Programming-Deimos/libevent/archive/master.tar.gz| tar xz && \
# mv libevent-master/deimos/* /usr/include/dmd/druntime/import/deimos/ && \
# mv libevent-master/C/* /usr/include/dmd/druntime/import/C/ && \
# rm -rf libevent-master
# RUN curl -sSL https://github.com/D-Programming-Deimos/openssl/archive/master.tar.gz| tar xz && \
# mv openssl-master/deimos/* /usr/include/dmd/druntime/import/deimos/ && \
# mv openssl-master/C/* /usr/include/dmd/druntime/import/C/ && \
# rm -rf openssl-master
RUN curl https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - && \
curl https://storage.googleapis.com/download.dartlang.org/linux/debian/dart_stable.list > /etc/apt/sources.list.d/dart_stable.list && \
apt-get update && \
apt-get install -y --no-install-recommends \
`# Dart dependencies` \
dart=1.20.1-1
ENV PATH /usr/lib/dart/bin:$PATH
RUN apt-get install -y --no-install-recommends \
`# Erlang dependencies` \
erlang-base \
erlang-eunit \
erlang-dev \
erlang-tools \
rebar
RUN apt-get install -y --no-install-recommends \
`# GlibC dependencies` \
libglib2.0-dev
RUN apt-get install -y --no-install-recommends \
`# golang (go) dependencies` \
golang-go
RUN apt-get install -y --no-install-recommends \
`# Haskell dependencies` \
ghc \
cabal-install
# disabled because it cores while installing
# RUN apt-get install -y --no-install-recommends \
# `# Haxe dependencies` \
# haxe \
# neko \
# neko-dev && \
# haxelib setup /usr/share/haxe/lib && \
# haxelib install hxcpp 3.2.102
RUN apt-get install -y --no-install-recommends \
`# Java dependencies` \
ant \
@@ -80,28 +134,25 @@ RUN apt-get install -y --no-install-recommends \
maven
RUN apt-get install -y --no-install-recommends \
`# Python dependencies` \
`# TODO:` \
`# Install twisted and zope.interface via pip. we need twisted at ./configure time, otherwise` \
`# py.twisted tests are skipped.` \
python-all \
python-all-dbg \
python-all-dev \
python-pip \
python-setuptools \
python-twisted \
python-zope.interface \
python3-all \
python3-all-dbg \
python3-all-dev \
python3-setuptools \
python3-pip
`# Lua dependencies` \
lua5.1 \
lua5.1-dev
# disabled because it is too old
# RUN apt-get install -y --no-install-recommends \
# `# Node.js dependencies` \
# nodejs \
# npm
# disabled because it fails to install properly
# RUN apt-get install -y --no-install-recommends \
# `# OCaml dependencies` \
# ocaml \
# opam && \
# opam init --yes && \
# opam install --yes oasis
RUN apt-get install -y --no-install-recommends \
`# Ruby dependencies` \
ruby \
ruby-dev \
ruby-bundler \
`# Perl dependencies` \
libbit-vector-perl \
libclass-accessor-class-perl \
@@ -112,119 +163,60 @@ RUN apt-get install -y --no-install-recommends \
RUN apt-get install -y --no-install-recommends \
`# Php dependencies` \
php5 \
php5-dev \
php5-cli \
php5-dev \
php-pear \
re2c \
phpunit \
`# GlibC dependencies` \
libglib2.0-dev
RUN apt-get update && apt-get install -y --no-install-recommends \
`# Erlang dependencies` \
erlang-base \
erlang-eunit \
erlang-dev \
erlang-tools \
rebar
phpunit
RUN apt-get install -y --no-install-recommends \
`# Haskell dependencies` \
ghc \
cabal-install \
`# Haxe dependencies` \
neko \
neko-dev \
libneko0
# Newer release of nodejs
RUN curl -sL https://deb.nodesource.com/setup_6.x | bash
RUN apt-get install -y --no-install-recommends \
`# Node.js dependencies` \
nodejs
# Add mono package repository url to get latest version of mono
RUN echo "deb http://download.mono-project.com/repo/debian trusty main" | tee /etc/apt/sources.list.d/mono.list
RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A6A19B38D3D831EF
RUN apt-get update && apt-get install -y --no-install-recommends \
`# CSharp dependencies` \
mono-devel
`# Python dependencies` \
python-all \
python-all-dbg \
python-all-dev \
python-pip \
python-setuptools \
python-six \
python-twisted \
python-wheel \
python-zope.interface \
python3-all \
python3-all-dbg \
python3-all-dev \
python3-pip \
python3-setuptools \
python3-six \
python3-wheel \
python3-zope.interface && \
pip install -U ipaddress backports.ssl_match_hostname tornado && \
pip3 install -U backports.ssl_match_hostname tornado
# installing tornado by pip/pip3 instead of debian package
# if we install the debian package, the build fails in py2
RUN apt-get install -y --no-install-recommends \
`# D dependencies` \
xdg-utils \
`# Dart dependencies` \
dart \
`# Lua dependencies` \
lua5.2 \
lua5.2-dev \
`# MinGW dependencies` \
mingw32 \
mingw32-binutils \
mingw32-runtime \
nsis \
`# Clean up` \
&& rm -rf /var/cache/apt/* && \
`# Ruby dependencies` \
ruby \
ruby-dev \
ruby-bundler
RUN gem install bundler --no-ri --no-rdoc
RUN apt-get install -y --no-install-recommends \
`# Rust dependencies` \
cargo \
rustc
RUN apt-get install -y --no-install-recommends \
`# Static Code Analysis dependencies` \
cppcheck \
sloccount && \
pip install flake8
# Clean up
RUN rm -rf /var/cache/apt/* && \
rm -rf /var/lib/apt/lists/* && \
rm -rf /tmp/* && \
rm -rf /var/tmp/*
# Ruby
RUN gem install bundler --no-ri --no-rdoc
# Python optional dependencies
RUN pip2 install -U ipaddress backports.ssl_match_hostname tornado
RUN pip3 install -U backports.ssl_match_hostname tornado
# Go
RUN curl -sSL https://storage.googleapis.com/golang/go1.4.3.linux-amd64.tar.gz | tar -C /usr/local/ -xz
ENV PATH /usr/local/go/bin:$PATH
# Haxe
RUN mkdir -p /usr/lib/haxe && \
wget -O - https://github.com/HaxeFoundation/haxe/releases/download/3.2.1/haxe-3.2.1-linux64.tar.gz | \
tar -C /usr/lib/haxe --strip-components=1 -xz && \
ln -s /usr/lib/haxe/haxe /usr/bin/haxe && \
ln -s /usr/lib/haxe/haxelib /usr/bin/haxelib && \
mkdir -p /usr/lib/haxe/lib && \
chmod -R 777 /usr/lib/haxe/lib && \
haxelib setup /usr/lib/haxe/lib && \
haxelib install hxcpp
# Node.js
# temporarily removed since this breaks the build (and is not needed to test C# code)
# RUN curl -sSL https://www.npmjs.com/install.sh | sh
# D
RUN curl -sSL http://downloads.dlang.org/releases/2.x/2.070.0/dmd_2.070.0-0_amd64.deb -o /tmp/dmd_2.070.0-0_amd64.deb && \
dpkg -i /tmp/dmd_2.070.0-0_amd64.deb && \
rm /tmp/dmd_2.070.0-0_amd64.deb && \
curl -sSL https://github.com/D-Programming-Deimos/openssl/archive/master.tar.gz| tar xz && \
curl -sSL https://github.com/D-Programming-Deimos/libevent/archive/master.tar.gz| tar xz && \
mkdir -p /usr/include/dmd/druntime/import/deimos /usr/include/dmd/druntime/import/C && \
mv libevent-master/deimos/* openssl-master/deimos/* /usr/include/dmd/druntime/import/deimos/ && \
mv libevent-master/C/* openssl-master/C/* /usr/include/dmd/druntime/import/C/ && \
rm -rf libevent-master openssl-master && \
echo 'gcc -Wl,--no-as-needed $*' > /usr/local/bin/gcc-dmd && \
chmod 755 /usr/local/bin/gcc-dmd && \
echo 'CC=/usr/local/bin/gcc-dmd' >> /etc/dmd.conf
# Dart
ENV PATH /usr/lib/dart/bin:$PATH
# OCaml
RUN echo 'deb http://ppa.launchpad.net/avsm/ppa/ubuntu trusty main' > /etc/apt/sources.list.d/avsm-official-ocaml.list && \
gpg --keyserver keyserver.ubuntu.com --recv 61707B09 && \
gpg --export --armor 61707B09 | apt-key add - && \
apt-get update && \
apt-get install -y ocaml opam && \
opam init && \
opam install oasis
# Rust
RUN curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain 1.17.0
ENV PATH /root/.cargo/bin:$PATH
ENV THRIFT_ROOT /thrift
RUN mkdir -p $THRIFT_ROOT/src
COPY Dockerfile $THRIFT_ROOT/

Some files were not shown because too many files have changed in this diff