update vendor/ dir to latest w/o heroku, moby

had to lock a lot of things in place
Reed Allman
2017-08-03 02:38:15 -07:00
parent 780791da1c
commit 30f3c45dbc
5637 changed files with 191713 additions and 1133103 deletions

View File

@@ -3,13 +3,13 @@ package twitter
import (
"fmt"
"net/http"
"time"
"github.com/dghubble/sling"
)
// Tweet represents a Twitter Tweet, previously called a status.
// https://dev.twitter.com/overview/api/tweets
// Deprecated fields: Contributors, Geo, Annotations
type Tweet struct {
Coordinates *Coordinates `json:"coordinates"`
CreatedAt string `json:"created_at"`
@@ -48,6 +48,11 @@ type Tweet struct {
QuotedStatus *Tweet `json:"quoted_status"`
}
// CreatedAtTime is a convenience wrapper that returns the CreatedAt field parsed as a time.Time
func (t Tweet) CreatedAtTime() (time.Time, error) {
return time.Parse(time.RubyDate, t.CreatedAt)
}
// ExtendedTweet represents fields embedded in extended Tweets when served in
// compatibility mode (default).
// https://dev.twitter.com/overview/api/upcoming-changes-to-tweets
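As a quick illustration of what the new CreatedAtTime helper does, here is a minimal standalone sketch of parsing a created_at value with time.RubyDate; the timestamp below is a made-up sample.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Twitter serializes created_at in time.RubyDate form,
	// e.g. "Mon Jan 02 15:04:05 -0700 2006".
	createdAt := "Wed Aug 27 13:08:45 +0000 2008" // hypothetical sample value
	t, err := time.Parse(time.RubyDate, createdAt)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(t.UTC()) // 2008-08-27 13:08:45 +0000 UTC
}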

View File

@@ -1,7 +1,9 @@
package twitter
import (
"strings"
"bufio"
"bytes"
"io"
"time"
)
@@ -19,38 +21,75 @@ func stopped(done <-chan struct{}) bool {
// or until at least the duration d has elapsed, whichever comes first. This
// is similar to time.Sleep(d), except it can be interrupted.
func sleepOrDone(d time.Duration, done <-chan struct{}) {
sleep := time.NewTimer(d)
defer sleep.Stop()
select {
case <-time.After(d):
case <-sleep.C:
return
case <-done:
return
}
}
// scanLines is a split function for a Scanner that returns each line of text
// stripped of the end-of-line marker "\r\n" used by Twitter Streaming APIs.
// This differs from the bufio.ScanLines split function which considers the
// '\r' optional.
// https://dev.twitter.com/streaming/overview/processing
func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
if i := strings.Index(string(data), "\r\n"); i >= 0 {
// We have a full '\r\n' terminated line.
return i + 2, data[0:i], nil
}
// If we're at EOF, we have a final, non-terminated line. Return it.
if atEOF {
return len(data), dropCR(data), nil
}
// Request more data.
return 0, nil, nil
}
// streamResponseBodyReader is a buffered reader for a Twitter stream response
// body. Unlike bufio.Scanner, it can handle messages of arbitrary length.
type streamResponseBodyReader struct {
reader *bufio.Reader
buf bytes.Buffer
}
func dropCR(data []byte) []byte {
if len(data) > 0 && data[len(data)-1] == '\n' {
return data[0 : len(data)-1]
}
return data
}
// newStreamResponseBodyReader returns an instance of streamResponseBodyReader
// for the given Twitter stream response body.
func newStreamResponseBodyReader(body io.Reader) *streamResponseBodyReader {
return &streamResponseBodyReader{reader: bufio.NewReader(body)}
}
// readNext reads the Twitter stream response body and returns the next stream
// message, if one exists. It returns io.EOF once the end of the stream is
// reached and there are no more messages to read.
func (r *streamResponseBodyReader) readNext() ([]byte, error) {
// Discard all the bytes from buf and continue to use the allocated memory
// space for reading the next message.
r.buf.Truncate(0)
for {
// Twitter stream messages are separated by "\r\n", and a valid message
// may contain a bare '\n' in the middle.
// bufio.Reader.ReadBytes() accepts only a single-byte delimiter, so we
// first break out each line on '\n' and then check whether the line ends
// with "\r\n" to find message boundaries.
// https://dev.twitter.com/streaming/overview/processing
line, err := r.reader.ReadBytes('\n')
// Non-EOF error should be propagated to callers immediately.
if err != nil && err != io.EOF {
return nil, err
}
// EOF error means that we reached the end of the stream body before finding
// delimiter '\n'. If "line" is empty, it means the reader didn't read any
// data from the stream before reaching EOF and there's nothing to append to
// buf.
if err == io.EOF && len(line) == 0 {
// if buf has no data, propagate io.EOF to callers and let them know that
// we've finished processing the stream.
if r.buf.Len() == 0 {
return nil, err
}
// Otherwise, we still have a remaining stream message to return.
break
}
// If the line ends with "\r\n", it's the end of one stream message data.
if bytes.HasSuffix(line, []byte("\r\n")) {
// reader.ReadBytes() returns a slice including the delimiter itself, so
// we need to trim '\n' as well as '\r' from the end of the slice.
r.buf.Write(bytes.TrimRight(line, "\r\n"))
break
}
// Otherwise, the line is not the end of a stream message, so we append
// the line to buf and continue to scan lines.
r.buf.Write(line)
}
// Get the stream message bytes from buf. Note that Bytes() doesn't mark the
// returned data as "read"; we must explicitly call Truncate(0) to discard
// buf's contents before writing the next stream message into it.
return r.buf.Bytes(), nil
}
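A minimal usage sketch of the new reader, assuming it sits alongside it in the same package (the type is unexported); the stream body below is a made-up in-memory payload.

package twitter

import (
	"fmt"
	"io"
	"strings"
)

// exampleReadLoop is a hypothetical helper showing the intended readNext loop.
func exampleReadLoop() {
	// Made-up body: two messages delimited by "\r\n", a blank keep-alive line
	// in between, and a bare '\n' inside the second message.
	body := strings.NewReader("{\"id\":1}\r\n\r\n{\"text\":\"two\nlines\"}\r\n")
	r := newStreamResponseBodyReader(body)
	for {
		msg, err := r.readNext()
		if err == io.EOF {
			return // end of stream
		}
		if err != nil {
			fmt.Println("read error:", err)
			return
		}
		if len(msg) == 0 {
			continue // blank keep-alive line
		}
		fmt.Printf("message: %s\n", msg)
	}
}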

View File

@@ -1,6 +1,10 @@
package twitter
import (
"bufio"
"bytes"
"io"
"strings"
"testing"
"time"
@@ -40,25 +44,68 @@ func TestSleepOrDone_Done(t *testing.T) {
assertDone(t, completed, defaultTestTimeout)
}
func TestScanLines(t *testing.T) {
func TestStreamResponseBodyReader(t *testing.T) {
cases := []struct {
input []byte
atEOF bool
advance int
token []byte
in []byte
want [][]byte
}{
{[]byte("Line 1\r\n"), false, 8, []byte("Line 1")},
{[]byte("Line 1\n"), false, 0, nil},
{[]byte("Line 1"), false, 0, nil},
{[]byte(""), false, 0, nil},
{[]byte("Line 1\r\n"), true, 8, []byte("Line 1")},
{[]byte("Line 1\n"), true, 7, []byte("Line 1")},
{[]byte("Line 1"), true, 6, []byte("Line 1")},
{[]byte(""), true, 0, nil},
{
in: []byte("foo\r\nbar\r\n"),
want: [][]byte{
[]byte("foo"),
[]byte("bar"),
},
},
{
in: []byte("foo\nbar\r\n"),
want: [][]byte{
[]byte("foo\nbar"),
},
},
{
in: []byte("foo\r\n\r\n"),
want: [][]byte{
[]byte("foo"),
[]byte(""),
},
},
{
in: []byte("foo\r\nbar"),
want: [][]byte{
[]byte("foo"),
[]byte("bar"),
},
},
{
// The message is longer than bufio.MaxScanTokenSize, so it cannot be
// parsed by a bufio.Scanner with the default buffer size.
in: []byte(strings.Repeat("X", bufio.MaxScanTokenSize+1) + "\r\n"),
want: [][]byte{
[]byte(strings.Repeat("X", bufio.MaxScanTokenSize+1)),
},
},
}
for _, c := range cases {
advance, token, _ := scanLines(c.input, c.atEOF)
assert.Equal(t, c.advance, advance)
assert.Equal(t, c.token, token)
body := bytes.NewReader(c.in)
reader := newStreamResponseBodyReader(body)
for i, want := range c.want {
data, err := reader.readNext()
if err != nil {
t.Errorf("reader(%q).readNext() * %d: err == %q, want nil", c.in, i, err)
}
if !bytes.Equal(data, want) {
t.Errorf("reader(%q).readNext() * %d: data == %q, want %q", c.in, i, data, want)
}
}
data, err := reader.readNext()
if err != io.EOF {
t.Errorf("reader(%q).readNext() * %d: err == %q, want io.EOF", c.in, len(c.want), err)
}
if len(data) != 0 {
t.Errorf("reader(%q).readNext() * %d: data == %q, want \"\"", c.in, len(c.want), data)
}
}
}
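For context on the oversized test case above: with its default buffer, bufio.Scanner gives up on any single message longer than bufio.MaxScanTokenSize, which is the failure mode the new reader avoids. A small standalone sketch with sample data only:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	// One "line" longer than the default 64 KiB token limit.
	body := strings.Repeat("X", bufio.MaxScanTokenSize+1) + "\r\n"
	scanner := bufio.NewScanner(strings.NewReader(body))
	for scanner.Scan() {
		fmt.Println("scanned", len(scanner.Bytes()), "bytes") // never reached
	}
	// Scan stops and reports bufio.ErrTooLong ("token too long").
	fmt.Println("scanner error:", scanner.Err())
}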

View File

@@ -1,7 +1,6 @@
package twitter
import (
"bufio"
"encoding/json"
"io"
"net/http"
@@ -228,23 +227,20 @@ func (s *Stream) retry(req *http.Request, expBackOff backoff.BackOff, aggExpBack
// receive scans a stream response body, JSON decodes tokens to messages, and
// sends messages to the Messages channel. Receiving continues until an EOF,
// scan error, or the done channel is closed.
func (s *Stream) receive(body io.ReadCloser) {
defer body.Close()
// A bufio.Scanner steps through 'tokens' of data on each Scan() using a
// SplitFunc. SplitFunc tokenizes input bytes to return the number of bytes
// to advance, the token slice of bytes, and any errors.
scanner := bufio.NewScanner(body)
// default ScanLines SplitFunc is incorrect for Twitter Streams, set custom
scanner.Split(scanLines)
for !stopped(s.done) && scanner.Scan() {
token := scanner.Bytes()
if len(token) == 0 {
func (s *Stream) receive(body io.Reader) {
reader := newStreamResponseBodyReader(body)
for !stopped(s.done) {
data, err := reader.readNext()
if err != nil {
return
}
if len(data) == 0 {
// empty keep-alive
continue
}
select {
// send messages, data, or errors
case s.Messages <- getMessage(token):
case s.Messages <- getMessage(data):
continue
// allow client to Stop(), even if not receiving
case <-s.done:

View File

@@ -3,6 +3,7 @@ package twitter
import (
"fmt"
"net/http"
"strings"
"sync"
"testing"
@@ -205,6 +206,46 @@ func TestStream_User(t *testing.T) {
assert.Equal(t, expectedCounts, counts)
}
func TestStream_User_TooManyFriends(t *testing.T) {
httpClient, mux, server := testServer()
defer server.Close()
reqCount := 0
mux.HandleFunc("/1.1/user.json", func(w http.ResponseWriter, r *http.Request) {
assertMethod(t, "GET", r)
assertQuery(t, map[string]string{"stall_warnings": "true", "with": "followings"}, r)
switch reqCount {
case 0:
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Transfer-Encoding", "chunked")
// The first friends list message is longer than bufio.MaxScanTokenSize (65536) bytes
friendsList := "[" + strings.Repeat("1234567890, ", 7000) + "1234567890]"
fmt.Fprintf(w, `{"friends": %s}`+"\r\n"+"\r\n", friendsList)
default:
// Only allow first request
http.Error(w, "Stream API not available!", 130)
}
reqCount++
})
counts := &counter{}
demux := newCounterDemux(counts)
client := NewClient(httpClient)
streamUserParams := &StreamUserParams{
StallWarnings: Bool(true),
With: "followings",
}
stream, err := client.Streams.User(streamUserParams)
// assert that the expected messages are received
assert.NoError(t, err)
defer stream.Stop()
for message := range stream.Messages {
demux.Handle(message)
}
expectedCounts := &counter{all: 1, friendsList: 1}
assert.Equal(t, expectedCounts, counts)
}
func TestStream_Site(t *testing.T) {
httpClient, mux, server := testServer()
defer server.Close()