Files
fn-serverless/api/agent/func_logger.go
Reed Allman cbe0d5e9ac add user syslog writers to app (#970)
* add user syslog writers to app

users may specify a syslog url[s] on apps now and all functions under that app
will spew their logs out to it. the docs have more information around details
there, please review those (swagger and operating/logging.md), tried to
implement to spec in some parts and improve others, open to feedback on
format though, lots of liberty there.

design decision wise, I am looking to the future and ignoring cold containers.
the overhead of the connections there will not be worth it, so this feature
only works for hot functions, since we're killing cold anyway (even if a user
can just straight up exit a hot container).

syslog connections will be opened against a container when it starts up, and
then the call id that is logged gets swapped out for each call that goes
through the container, this cuts down on the cost of opening/closing
connections significantly. there are buffers to accumulate logs until we get a
`\n` to actually write a syslog line, and a buffer to save some bytes when
we're writing the syslog formatting as well. underneath writers re-use the
line writer in certain scenarios (swapper). we could likely improve the ease
of setting this up, but opening the syslog conns against a container seems
worth it, and is a different path than the other func loggers that we create
when we make a call object. the Close() stuff is a little tricky, not sure how
to make it easier and have the ^ benefits, open to ideas.

this does add another vector of 'limits' to consider for more strict service
operators. one being how many syslog urls can a user add to an app (infinite,
atm) and the other being on the order of number of containers per host we
could run out of connections in certain scenarios. there may be some utility
in having multiple syslog sinks to send to, it could help with debugging at
times to send to another destination or if a user is a client w/ someone and
both want the function logs, e.g. (have used this for that in the past,
specifically).

this also doesn't work behind a proxy, which is something i'm open to fixing,
but afaict will require a 3rd party dependency (we can pretty much steal what
docker does). this is mostly of utility for those of us that work behind a
proxy all the time, not really for end users.

there are some unit tests. integration tests for this don't sound very fun to
maintain. I did test against papertrail with each protocol and it works (and
even times out if you're behind a proxy!).

closes #337

* add trace to syslog dial
2018-05-15 11:00:26 -07:00

222 lines
5.9 KiB
Go

package agent
import (
"bytes"
"context"
"fmt"
"io"
"sync"
"github.com/fnproject/fn/api/models"
"github.com/sirupsen/logrus"
)
var (
	// bufPool recycles the scratch buffers used by lineWriter to
	// accumulate partial lines before flushing them (lbuf in setupLogger).
	bufPool = &sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}
	// logPool recycles the size-limited buffers that hold a call's log
	// output for reading back later (dbuf in setupLogger). Kept separate
	// from bufPool since the two uses grow to very different sizes.
	logPool = &sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}
)
// setupLogger returns a ReadWriteCloser that may have:
// * [always] writes bytes to a size limited buffer, that can be read from using io.Reader
// * [always] writes bytes per line to stderr as DEBUG
//
// To prevent write failures from failing the call or any other writes,
// multiWriteCloser ignores errors. Close will flush the line writers
// appropriately. The returned io.ReadWriteCloser is not safe for use after
// calling Close.
//
// NOTE(review): ctx is currently unused here; presumably kept for
// signature consistency / future tracing — confirm before removing.
func setupLogger(ctx context.Context, maxSize uint64, c *models.Call) io.ReadWriteCloser {
	lbuf := bufPool.Get().(*bytes.Buffer)
	dbuf := logPool.Get().(*bytes.Buffer)

	// releaseBuffers returns both buffers to their pools on Close.
	// (renamed from `close`, which shadowed the predeclared close builtin)
	releaseBuffers := func() error {
		// TODO we may want to toss out buffers that grow to grotesque size but meh they will prob get GC'd
		lbuf.Reset()
		dbuf.Reset()
		bufPool.Put(lbuf)
		logPool.Put(dbuf)
		return nil
	}

	// we don't need to log per line to db, but we do need to limit it
	limitw := &nopCloser{newLimitWriter(int(maxSize), dbuf)}

	// accumulate all line writers, wrap in same line writer (to re-use buffer)
	stderrLogger := logrus.WithFields(logrus.Fields{"user_log": true, "app_id": c.AppID, "path": c.Path, "image": c.Image, "call_id": c.ID})
	loggo := &nopCloser{&logWriter{stderrLogger}}

	// we don't need to limit the log writer(s), but we do need it to dispense lines
	linew := newLineWriterWithBuffer(lbuf, loggo)

	mw := multiWriteCloser{linew, limitw, &fCloser{releaseBuffers}}
	return &rwc{mw, dbuf}
}
// implements io.ReadWriteCloser, fmt.Stringer and Bytes()
// TODO WriteString and ReadFrom would be handy to implement,
// ReadFrom is a little involved.
type rwc struct {
io.WriteCloser
// buffer is not embedded since it would bypass calls to WriteCloser.Write
// in cases such as WriteString and ReadFrom
b *bytes.Buffer
}
func (r *rwc) Read(b []byte) (int, error) { return r.b.Read(b) }
func (r *rwc) String() string { return r.b.String() }
func (r *rwc) Bytes() []byte { return r.b.Bytes() }
// fCloser is a no-op writer whose Close delegates to a stored closure.
type fCloser struct {
	close func() error
}

// Write discards the input while reporting it fully written.
func (f *fCloser) Write(p []byte) (int, error) {
	return len(p), nil
}

// Close runs the wrapped closure.
func (f *fCloser) Close() error {
	return f.close()
}
type nopCloser struct {
io.Writer
}
func (n *nopCloser) Close() error { return nil }
type nullReadWriter struct {
io.ReadCloser
}
func (n nullReadWriter) Close() error {
return nil
}
func (n nullReadWriter) Read(b []byte) (int, error) {
return 0, io.EOF
}
func (n nullReadWriter) Write(b []byte) (int, error) {
return len(b), io.EOF
}
// multiWriteCloser fans Write and Close out to every inner WriteCloser and
// deliberately swallows all of their errors. you say, oh, this is a bad idea?
// yes, well, we were going to silence them all individually anyway, so let's not be shy about it.
// the main thing we need to ensure is that every close is called, even if another errors.
// XXX(reed): maybe we should log it (for syslog, it may help debug, maybe we just log that one)
type multiWriteCloser []io.WriteCloser

// Write forwards p to each inner writer, best-effort, and always reports
// the full input length as written.
func (m multiWriteCloser) Write(p []byte) (int, error) {
	for i := range m {
		_, _ = m[i].Write(p) // errors intentionally dropped
	}
	return len(p), nil
}

// Close closes every inner writer; a failure in one never prevents the
// rest from being closed.
func (m multiWriteCloser) Close() error {
	for i := range m {
		_ = m[i].Close() // errors intentionally dropped
	}
	return nil
}
// logWriter will log (to real stderr) every call to Write as a line at
// DEBUG level. it should be wrapped with a lineWriter so that the output
// makes sense.
type logWriter struct {
	logrus.FieldLogger
}

// Write emits the bytes as one debug log entry and reports full consumption.
func (l *logWriter) Write(p []byte) (int, error) {
	line := string(p)
	l.Debug(line)
	return len(p), nil
}
// lineWriter buffers all calls to Write and will call Write
// on the underlying writer once per new line. Close must
// be called to ensure that the buffer is flushed, and a newline
// will be appended in Close if none is present.
type lineWriter struct {
b *bytes.Buffer
w io.WriteCloser
}
func newLineWriter(w io.WriteCloser) io.WriteCloser {
return &lineWriter{b: new(bytes.Buffer), w: w}
}
func newLineWriterWithBuffer(b *bytes.Buffer, w io.WriteCloser) io.WriteCloser {
return &lineWriter{b: b, w: w}
}
func (li *lineWriter) Write(ogb []byte) (int, error) {
li.b.Write(ogb) // bytes.Buffer is guaranteed, read it!
for {
b := li.b.Bytes()
i := bytes.IndexByte(b, '\n')
if i < 0 {
break // no more newlines in buffer
}
// write in this line and advance buffer past it
l := b[:i+1]
ns, err := li.w.Write(l)
if err != nil {
return ns, err
}
li.b.Next(len(l))
}
// technically we wrote all the bytes, so make things appear normal
return len(ogb), nil
}
func (li *lineWriter) Close() error {
defer li.w.Close() // MUST close this (after writing last line)
// flush the remaining bytes in the buffer to underlying writer, adding a
// newline if needed
b := li.b.Bytes()
if len(b) == 0 {
return nil
}
if b[len(b)-1] != '\n' {
b = append(b, '\n')
}
_, err := li.w.Write(b)
return err
}
// io.Writer that allows limiting bytes written to w
// TODO change to use clamp writer, this is dupe code
type limitDiscardWriter struct {
n, max int
io.Writer
}
func newLimitWriter(max int, w io.Writer) io.Writer {
return &limitDiscardWriter{max: max, Writer: w}
}
func (l *limitDiscardWriter) Write(b []byte) (int, error) {
inpLen := len(b)
if l.n >= l.max {
return inpLen, nil
}
if l.n+inpLen >= l.max {
// cut off to prevent gigantic line attack
b = b[:l.max-l.n]
}
n, err := l.Writer.Write(b)
l.n += n
if l.n >= l.max {
// write in truncation message to log once
l.Writer.Write([]byte(fmt.Sprintf("\n-----max log size %d bytes exceeded, truncating log-----\n", l.max)))
} else if n != len(b) {
// Is this truly a partial write? We'll be honest if that's the case.
return n, err
}
// yes, we lie... this is to prevent callers to blow up, we always pretend
// that we were able to write the entire buffer.
return inpLen, err
}