disable pure runner logging (#1313)

* disable pure runner logging

there's a racy bug where the logger is written to while it's closing, but
chasing it led to figuring out that we don't need the logger at all in pure
runner: the syslog thing isn't an in-process fn thing, and pure runner doesn't
use the logs from attach for anything further. so this disables the logger at
the docker level, which saves sending the bytes back over the wire and could
be a nice little performance bump too. of course, with this, agents can be
configured to not log debug or to have no logs to store at all, and not a lot
of guards have been put on this for 'full' agent mode while it hangs on a
cross feeling the breeze awaiting its demise - the default configuration
remains the same, and there are no behavior changes in 'full' agent here.
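
to illustrate the mechanism (a sketch assuming the go-dockerclient types the
driver uses below, not the literal driver code): with the noop logger in
place, the container is created without stdout/stderr attach, so the daemon
never streams those frames over the connection at all, instead of us reading
and discarding them client-side:

    package main

    import docker "github.com/fsouza/go-dockerclient"

    // containerOpts is a hypothetical helper for this demo: logsOff would be
    // true when the configured logger is the noop type.
    func containerOpts(logsOff bool) docker.CreateContainerOptions {
        return docker.CreateContainerOptions{
            Config: &docker.Config{
                AttachStdout: !logsOff, // off -> bytes never cross the wire
                AttachStderr: !logsOff,
            },
        }
    }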

it was a lot smoother to make the noop than to try to plumb 'nil' through for
stdout/stderr; this gets the same effect with a much lower risk of nil panics.
it's not perfect to rely on type casting, but plumbing in an interface to
check has the same issue (any decorator loses the interface adherence), so
this seems ok. defaulting to not having a logger was similarly painful, and
ended up with this. but open to ideas.
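
for illustration, a self-contained sketch of the pattern (the type mirrors
the NoopReadWriteCloser added to the common package below; the main
scaffolding is just for the demo):

    package main

    import (
        "fmt"
        "io"
    )

    // NoopReadWriteCloser satisfies io.ReadWriteCloser: writes are discarded
    // and reads are immediately-EOF, so callers never need nil checks.
    type NoopReadWriteCloser struct{}

    func (NoopReadWriteCloser) Read(b []byte) (int, error)  { return 0, io.EOF }
    func (NoopReadWriteCloser) Write(b []byte) (int, error) { return len(b), nil }
    func (NoopReadWriteCloser) Close() error                { return nil }

    func main() {
        var logger io.ReadWriteCloser = NoopReadWriteCloser{}

        // "is logging off?" is a type assertion rather than a nil check; any
        // decorator wrapping logger would hide the concrete type, which is
        // the imperfection mentioned above.
        _, off := logger.(NoopReadWriteCloser)
        fmt.Println("logging off:", off) // prints: logging off: true
    }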

* replace usage of old null reader writer impl

* make Read return io.EOF for io.Copy usage
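
(context for that last fix, as a minimal stdlib demo rather than code from
this repo: io.Copy loops calling Read until it gets an error, treats io.EOF
as "done", and swallows it; a Read that kept returning (0, nil) would make
io.Copy spin forever, hence returning io.EOF.)

    package main

    import (
        "fmt"
        "io"
        "io/ioutil"
    )

    type noop struct{}

    // (0, io.EOF) tells io.Copy the source is drained on the first call
    func (noop) Read(b []byte) (int, error) { return 0, io.EOF }

    func main() {
        // io.Copy swallows io.EOF and reports success, so this returns at once
        n, err := io.Copy(ioutil.Discard, noop{})
        fmt.Println(n, err) // prints: 0 <nil>
    }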
Reed Allman authored 2018-11-16 12:56:49 -06:00, committed by GitHub
parent 06cef103f6
commit 29fdbc9b49
6 changed files with 53 additions and 35 deletions

View File

@@ -172,6 +172,14 @@ func WithWriter(w io.Writer) CallOpt {
 	}
 }
 
+// WithLogger sets stderr to the provided one
+func WithLogger(w io.ReadWriteCloser) CallOpt {
+	return func(c *call) error {
+		c.stderr = w
+		return nil
+	}
+}
+
 // InvokeDetached mark a call to be a detached call
 func InvokeDetached() CallOpt {
 	return func(c *call) error {
@@ -198,8 +206,6 @@ func WithExtensions(extensions map[string]string) CallOpt {
 }
 
 // GetCall builds a Call that can be used to submit jobs to the agent.
-//
-// TODO where to put this? async and sync both call this
 func (a *agent) GetCall(opts ...CallOpt) (Call, error) {
 	var c call
@@ -241,8 +247,11 @@ func (a *agent) GetCall(opts ...CallOpt) (Call, error) {
 	c.handler = a.da
 	c.ct = a
 
-	// TODO(reed): is line writer is vulnerable to attack?
-	c.stderr = setupLogger(c.req.Context(), a.cfg.MaxLogSize, !a.cfg.DisableDebugUserLogs, c.Call)
+	if c.stderr == nil {
+		// TODO(reed): is line writer is vulnerable to attack?
+		// XXX(reed): forcing this as default is not great / configuring it isn't great either. reconsider.
+		c.stderr = setupLogger(c.req.Context(), a.cfg.MaxLogSize, !a.cfg.DisableDebugUserLogs, c.Call)
+	}
 	if c.w == nil {
 		// send STDOUT to logs if no writer given (async...)
 		// TODO we could/should probably make this explicit to GetCall, ala 'WithLogger', but it's dupe code (who cares?)

View File

@@ -219,18 +219,20 @@ func (drv *DockerDriver) CreateCookie(ctx context.Context, task drivers.Containe
 	ctx, log := common.LoggerWithFields(ctx, logrus.Fields{"stack": "CreateCookie"})
 
-	stdinOn := task.Input() != nil
+	// XXX(reed): we can do same with stderr/stdout
+	_, stdinOff := task.Input().(common.NoopReadWriteCloser)
+	stdout, stderr := task.Logger()
+	_, stdoutOff := stdout.(common.NoopReadWriteCloser)
+	_, stderrOff := stderr.(common.NoopReadWriteCloser)
 
 	opts := docker.CreateContainerOptions{
 		Name: task.Id(),
 		Config: &docker.Config{
 			Image:        task.Image(),
-			OpenStdin:    stdinOn,
-			StdinOnce:    stdinOn,
-			AttachStdin:  stdinOn,
-			AttachStdout: true,
-			AttachStderr: true,
+			OpenStdin:    !stdinOff,
+			StdinOnce:    !stdinOff,
+			AttachStdin:  !stdinOff,
+			AttachStdout: !stdoutOff,
+			AttachStderr: !stderrOff,
 		},
 		HostConfig: &docker.HostConfig{
 			ReadonlyRootfs: drv.conf.EnableReadOnlyRootFs,
@@ -284,6 +286,11 @@ func (drv *DockerDriver) run(ctx context.Context, container string, task drivers
 	mwOut, mwErr := task.Logger()
 	successChan := make(chan struct{})
 
+	_, stdinOff := task.Input().(common.NoopReadWriteCloser)
+	stdout, stderr := task.Logger()
+	_, stdoutOff := stdout.(common.NoopReadWriteCloser)
+	_, stderrOff := stderr.(common.NoopReadWriteCloser)
+
 	waiter, err := drv.docker.AttachToContainerNonBlocking(ctx, docker.AttachToContainerOptions{
 		Container:    container,
 		InputStream:  task.Input(),
@@ -291,9 +298,9 @@ func (drv *DockerDriver) run(ctx context.Context, container string, task drivers
 		ErrorStream:  mwErr,
 		Success:      successChan,
 		Stream:       true,
-		Stdout:       true,
-		Stderr:       true,
-		Stdin:        true,
+		Stdout:       !stdoutOff,
+		Stderr:       !stderrOff,
+		Stdin:        !stdinOff,
 	})
 	if err == nil {

View File

@@ -87,20 +87,6 @@ type nopCloser struct {
 
 func (n *nopCloser) Close() error { return nil }
 
-type nullReadWriter struct {
-	io.ReadCloser
-}
-
-func (n nullReadWriter) Close() error {
-	return nil
-}
-
-func (n nullReadWriter) Read(b []byte) (int, error) {
-	return 0, io.EOF
-}
-
-func (n nullReadWriter) Write(b []byte) (int, error) {
-	return len(b), io.EOF
-}
-
 // multiWriteCloser ignores all errors from inner writers. you say, oh, this is a bad idea?
 // yes, well, we were going to silence them all individually anyway, so let's not be shy about it.
 // the main thing we need to ensure is that every close is called, even if another errors.

View File

@@ -156,7 +156,7 @@ func (a *lbAgent) GetCall(opts ...CallOpt) (Call, error) {
 	c.handler = a.cda
 	c.ct = a
-	c.stderr = &nullReadWriter{}
+	c.stderr = common.NoopReadWriteCloser{}
 
 	c.slotHashId = getSlotQueueKey(&c)
 	return &c, nil
 }

View File

@@ -649,7 +649,8 @@ func (pr *pureRunner) handleTryCall(tc *runner.TryCall, state *callHandle) error
 	c.StartedAt = common.DateTime(time.Time{})
 	c.CompletedAt = common.DateTime(time.Time{})
 
-	agent_call, err := pr.a.GetCall(FromModelAndInput(&c, state.pipeToFnR),
+	agentCall, err := pr.a.GetCall(FromModelAndInput(&c, state.pipeToFnR),
+		WithLogger(common.NoopReadWriteCloser{}),
 		WithWriter(state),
 		WithContext(state.ctx),
 		WithExtensions(tc.GetExtensions()),
@@ -659,14 +660,14 @@ func (pr *pureRunner) handleTryCall(tc *runner.TryCall, state *callHandle) error
 		return err
 	}
 
-	state.c = agent_call.(*call)
+	state.c = agentCall.(*call)
 	if tc.SlotHashId != "" {
-		hashId, err := hex.DecodeString(tc.SlotHashId)
+		hashID, err := hex.DecodeString(tc.SlotHashId)
 		if err != nil {
 			state.enqueueCallResponse(err)
 			return err
 		}
-		state.c.slotHashId = string(hashId[:])
+		state.c.slotHashId = string(hashID[:])
 	}
 
 	if state.c.Type == models.TypeDetached {
@@ -784,14 +785,15 @@ func (pr *pureRunner) runStatusCall(ctx context.Context) *runner.RunnerStatus {
 	recorder := httptest.NewRecorder()
 	player := ioutil.NopCloser(strings.NewReader(c.Payload))
 
-	agent_call, err := pr.a.GetCall(FromModelAndInput(&c, player),
+	agentCall, err := pr.a.GetCall(FromModelAndInput(&c, player),
+		WithLogger(common.NoopReadWriteCloser{}),
 		WithWriter(recorder),
 		WithContext(execCtx),
 	)
 	if err == nil {
 		var mcall *call
-		mcall = agent_call.(*call)
+		mcall = agentCall.(*call)
 		err = pr.a.Submit(mcall)
 	}

View File

@@ -5,6 +5,20 @@ import (
 	"sync"
 )
 
+// NoopReadWriteCloser implements io.ReadWriteCloser, discarding all bytes, Read always returns EOF
+type NoopReadWriteCloser struct{}
+
+var _ io.ReadWriteCloser = NoopReadWriteCloser{}
+
+// Read implements io.Reader
+func (n NoopReadWriteCloser) Read(b []byte) (int, error) { return 0, io.EOF }
+
+// Write implements io.Writer
+func (n NoopReadWriteCloser) Write(b []byte) (int, error) { return len(b), nil }
+
+// Close implements io.Closer
+func (n NoopReadWriteCloser) Close() error { return nil }
+
 type clampWriter struct {
 	w         io.Writer
 	remaining int64