Mirror of https://github.com/redhat-developer/odo.git (synced 2025-10-19 03:06:19 +03:00)
Display outputs when executing odo run (#6865)
* Change NewRunHandler params to an Options struct
* Pass options to RunHandler to show logs
* Hide spinner and captured standard output since outputs are displayed directly
* Integration tests with a failing command
* Fix outputs
* Use a raw terminal and the local standard I/O streams
* Fix podman I/O
* Fix stdout/stderr
* Test whether stdin/stdout are terminals
* Command reference doc
docs/website/docs/command-reference/run.md (new file, 41 lines added)
@@ -0,0 +1,41 @@
---
title: odo run
---

`odo run` is used to manually execute commands defined in a Devfile.

<details>
<summary>Example</summary>

A command `connect` is defined in the Devfile, executing the `bash` command in the `runtime` component.

```yaml
schemaVersion: 2.2.0
[...]
commands:
  - id: connect
    exec:
      component: runtime
      commandLine: bash
[...]

```

```shell
$ odo run connect
bash-4.4$
```

</details>


For `Exec` commands, `odo dev` needs to be running, and `odo run`
will execute commands in the containers deployed by the `odo dev` command.

Standard input is redirected to the command running in the container, and the terminal is configured in Raw mode. For this reason, every character is forwarded to the command in the container, including Ctrl-c, which can therefore be used to interrupt the command.

The `odo run` command terminates when the command in the container terminates, and the exit status of `odo run` reflects the exit status of the remote command: it is `0` if the command in the container terminates with status `0`, and `1` if the command terminates with any other status.

Resources deployed with `Apply` commands will be deployed in *Dev mode*,
and these resources will be deleted when `odo dev` terminates.
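The exit-status behavior documented above can be checked from a local shell. This is an illustrative session only (the `connect` command is the one from the example Devfile; the remote `exit 3` is just a way to produce a non-zero status):

```shell
$ odo run connect
bash-4.4$ exit 3
$ echo $?
1
```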
@@ -219,13 +219,14 @@ func (do *DeleteComponentClient) ExecutePreStopEvents(ctx context.Context, devfi
 		do.kubeClient,
 		do.execClient,
 		do.configAutomountClient,
-		pod.Name,
-		false,
-		component.GetContainersNames(pod),
-		"Executing pre-stop command in container",
 		// TODO(feloy) set these values when we want to support Apply Image commands for PreStop events
 		nil, nil,
-
-		// TODO(feloy) set these values when we want to support Apply Image/Kubernetes/OpenShift commands for PreStop events
-		nil, nil, parser.DevfileObj{}, "",
+		component.HandlerOptions{
+			PodName:           pod.Name,
+			ContainersRunning: component.GetContainersNames(pod),
+			Msg:               "Executing pre-stop command in container",
+		},
 	)
 	err = libdevfile.ExecPreStopEvents(ctx, devfileObj, handler)
 	if err != nil {
@@ -18,47 +18,67 @@ import (

 const ShellExecutable string = "/bin/sh"

-func ExecuteTerminatingCommand(ctx context.Context, execClient exec.Client, platformClient platform.Client, command devfilev1.Command, componentExists bool, podName string, appName string, componentName string, msg string, show bool) error {
+func ExecuteTerminatingCommand(
+	ctx context.Context,
+	execClient exec.Client,
+	platformClient platform.Client,
+	command devfilev1.Command,
+	componentExists bool,
+	podName string,
+	appName string,
+	componentName string,
+	msg string,
+	directRun bool,
+) error {

 	if componentExists && command.Exec != nil && pointer.BoolDeref(command.Exec.HotReloadCapable, false) {
 		klog.V(2).Infof("command is hot-reload capable, not executing %q again", command.Id)
 		return nil
 	}

-	if msg == "" {
-		msg = fmt.Sprintf("Executing %s command on container %q", command.Id, command.Exec.Component)
-	} else {
-		msg += " (command: " + command.Id + ")"
-	}
-	spinner := log.Spinner(msg)
-	defer spinner.End(false)
+	// Spinner is displayed only if no outputs are displayed
+	var spinner *log.Status
+	var stdoutWriter, stderrWriter *io.PipeWriter
+	var stdoutChannel, stderrChannel chan interface{}

-	logger := machineoutput.NewMachineEventLoggingClient()
-	stdoutWriter, stdoutChannel, stderrWriter, stderrChannel := logger.CreateContainerOutputWriter()
-
-	cmdline := getCmdline(command)
-	_, _, err := execClient.ExecuteCommand(ctx, cmdline, podName, command.Exec.Component, show, stdoutWriter, stderrWriter)
-
-	closeWriterAndWaitForAck(stdoutWriter, stdoutChannel, stderrWriter, stderrChannel)
-
-	spinner.End(err == nil)
-	if err != nil {
-		rd, errLog := Log(platformClient, componentName, appName, false, command)
-		if errLog != nil {
-			return fmt.Errorf("unable to log error %v: %w", err, errLog)
-		}
-
-		// Use GetStderr in order to make sure that colour output is correct
-		// on non-TTY terminals
-		errLog = util.DisplayLog(false, rd, log.GetStderr(), componentName, -1)
-		if errLog != nil {
-			return fmt.Errorf("unable to log error %v: %w", err, errLog)
-		}
-	}
+	if !directRun {
+		if msg == "" {
+			msg = fmt.Sprintf("Executing %s command on container %q", command.Id, command.Exec.Component)
+		} else {
+			msg += " (command: " + command.Id + ")"
+		}
+		spinner = log.Spinner(msg)
+		defer spinner.End(false)
+
+		logger := machineoutput.NewMachineEventLoggingClient()
+		stdoutWriter, stdoutChannel, stderrWriter, stderrChannel = logger.CreateContainerOutputWriter()
+	}
+
+	cmdline := getCmdline(command, !directRun)
+	_, _, err := execClient.ExecuteCommand(ctx, cmdline, podName, command.Exec.Component, directRun, stdoutWriter, stderrWriter)
+
+	if !directRun {
+		closeWriterAndWaitForAck(stdoutWriter, stdoutChannel, stderrWriter, stderrChannel)
+		spinner.End(err == nil)
+
+		if err != nil {
+			rd, errLog := Log(platformClient, componentName, appName, false, command)
+			if errLog != nil {
+				return fmt.Errorf("unable to log error %v: %w", err, errLog)
+			}
+
+			// Use GetStderr in order to make sure that colour output is correct
+			// on non-TTY terminals
+			errLog = util.DisplayLog(false, rd, log.GetStderr(), componentName, -1)
+			if errLog != nil {
+				return fmt.Errorf("unable to log error %v: %w", err, errLog)
+			}
+		}
+	}
 	return err
 }

-func getCmdline(command v1alpha2.Command) []string {
+func getCmdline(command v1alpha2.Command, redirectToPid1 bool) []string {
 	// deal with environment variables
 	var cmdLine string
 	setEnvVariable := util.GetCommandStringFromEnvs(command.Exec.Env)
@@ -73,7 +93,10 @@ func getCmdline(command v1alpha2.Command) []string {
 	// Redirecting to /proc/1/fd/* allows to redirect the process output to the output streams of PID 1 process inside the container.
 	// This way, returning the container logs with 'odo logs' or 'kubectl logs' would work seamlessly.
 	// See https://stackoverflow.com/questions/58716574/where-exactly-do-the-logs-of-kubernetes-pods-come-from-at-the-container-level
-	redirectString := "1>>/proc/1/fd/1 2>>/proc/1/fd/2"
+	redirectString := ""
+	if redirectToPid1 {
+		redirectString = "1>>/proc/1/fd/1 2>>/proc/1/fd/2"
+	}
 	var cmd []string
 	if command.Exec.WorkingDir != "" {
 		// since we are using /bin/sh -c, the command needs to be within a single double quote instance, for example "cd /tmp && pwd"
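To make the effect of `redirectToPid1` concrete, here is a minimal, self-contained sketch of how such a command line could be assembled. The helper and variable names (`buildCmdline`, `workingDir`, `commandLine`) are illustrative and not taken from the odo sources:

```go
package main

import (
	"fmt"
	"strings"
)

func buildCmdline(workingDir, commandLine string, redirectToPid1 bool) []string {
	// Optionally send the process output to the streams of PID 1 so that
	// 'odo logs' / 'kubectl logs' can pick it up later.
	redirect := ""
	if redirectToPid1 {
		redirect = "1>>/proc/1/fd/1 2>>/proc/1/fd/2"
	}
	// /bin/sh -c expects the whole pipeline as a single argument.
	script := strings.TrimSpace(fmt.Sprintf("cd %s && %s %s", workingDir, commandLine, redirect))
	return []string{"/bin/sh", "-c", script}
}

func main() {
	fmt.Println(buildCmdline("/projects", "npm start", true))
	fmt.Println(buildCmdline("/projects", "npm start", false))
}
```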
@@ -31,6 +31,7 @@ type runHandler struct {
 	ComponentExists   bool
 	containersRunning []string
 	msg               string
+	directRun         bool

 	fs           filesystem.Filesystem
 	imageBackend image.Backend
@@ -41,23 +42,29 @@ type runHandler struct {

 var _ libdevfile.Handler = (*runHandler)(nil)

+type HandlerOptions struct {
+	PodName           string
+	ComponentExists   bool
+	ContainersRunning []string
+	Msg               string
+	DirectRun         bool
+
+	// For apply Kubernetes / Openshift
+	Devfile parser.DevfileObj
+	Path    string
+}
+
 func NewRunHandler(
 	ctx context.Context,
 	platformClient platform.Client,
 	execClient exec.Client,
 	configAutomountClient configAutomount.Client,
-	podName string,
-	componentExists bool,
-	containersRunning []string,
-	msg string,
-
 	// For building images
 	fs filesystem.Filesystem,
 	imageBackend image.Backend,
-
-	// For apply Kubernetes / Openshift
-	devfile parser.DevfileObj,
-	path string,
+	options HandlerOptions,

 ) *runHandler {
 	return &runHandler{
@@ -65,16 +72,17 @@ func NewRunHandler(
 		platformClient:        platformClient,
 		execClient:            execClient,
 		configAutomountClient: configAutomountClient,
-		podName:               podName,
-		ComponentExists:       componentExists,
-		containersRunning:     containersRunning,
-		msg:                   msg,
+		podName:           options.PodName,
+		ComponentExists:   options.ComponentExists,
+		containersRunning: options.ContainersRunning,
+		msg:               options.Msg,
+		directRun:         options.DirectRun,

 		fs:           fs,
 		imageBackend: imageBackend,

-		devfile: devfile,
-		path:    path,
+		devfile: options.Devfile,
+		path:    options.Path,
 	}
 }
@@ -129,7 +137,7 @@ func (a *runHandler) ExecuteTerminatingCommand(ctx context.Context, command devf
 		appName = odocontext.GetApplication(a.ctx)
 	)
 	if isContainerRunning(command.Exec.Component, a.containersRunning) {
-		return ExecuteTerminatingCommand(ctx, a.execClient, a.platformClient, command, a.ComponentExists, a.podName, appName, componentName, a.msg, false)
+		return ExecuteTerminatingCommand(ctx, a.execClient, a.platformClient, command, a.ComponentExists, a.podName, appName, componentName, a.msg, a.directRun)
 	}
 	switch platform := a.platformClient.(type) {
 	case kclient.ClientInterface:
@@ -44,14 +44,12 @@ func (o *DeployClient) Deploy(ctx context.Context) error {
 		o.kubeClient,
 		nil,
 		o.configAutomountClient,
-		"",
-		false,
-		nil,
-		"",
 		o.fs,
 		image.SelectBackend(ctx),
-		*devfileObj,
-		path,
+		component.HandlerOptions{
+			Devfile: *devfileObj,
+			Path:    path,
+		},
 	)

 	err := o.buildPushAutoImageComponents(handler, *devfileObj)
@@ -38,15 +38,16 @@ func Run(
 		platformClient,
 		execClient,
 		configAutomountClient,
-		pod.Name,
-		false,
-		component.GetContainersNames(pod),
-		"Executing command in container",
-
 		filesystem,
 		image.SelectBackend(ctx),
-		*devfileObj,
-		devfilePath,
+		component.HandlerOptions{
+			PodName:           pod.Name,
+			ContainersRunning: component.GetContainersNames(pod),
+			Msg:               "Executing command in container",
+			DirectRun:         true,
+			Devfile:           *devfileObj,
+			Path:              devfilePath,
+		},
 	)

 	return libdevfile.ExecuteCommandByName(ctx, *devfileObj, commandName, handler, false)
@@ -7,7 +7,6 @@ import (
 	"time"

 	devfilev1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
-	"github.com/devfile/library/v2/pkg/devfile/parser"
 	parsercommon "github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common"

 	"github.com/redhat-developer/odo/pkg/component"
@@ -95,13 +94,13 @@ func (o *DevClient) innerloop(ctx context.Context, parameters common.PushParamet
 		o.kubernetesClient,
 		o.execClient,
 		o.configAutomountClient,
-		pod.Name,
-		false,
-		component.GetContainersNames(pod),
-		"Executing post-start command in container",
-
 		// TODO(feloy) set these values when we want to support Apply Image/Kubernetes/OpenShift commands for PostStart commands
-		nil, nil, parser.DevfileObj{}, "",
+		nil, nil,
+		component.HandlerOptions{
+			PodName:           pod.Name,
+			ContainersRunning: component.GetContainersNames(pod),
+			Msg:               "Executing post-start command in container",
+		},
 	)
 	err = libdevfile.ExecPostStartEvents(ctx, parameters.Devfile, handler)
 	if err != nil {
@@ -127,15 +126,14 @@ func (o *DevClient) innerloop(ctx context.Context, parameters common.PushParamet
 		o.kubernetesClient,
 		o.execClient,
 		o.configAutomountClient,
-		pod.GetName(),
-		false,
-		component.GetContainersNames(pod),
-		"",
-
 		o.filesystem,
 		image.SelectBackend(ctx),
-		parameters.Devfile,
-		path,
+		component.HandlerOptions{
+			PodName:           pod.GetName(),
+			ContainersRunning: component.GetContainersNames(pod),
+			Devfile:           parameters.Devfile,
+			Path:              path,
+		},
 	)

 	if commandType == devfilev1.ExecCommandType {
@@ -166,13 +164,14 @@ func (o *DevClient) innerloop(ctx context.Context, parameters common.PushParamet
 		o.kubernetesClient,
 		o.execClient,
 		o.configAutomountClient,
-		pod.Name,
-		running,
-		component.GetContainersNames(pod),
-		"Building your application in container",
-
 		// TODO(feloy) set these values when we want to support Apply Image/Kubernetes/OpenShift commands for PostStart commands
-		nil, nil, parser.DevfileObj{}, "",
+		nil, nil,
+		component.HandlerOptions{
+			PodName:           pod.Name,
+			ComponentExists:   running,
+			ContainersRunning: component.GetContainersNames(pod),
+			Msg:               "Building your application in container",
+		},
 	)
 	return libdevfile.Build(ctx, parameters.Devfile, parameters.StartOptions.BuildCommand, execHandler)
 }
@@ -69,13 +69,13 @@ func (o *DevClient) reconcile(
 		o.podmanClient,
 		o.execClient,
 		nil, // TODO(feloy) set this value when we want to support exec on new container on podman
-		pod.Name,
-		false,
-		component.GetContainersNames(pod),
-		"Executing post-start command in container",
-
 		// TODO(feloy) set these values when we want to support Apply Image/Kubernetes/OpenShift commands for PostStart commands
-		nil, nil, parser.DevfileObj{}, "",
+		nil, nil,
+		component.HandlerOptions{
+			PodName:           pod.Name,
+			ContainersRunning: component.GetContainersNames(pod),
+			Msg:               "Executing post-start command in container",
+		},
 	)
 	err = libdevfile.ExecPostStartEvents(ctx, devfileObj, execHandler)
 	if err != nil {
@@ -91,13 +91,14 @@ func (o *DevClient) reconcile(
 		o.podmanClient,
 		o.execClient,
 		nil, // TODO(feloy) set this value when we want to support exec on new container on podman
-		pod.Name,
-		componentStatus.RunExecuted,
-		component.GetContainersNames(pod),
-		"Building your application in container",
-
 		// TODO(feloy) set these values when we want to support Apply Image/Kubernetes/OpenShift commands for PreStop events
-		nil, nil, parser.DevfileObj{}, "",
+		nil, nil,
+		component.HandlerOptions{
+			PodName:           pod.Name,
+			ComponentExists:   componentStatus.RunExecuted,
+			ContainersRunning: component.GetContainersNames(pod),
+			Msg:               "Building your application in container",
+		},
 	)
 	return libdevfile.Build(ctx, devfileObj, options.BuildCommand, execHandler)
 }
@@ -119,16 +120,14 @@ func (o *DevClient) reconcile(
 		o.podmanClient,
 		o.execClient,
 		nil, // TODO(feloy) set this value when we want to support exec on new container on podman
-		pod.Name,
-		componentStatus.RunExecuted,
-		component.GetContainersNames(pod),
-		"",
-
 		o.fs,
 		image.SelectBackend(ctx),
-
-		// TODO(feloy) set to deploy Kubernetes/Openshift components
-		parser.DevfileObj{}, "",
+		component.HandlerOptions{
+			PodName:           pod.Name,
+			ComponentExists:   componentStatus.RunExecuted,
+			ContainersRunning: component.GetContainersNames(pod),
+		},
 	)
 	err = libdevfile.ExecuteCommandByNameAndKind(ctx, devfileObj, cmdName, cmdKind, cmdHandler, false)
 	if err != nil {
@@ -9,6 +9,7 @@ import (
 	"strings"

 	"k8s.io/klog"
+	"k8s.io/kubectl/pkg/util/term"

 	"github.com/redhat-developer/odo/pkg/log"
 	"github.com/redhat-developer/odo/pkg/platform"
@@ -26,46 +27,59 @@ func NewExecClient(platformClient platform.Client) *ExecClient {

 // ExecuteCommand executes the given command in the pod's container,
 // writing the output to the specified respective pipe writers
-func (o ExecClient) ExecuteCommand(ctx context.Context, command []string, podName string, containerName string, show bool, stdoutWriter *io.PipeWriter, stderrWriter *io.PipeWriter) (stdout []string, stderr []string, err error) {
-	soutReader, soutWriter := io.Pipe()
-	serrReader, serrWriter := io.Pipe()
+// when directRun is true, will execute the command with terminal in Raw mode and connected to local standard I/Os
+// so input, including Ctrl-c, is sent to the remote process
+func (o ExecClient) ExecuteCommand(ctx context.Context, command []string, podName string, containerName string, directRun bool, stdoutWriter *io.PipeWriter, stderrWriter *io.PipeWriter) (stdout []string, stderr []string, err error) {
+	if !directRun {
+		soutReader, soutWriter := io.Pipe()
+		serrReader, serrWriter := io.Pipe()

-	klog.V(2).Infof("Executing command %v for pod: %v in container: %v", command, podName, containerName)
+		klog.V(2).Infof("Executing command %v for pod: %v in container: %v", command, podName, containerName)

-	// Read stdout and stderr, store their output in cmdOutput, and also pass output to consoleOutput Writers (if non-nil)
-	stdoutCompleteChannel := startReaderGoroutine(soutReader, show, &stdout, stdoutWriter)
-	stderrCompleteChannel := startReaderGoroutine(serrReader, show, &stderr, stderrWriter)
+		// Read stdout and stderr, store their output in cmdOutput, and also pass output to consoleOutput Writers (if non-nil)
+		stdoutCompleteChannel := startReaderGoroutine(os.Stdout, soutReader, directRun, &stdout, stdoutWriter)
+		stderrCompleteChannel := startReaderGoroutine(os.Stderr, serrReader, directRun, &stderr, stderrWriter)

-	err = o.platformClient.ExecCMDInContainer(ctx, containerName, podName, command, soutWriter, serrWriter, nil, false)
+		err = o.platformClient.ExecCMDInContainer(ctx, containerName, podName, command, soutWriter, serrWriter, nil, false)

-	// Block until we have received all the container output from each stream
-	_ = soutWriter.Close()
-	<-stdoutCompleteChannel
-	_ = serrWriter.Close()
-	<-stderrCompleteChannel
+		// Block until we have received all the container output from each stream
+		_ = soutWriter.Close()
+		<-stdoutCompleteChannel
+		_ = serrWriter.Close()
+		<-stderrCompleteChannel

-	if err != nil {
-		// It is safe to read from stdout and stderr here, as the goroutines are guaranteed to have terminated at this point.
-		klog.V(2).Infof("ExecuteCommand returned an an err: %v. for command '%v'\nstdout: %v\nstderr: %v",
-			err, command, stdout, stderr)
+		// Details are displayed only if no outputs are displayed
+		if err != nil && !directRun {
+			// It is safe to read from stdout and stderr here, as the goroutines are guaranteed to have terminated at this point.
+			klog.V(2).Infof("ExecuteCommand returned an an err: %v. for command '%v'\nstdout: %v\nstderr: %v",
+				err, command, stdout, stderr)

-		msg := fmt.Sprintf("unable to exec command %v", command)
-		if len(stdout) != 0 {
-			msg += fmt.Sprintf("\n=== stdout===\n%s", strings.Join(stdout, "\n"))
-		}
-		if len(stderr) != 0 {
-			msg += fmt.Sprintf("\n=== stderr===\n%s", strings.Join(stderr, "\n"))
-		}
-		return stdout, stderr, fmt.Errorf("%s: %w", msg, err)
-	}
+			msg := fmt.Sprintf("unable to exec command %v", command)
+			if len(stdout) != 0 {
+				msg += fmt.Sprintf("\n=== stdout===\n%s", strings.Join(stdout, "\n"))
+			}
+			if len(stderr) != 0 {
+				msg += fmt.Sprintf("\n=== stderr===\n%s", strings.Join(stderr, "\n"))
+			}
+			return stdout, stderr, fmt.Errorf("%s: %w", msg, err)
+		}

-	return stdout, stderr, err
+		return stdout, stderr, err
+	}
+
+	tty := setupTTY()
+
+	fn := func() error {
+		return o.platformClient.ExecCMDInContainer(ctx, containerName, podName, command, tty.Out, os.Stderr, tty.In, tty.Raw)
+	}
+
+	return nil, nil, tty.Safe(fn)
 }

 // This goroutine will automatically pipe the output from the writer (passed into ExecCMDInContainer) to
 // the loggers.
 // The returned channel will contain a single nil entry once the reader has closed.
-func startReaderGoroutine(reader io.Reader, show bool, cmdOutput *[]string, consoleOutput *io.PipeWriter) chan interface{} {
+func startReaderGoroutine(logWriter io.Writer, reader io.Reader, show bool, cmdOutput *[]string, consoleOutput *io.PipeWriter) chan interface{} {
 	result := make(chan interface{})

 	go func() {
@@ -74,7 +88,7 @@ func startReaderGoroutine(reader io.Reader, show bool, cmdOutput *[]string, cons
 			line := scanner.Text()

 			if show {
-				_, err := fmt.Fprintln(os.Stdout, line)
+				_, err := fmt.Fprintln(logWriter, line)
 				if err != nil {
 					log.Errorf("Unable to print to stdout: %s", err.Error())
 				}
@@ -98,3 +112,15 @@ func startReaderGoroutine(reader io.Reader, show bool, cmdOutput *[]string, cons

 	return result
 }
+
+func setupTTY() term.TTY {
+	tty := term.TTY{
+		In:  os.Stdin,
+		Out: os.Stdout,
+	}
+	if !tty.IsTerminalIn() || !tty.IsTerminalOut() {
+		return tty
+	}
+	tty.Raw = true
+	return tty
+}
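The `term.TTY` helper from `k8s.io/kubectl/pkg/util/term` restores the previous terminal state once the wrapped function returns, which is why the raw-mode switch is safe to use for `odo run`. A minimal standalone sketch of the same pattern (illustrative only, not odo code):

```go
package main

import (
	"fmt"
	"os"

	"k8s.io/kubectl/pkg/util/term"
)

func main() {
	// Mirror of the setupTTY logic above: only request raw mode when both
	// stdin and stdout are real terminals (e.g. not when output is piped).
	tty := term.TTY{In: os.Stdin, Out: os.Stdout}
	if tty.IsTerminalIn() && tty.IsTerminalOut() {
		tty.Raw = true
	}

	// Safe puts the terminal in raw mode (if requested), runs the function,
	// and restores the terminal state afterwards, even on error.
	err := tty.Safe(func() error {
		_, err := fmt.Fprintf(tty.Out, "raw mode enabled: %v\r\n", tty.Raw)
		return err
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```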
@@ -23,13 +23,9 @@ func (o *PodmanCli) ExecCMDInContainer(ctx context.Context, containerName, podNa
 	args = append(args, cmd...)

 	command := exec.CommandContext(ctx, o.podmanCmd, append(o.containerRunGlobalExtraArgs, args...)...)
-	klog.V(3).Infof("executing %v", command.Args)
+	command.Stdout = stdout
 	command.Stderr = stderr
 	command.Stdin = stdin
-
-	out, err := command.Output()
-	if err != nil {
-		return err
-	}
-	_, err = stdout.Write(out)
-	return err
+	klog.V(3).Infof("executing %v", command.Args)
+	return command.Run()
 }
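The switch from `command.Output()` to `command.Run()` matters for interactive commands: `Output()` buffers stdout until the child exits, while wiring the standard streams directly lets output, prompts, and Ctrl-c flow as the child runs. A small standalone sketch of the same pattern (illustrative, not the odo podman client):

```go
package main

import (
	"context"
	"os"
	"os/exec"
)

// runInteractive hands the local standard streams straight to the child
// process, so output appears immediately and Ctrl-c reaches the child.
func runInteractive(ctx context.Context, name string, args ...string) error {
	cmd := exec.CommandContext(ctx, name, args...)
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout // not captured: written as the child produces it
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

func main() {
	if err := runInteractive(context.Background(), "sh", "-c", "echo hello from child"); err != nil {
		os.Exit(1)
	}
}
```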
@@ -68,19 +68,21 @@ commands:
       workingDir: ${PROJECTS_ROOT}
       group:
         kind: run
-  - id: create-file
+  - id: list-files
     exec:
       component: runtime
-      commandLine: touch /tmp/new-file
-      workingDir: ${PROJECTS_ROOT}
-  - id: create-file-in-other-container
+      commandLine: ls /
+  - id: list-files-in-other-container
     exec:
       component: other-container
-      commandLine: touch /tmp/new-file-in-other-container
-      workingDir: ${PROJECTS_ROOT}
+      commandLine: ls /
   - id: deploy-config
     apply:
       component: config
   - id: build-image
     apply:
       component: image
+  - id: error-cmd
+    exec:
+      component: runtime
+      commandLine: ls /not-found
@@ -3,9 +3,7 @@ package integration

 import (
 	"path/filepath"

-	"github.com/redhat-developer/odo/pkg/labels"
 	"github.com/redhat-developer/odo/tests/helper"
-	"k8s.io/utils/pointer"

 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
@@ -101,18 +99,14 @@ var _ = Describe("odo run command tests", func() {
 				platform = "podman"
 			}

-			By("executing an exec command", func() {
-				output := helper.Cmd("odo", "run", "create-file", "--platform", platform).ShouldPass().Out()
-				Expect(output).To(ContainSubstring("Executing command in container (command: create-file)"))
-				component := helper.NewComponent(cmpName, "app", labels.ComponentDevMode, commonVar.Project, commonVar.CliRunner)
-				component.Exec("runtime", []string{"ls", "/tmp/new-file"}, pointer.Bool(true))
+			By("executing an exec command and displaying output", func() {
+				output := helper.Cmd("odo", "run", "list-files", "--platform", platform).ShouldPass().Out()
+				Expect(output).To(ContainSubstring("etc"))
 			})

-			By("executing an exec command in another container", func() {
-				output := helper.Cmd("odo", "run", "create-file-in-other-container", "--platform", platform).ShouldPass().Out()
-				Expect(output).To(ContainSubstring("Executing command in container (command: create-file-in-other-container)"))
-				component := helper.NewComponent(cmpName, "app", labels.ComponentDevMode, commonVar.Project, commonVar.CliRunner)
-				component.Exec("other-container", []string{"ls", "/tmp/new-file-in-other-container"}, pointer.Bool(true))
+			By("executing an exec command in another container and displaying output", func() {
+				output := helper.Cmd("odo", "run", "list-files-in-other-container", "--platform", platform).ShouldPass().Out()
+				Expect(output).To(ContainSubstring("etc"))
 			})

 			if !podman {
@@ -140,6 +134,11 @@ var _ = Describe("odo run command tests", func() {
 				})
 			}

+			By("exiting with a status 1 when the exec command fails and displaying error output", func() {
+				out := helper.Cmd("odo", "run", "error-cmd", "--platform", platform).ShouldFail().Err()
+				Expect(out).To(ContainSubstring("No such file or directory"))
+			})
 		})
 	}))
}