Mirror of https://github.com/redhat-developer/odo.git (synced 2025-10-19 03:06:19 +03:00)
Use a single handler for executing all commands (#6826)
* Document current implementations of command handlers
* Add unit tests for execHandler
* Refactor pkg/devfile/image to inject Backend as dependency
* Use same handler for kubedev/podmandev
* Fail after SelectBackend==nil only if backend is needed
* Move runHandler to dev/common
* Unit tests for runHandler
* Create a component.ExecuteTerminatingCommand
* ExecuteTerminatingCommand/ExecuteNonTerminatingCommand for Handler
* Fix calling other command types
* Consider parent group to determine if a command is terminating
* Replace component.execHandler by common.runHandler
* Remove execHandler
* Make runHandler and most of its fields private and pass containersRunning to handler
* Pass containersRunning value
* Deploy using common Handler
* Fix tests
* Use specific Dev/Deploy mode for Apply
* Fix cmdline for job
* Fix unit tests
* Pass appName and componentName with ctx to handler
* Move handler to pkg/component package
* Update doc
* Unit tests Deploy
* Unit tests Build
* Unit tests Run
* Unit tests PostStart
* Unit tests PreStop
* Update doc
* Fix Podman tests
* Fix hotReload on Podman
* Change Podman version timeout to 30s for tests
* Cleanup + fix doc
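The refactor replaces four per-caller handler implementations (execHandler, deployHandler, and the kubedev/podmandev handler types removed below) with a single pkg/component.runHandler. For orientation, here is a minimal sketch of the libdevfile.Handler contract the new handler satisfies, inferred from the methods implemented in pkg/component/handler.go further down; the authoritative interface lives in pkg/libdevfile and may differ in detail:

```go
package sketch

import (
	"context"

	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
)

// Handler is a sketch of the interface implied by this commit: one object
// that knows how to apply components (image, Kubernetes, OpenShift) and how
// to execute both terminating (Build/Test/Deploy-style) and non-terminating
// (Run/Debug-style) exec commands.
type Handler interface {
	ApplyImage(image v1alpha2.Component) error
	ApplyKubernetes(kubernetes v1alpha2.Component, kind v1alpha2.CommandGroupKind) error
	ApplyOpenShift(openshift v1alpha2.Component, kind v1alpha2.CommandGroupKind) error
	ExecuteTerminatingCommand(ctx context.Context, command v1alpha2.Command) error
	ExecuteNonTerminatingCommand(ctx context.Context, command v1alpha2.Command) error
}
```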
docs/proposals/commands-semantic.md (new file, 46 lines)
@@ -0,0 +1,46 @@
# Semantic of commands

Components:

- container
- cluster resource (Kubernetes/OpenShift)
- volume
- image

| Command                   | PreStart | PostStart | PreStop | PostStop |
|---------------------------|----------|-----------|---------|----------|
| exec on container         |          | Yt        | Yt      |          |
| exec on cluster resource  | N/A      | N/A       | N/A     | N/A      |
| exec on volume            | N/A      | N/A       | N/A     | N/A      |
| exec on image             | N/A      | N/A       | N/A     | N/A      |
|                           |          |           |         |          |
| apply on container        | ?        | ?         | ?       | ?        |
| apply on cluster resource |          | Yt        | Yt      |          |
| apply on volume           | ?        | ?         | ?       | ?        |
| apply on image            |          | Yt        | Yt      |          |
|                           |          |           |         |          |
| composite serial          |          |           |         |          |
| composite parallel        |          |           |         |          |

| Command                   | Build | Run/Debug | Deploy |
|---------------------------|-------|-----------|--------|
| exec on container         | Yt    | Yt        | Yt     |
| exec on cluster resource  | N/A   | N/A       | N/A    |
| exec on volume            | N/A   | N/A       | N/A    |
| exec on image             | N/A   | N/A       | N/A    |
|                           |       |           |        |
| apply on container        | ?     | ?         | ?      |
| apply on cluster resource | Yt    | Yt        | Yt     |
| apply on volume           | ?     | ?         | ?      |
| apply on image            | Yt    | Yt        | Yt     |
|                           |       |           |        |
| composite serial          |       |           |        |
| composite parallel        |       |           |        |

Legend:

- 0: Supported by handler but not implemented
- Y: Implemented by pkg/component.NewRunHandler (Yt: tested in pkg/component/handler_test.go)
- N/A: Not applicable (by spec)
- ?: Spec is not clear
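One commit bullet reads "Consider parent group to determine if a command is terminating". The tables above only make sense once a command is classified as terminating or not; below is a hypothetical sketch of that classification. The real logic lives in pkg/libdevfile, and both this helper name and the default for group-less commands are assumptions:

```go
package sketch

import "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"

// isTerminating illustrates the rule implied by the commit: Run and Debug
// commands stay alive and go through ExecuteNonTerminatingCommand, while
// everything else (Build, Test, Deploy) runs to completion through
// ExecuteTerminatingCommand. For a command inside a composite command,
// the parent's group is consulted when the command has none of its own.
func isTerminating(group *v1alpha2.CommandGroup) bool {
	if group == nil {
		return true // assumption: commands without any group default to terminating
	}
	switch group.Kind {
	case v1alpha2.RunCommandGroupKind, v1alpha2.DebugCommandGroupKind:
		return false
	default:
		return true
	}
}
```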
@@ -538,3 +538,11 @@ func ListRoutesAndIngresses(client kclient.ClientInterface, componentName, appNa
 	return ings, routes, nil
 }
+
+func GetContainersNames(pod *corev1.Pod) []string {
+	result := make([]string, 0, len(pod.Spec.Containers))
+	for _, container := range pod.Spec.Containers {
+		result = append(result, container.Name)
+	}
+	return result
+}
@@ -15,6 +15,7 @@ import (
 	"k8s.io/klog"

 	"github.com/redhat-developer/odo/pkg/component"
+	"github.com/redhat-developer/odo/pkg/configAutomount"
 	"github.com/redhat-developer/odo/pkg/exec"
 	"github.com/redhat-developer/odo/pkg/kclient"
 	odolabels "github.com/redhat-developer/odo/pkg/labels"
@@ -28,9 +29,10 @@ import (
 )

 type DeleteComponentClient struct {
-	kubeClient   kclient.ClientInterface
-	podmanClient podman.Client
-	execClient   exec.Client
+	kubeClient            kclient.ClientInterface
+	podmanClient          podman.Client
+	execClient            exec.Client
+	configAutomountClient configAutomount.Client
 }

 var _ Client = (*DeleteComponentClient)(nil)
@@ -39,11 +41,13 @@ func NewDeleteComponentClient(
 	kubeClient kclient.ClientInterface,
 	podmanClient podman.Client,
 	execClient exec.Client,
+	configAutomountClient configAutomount.Client,
 ) *DeleteComponentClient {
 	return &DeleteComponentClient{
-		kubeClient:   kubeClient,
-		podmanClient: podmanClient,
-		execClient:   execClient,
+		kubeClient:            kubeClient,
+		podmanClient:          podmanClient,
+		execClient:            execClient,
+		configAutomountClient: configAutomountClient,
 	}
 }
@@ -216,7 +220,20 @@ func (do *DeleteComponentClient) ExecutePreStopEvents(ctx context.Context, devfi

 	klog.V(4).Infof("Executing %q event commands for component %q", libdevfile.PreStop, componentName)
 	// ignore the failures if any; delete should not fail because preStop events failed to execute
-	err = libdevfile.ExecPreStopEvents(ctx, devfileObj, component.NewExecHandler(do.kubeClient, do.execClient, appName, componentName, pod.Name, "Executing pre-stop command in container", false, false))
+	handler := component.NewRunHandler(
+		ctx,
+		do.kubeClient,
+		do.execClient,
+		do.configAutomountClient,
+		pod.Name,
+		false,
+		component.GetContainersNames(pod),
+		"Executing pre-stop command in container",
+
+		// TODO(feloy) set these values when we want to support Apply Image/Kubernetes/OpenShift commands for PreStop events
+		nil, nil, parser.DevfileObj{}, "",
+	)
+	err = libdevfile.ExecPreStopEvents(ctx, devfileObj, handler)
 	if err != nil {
 		klog.V(4).Infof("Failed to execute %q event commands for component %q, cause: %v", libdevfile.PreStop, componentName, err.Error())
 	}
@@ -160,7 +160,7 @@ func TestDeleteComponentClient_ListClusterResourcesToDelete(t *testing.T) {
 			ctrl := gomock.NewController(t)
 			kubeClient := tt.fields.kubeClient(ctrl)
 			execClient := exec.NewExecClient(kubeClient)
-			do := NewDeleteComponentClient(kubeClient, nil, execClient)
+			do := NewDeleteComponentClient(kubeClient, nil, execClient, nil)
 			ctx := odocontext.WithApplication(context.TODO(), "app")
 			got, err := do.ListClusterResourcesToDelete(ctx, tt.args.componentName, tt.args.namespace, tt.args.mode)
 			if (err != nil) != tt.wantErr {
@@ -277,7 +277,7 @@ func TestDeleteComponentClient_DeleteResources(t *testing.T) {
 			ctrl := gomock.NewController(t)
 			kubeClient := tt.fields.kubeClient(ctrl)
 			execClient := exec.NewExecClient(kubeClient)
-			do := NewDeleteComponentClient(kubeClient, nil, execClient)
+			do := NewDeleteComponentClient(kubeClient, nil, execClient, nil)
 			got := do.DeleteResources(tt.args.resources, false)
 			if diff := cmp.Diff(tt.want, got); diff != "" {
 				t.Errorf("DeleteComponentClient.DeleteResources() mismatch (-want +got):\n%s", diff)
@@ -686,10 +686,10 @@ func TestDeleteComponentClient_ExecutePreStopEvents(t *testing.T) {
 				client := kclient.NewMockClientInterface(ctrl)

 				selector := odolabels.GetSelector(componentName, "app", odolabels.ComponentDevMode, false)
-				client.EXPECT().GetRunningPodFromSelector(selector).Return(odoTestingUtil.CreateFakePod(componentName, "runtime"), nil)
+				client.EXPECT().GetRunningPodFromSelector(selector).Return(odoTestingUtil.CreateFakePod(componentName, "mypod", "runtime"), nil)

 				cmd := []string{"/bin/sh", "-c", "cd /projects/nodejs-starter && (echo \"Hello World!\") 1>>/proc/1/fd/1 2>>/proc/1/fd/2"}
-				client.EXPECT().ExecCMDInContainer(gomock.Any(), "runtime", "runtime", cmd, gomock.Any(), gomock.Any(), nil, false).Return(nil)
+				client.EXPECT().ExecCMDInContainer(gomock.Any(), "runtime", "mypod", cmd, gomock.Any(), gomock.Any(), nil, false).Return(nil)

 				return client
 			},
@@ -707,7 +707,7 @@ func TestDeleteComponentClient_ExecutePreStopEvents(t *testing.T) {
 				client := kclient.NewMockClientInterface(ctrl)

 				selector := odolabels.GetSelector(componentName, "app", odolabels.ComponentDevMode, false)
-				pod := odoTestingUtil.CreateFakePod(componentName, "runtime")
+				pod := odoTestingUtil.CreateFakePod(componentName, "mypod", "runtime")
 				pod.Status.Phase = corev1.PodFailed
 				client.EXPECT().GetRunningPodFromSelector(selector).Return(pod, nil)
 				return client
@@ -726,14 +726,14 @@ func TestDeleteComponentClient_ExecutePreStopEvents(t *testing.T) {
 				client := kclient.NewMockClientInterface(ctrl)

 				selector := odolabels.GetSelector(componentName, "app", odolabels.ComponentDevMode, false)
-				fakePod := odoTestingUtil.CreateFakePod(componentName, "runtime")
+				fakePod := odoTestingUtil.CreateFakePod(componentName, "mypod", "runtime")
 				// Expecting this method to be called twice because if the command execution fails, we try to get the pod logs by calling GetOnePodFromSelector again.
 				client.EXPECT().GetRunningPodFromSelector(selector).Return(fakePod, nil).Times(2)

 				client.EXPECT().GetPodLogs(fakePod.Name, gomock.Any(), gomock.Any()).Return(nil, errors.New("an error"))

 				cmd := []string{"/bin/sh", "-c", "cd /projects/nodejs-starter && (echo \"Hello World!\") 1>>/proc/1/fd/1 2>>/proc/1/fd/2"}
-				client.EXPECT().ExecCMDInContainer(gomock.Any(), "runtime", "runtime", cmd, gomock.Any(), gomock.Any(), nil, false).Return(errors.New("some error"))
+				client.EXPECT().ExecCMDInContainer(gomock.Any(), "runtime", "mypod", cmd, gomock.Any(), gomock.Any(), nil, false).Return(errors.New("some error"))

 				return client
 			},
@@ -750,8 +750,11 @@ func TestDeleteComponentClient_ExecutePreStopEvents(t *testing.T) {
 			ctrl := gomock.NewController(t)
 			kubeClient := tt.fields.kubeClient(ctrl)
 			execClient := exec.NewExecClient(kubeClient)
-			do := NewDeleteComponentClient(kubeClient, nil, execClient)
-			if err := do.ExecutePreStopEvents(context.Background(), tt.args.devfileObj, tt.args.appName, tt.args.devfileObj.GetMetadataName()); (err != nil) != tt.wantErr {
+			do := NewDeleteComponentClient(kubeClient, nil, execClient, nil)
+			ctx := context.Background()
+			ctx = odocontext.WithApplication(ctx, appName)
+			ctx = odocontext.WithComponentName(ctx, componentName)
+			if err := do.ExecutePreStopEvents(ctx, tt.args.devfileObj, tt.args.appName, tt.args.devfileObj.GetMetadataName()); (err != nil) != tt.wantErr {
 				t.Errorf("DeleteComponent() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
pkg/component/execute_new_container.go (new file, 180 lines)
@@ -0,0 +1,180 @@
package component

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
	"github.com/devfile/library/v2/pkg/devfile/generator"
	"github.com/devfile/library/v2/pkg/devfile/parser"
	"github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common"

	"github.com/redhat-developer/odo/pkg/configAutomount"
	"github.com/redhat-developer/odo/pkg/dev/kubedev/storage"
	"github.com/redhat-developer/odo/pkg/kclient"
	odolabels "github.com/redhat-developer/odo/pkg/labels"
	odogenerator "github.com/redhat-developer/odo/pkg/libdevfile/generator"
	"github.com/redhat-developer/odo/pkg/log"
	"github.com/redhat-developer/odo/pkg/util"

	batchv1 "k8s.io/api/batch/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog"
	"k8s.io/utils/pointer"
)

func ExecuteInNewContainer(
	ctx context.Context,
	kubeClient kclient.ClientInterface,
	configAutomountClient configAutomount.Client,
	devfileObj parser.DevfileObj,
	componentName string,
	appName string,
	command v1alpha2.Command,
) error {
	policy, err := kubeClient.GetCurrentNamespacePolicy()
	if err != nil {
		return err
	}
	podTemplateSpec, err := generator.GetPodTemplateSpec(devfileObj, generator.PodTemplateParams{
		Options: common.DevfileOptions{
			FilterByName: command.Exec.Component,
		},
		PodSecurityAdmissionPolicy: policy,
	})
	if err != nil {
		return err
	}
	// Setting the restart policy to "never" so that pods are kept around after the job finishes execution; this is helpful in obtaining logs to debug.
	podTemplateSpec.Spec.RestartPolicy = "Never"

	if len(podTemplateSpec.Spec.Containers) != 1 {
		return fmt.Errorf("could not find the component")
	}

	podTemplateSpec.Spec.Containers[0].Command = []string{"/bin/sh"}
	podTemplateSpec.Spec.Containers[0].Args = getJobCmdline(command)

	volumes, err := storage.GetAutomountVolumes(configAutomountClient, podTemplateSpec.Spec.Containers, podTemplateSpec.Spec.InitContainers)
	if err != nil {
		return err
	}

	podTemplateSpec.Spec.Volumes = volumes

	// Create a Kubernetes Job and use the container image referenced by command.Exec.Component
	// Get the component for the command with command.Exec.Component
	getJobName := func() string {
		maxLen := kclient.JobNameOdoMaxLength - len(command.Id)
		// We ignore the error here because our component name or app name will never be empty; which are the only cases when an error might be raised.
		name, _ := util.NamespaceKubernetesObjectWithTrim(componentName, appName, maxLen)
		name += "-" + command.Id
		return name
	}
	completionMode := batchv1.CompletionMode("Indexed")
	jobParams := odogenerator.JobParams{
		TypeMeta: generator.GetTypeMeta(kclient.JobsKind, kclient.JobsAPIVersion),
		ObjectMeta: metav1.ObjectMeta{
			Name: getJobName(),
		},
		PodTemplateSpec: *podTemplateSpec,
		SpecParams: odogenerator.JobSpecParams{
			CompletionMode:          &completionMode,
			TTLSecondsAfterFinished: pointer.Int32(60),
			BackOffLimit:            pointer.Int32(1),
		},
	}
	job := odogenerator.GetJob(jobParams)
	// Set labels and annotations
	job.SetLabels(odolabels.GetLabels(componentName, appName, GetComponentRuntimeFromDevfileMetadata(devfileObj.Data.GetMetadata()), odolabels.ComponentDeployMode, false))
	job.Annotations = map[string]string{}
	odolabels.AddCommonAnnotations(job.Annotations)
	odolabels.SetProjectType(job.Annotations, GetComponentTypeFromDevfileMetadata(devfileObj.Data.GetMetadata()))

	// Make sure there are no existing jobs
	checkAndDeleteExistingJob := func() {
		items, dErr := kubeClient.ListJobs(odolabels.GetSelector(componentName, appName, odolabels.ComponentDeployMode, false))
		if dErr != nil {
			klog.V(4).Infof("failed to list jobs; cause: %s", dErr.Error())
			return
		}
		jobName := getJobName()
		for _, item := range items.Items {
			if strings.Contains(item.Name, jobName) {
				dErr = kubeClient.DeleteJob(item.Name)
				if dErr != nil {
					klog.V(4).Infof("failed to delete job %q; cause: %s", item.Name, dErr.Error())
				}
			}
		}
	}
	checkAndDeleteExistingJob()

	log.Sectionf("Executing command:")
	spinner := log.Spinnerf("Executing command in container (command: %s)", command.Id)
	defer spinner.End(false)

	var createdJob *batchv1.Job
	createdJob, err = kubeClient.CreateJob(job, "")
	if err != nil {
		return err
	}
	defer func() {
		err = kubeClient.DeleteJob(createdJob.Name)
		if err != nil {
			klog.V(4).Infof("failed to delete job %q; cause: %s", createdJob.Name, err)
		}
	}()

	var done = make(chan struct{}, 1)
	// Print the tip to use `odo logs` if the command is still running after 1 minute
	go func() {
		select {
		case <-time.After(1 * time.Minute):
			log.Info("\nTip: Run `odo logs --deploy --follow` to get the logs of the command output.")
		case <-done:
			return
		}
	}()

	// Wait for the command to complete execution
	_, err = kubeClient.WaitForJobToComplete(createdJob)
	done <- struct{}{}

	spinner.End(err == nil)

	if err != nil {
		err = fmt.Errorf("failed to execute (command: %s)", command.Id)
		// Print the job logs if the job failed
		jobLogs, logErr := kubeClient.GetJobLogs(createdJob, command.Exec.Component)
		if logErr != nil {
			log.Warningf("failed to fetch the logs of execution; cause: %s", logErr)
		}
		fmt.Println("Execution output:")
		_ = util.DisplayLog(false, jobLogs, log.GetStderr(), componentName, 100)
	}

	return err
}

func getJobCmdline(command v1alpha2.Command) []string {
	// deal with environment variables
	var cmdLine string
	setEnvVariable := util.GetCommandStringFromEnvs(command.Exec.Env)

	if setEnvVariable == "" {
		cmdLine = command.Exec.CommandLine
	} else {
		cmdLine = setEnvVariable + " && " + command.Exec.CommandLine
	}
	var args []string
	if command.Exec.WorkingDir != "" {
		// since we are using /bin/sh -c, the command needs to be within a single double quote instance, for example "cd /tmp && pwd"
		args = []string{"-c", "cd " + command.Exec.WorkingDir + " && " + cmdLine}
	} else {
		args = []string{"-c", cmdLine}
	}
	return args
}
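To make the /bin/sh wrapping concrete, here is an illustrative use of getJobCmdline for a typical Deploy exec command. The command values are invented, and the exact shape of the environment-variable prefix depends on util.GetCommandStringFromEnvs, so the comment only sketches it:

```go
// Hypothetical input: a devfile exec command with a working directory and one env var.
cmd := v1alpha2.Command{
	Id: "deploy-db",
	CommandUnion: v1alpha2.CommandUnion{
		Exec: &v1alpha2.ExecCommand{
			CommandLine: "kubectl apply -f db.yaml",
			WorkingDir:  "/projects",
			Env:         []v1alpha2.EnvVar{{Name: "STAGE", Value: "prod"}},
		},
	},
}
args := getJobCmdline(cmd)
// args is {"-c", "cd /projects && <env assignments> && kubectl apply -f db.yaml"},
// which the Job's container then runs as: /bin/sh -c "..."
// (the exact env-assignment form comes from util.GetCommandStringFromEnvs).
```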
@@ -6,64 +6,25 @@ import (
 	"io"

-	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
+	"k8s.io/klog"
+	"k8s.io/utils/pointer"

+	devfilev1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
 	"github.com/redhat-developer/odo/pkg/exec"
-	"github.com/redhat-developer/odo/pkg/libdevfile"
 	"github.com/redhat-developer/odo/pkg/log"
 	"github.com/redhat-developer/odo/pkg/machineoutput"
 	"github.com/redhat-developer/odo/pkg/platform"
 	"github.com/redhat-developer/odo/pkg/util"
-	"k8s.io/klog"
-	"k8s.io/utils/pointer"
 )

-type execHandler struct {
-	platformClient  platform.Client
-	execClient      exec.Client
-	appName         string
-	componentName   string
-	podName         string
-	msg             string
-	show            bool
-	componentExists bool
-}
-
-var _ libdevfile.Handler = (*execHandler)(nil)
-
 const ShellExecutable string = "/bin/sh"

-func NewExecHandler(platformClient platform.Client, execClient exec.Client, appName, cmpName, podName, msg string, show bool, componentExists bool) *execHandler {
-	return &execHandler{
-		platformClient:  platformClient,
-		execClient:      execClient,
-		appName:         appName,
-		componentName:   cmpName,
-		podName:         podName,
-		msg:             msg,
-		show:            show,
-		componentExists: componentExists,
-	}
-}
+func ExecuteTerminatingCommand(ctx context.Context, execClient exec.Client, platformClient platform.Client, command devfilev1.Command, componentExists bool, podName string, appName string, componentName string, msg string, show bool) error {

-func (o *execHandler) ApplyImage(image v1alpha2.Component) error {
-	return nil
-}
-
-func (o *execHandler) ApplyKubernetes(kubernetes v1alpha2.Component) error {
-	return nil
-}
-
-func (o *execHandler) ApplyOpenShift(openshift v1alpha2.Component) error {
-	return nil
-}
-
-func (o *execHandler) Execute(ctx context.Context, command v1alpha2.Command) error {
-	if o.componentExists && command.Exec != nil && pointer.BoolDeref(command.Exec.HotReloadCapable, false) {
+	if componentExists && command.Exec != nil && pointer.BoolDeref(command.Exec.HotReloadCapable, false) {
 		klog.V(2).Infof("command is hot-reload capable, not executing %q again", command.Id)
 		return nil
 	}

-	msg := o.msg
 	if msg == "" {
 		msg = fmt.Sprintf("Executing %s command on container %q", command.Id, command.Exec.Component)
 	} else {
@@ -76,20 +37,20 @@ func (o *execHandler) Execute(ctx context.Context, command v1alpha2.Command) err
 	stdoutWriter, stdoutChannel, stderrWriter, stderrChannel := logger.CreateContainerOutputWriter()

 	cmdline := getCmdline(command)
-	_, _, err := o.execClient.ExecuteCommand(ctx, cmdline, o.podName, command.Exec.Component, o.show, stdoutWriter, stderrWriter)
+	_, _, err := execClient.ExecuteCommand(ctx, cmdline, podName, command.Exec.Component, show, stdoutWriter, stderrWriter)

 	closeWriterAndWaitForAck(stdoutWriter, stdoutChannel, stderrWriter, stderrChannel)

 	spinner.End(err == nil)
 	if err != nil {
-		rd, errLog := Log(o.platformClient, o.componentName, o.appName, false, command)
+		rd, errLog := Log(platformClient, componentName, appName, false, command)
 		if errLog != nil {
 			return fmt.Errorf("unable to log error %v: %w", err, errLog)
 		}

 		// Use GetStderr in order to make sure that colour output is correct
 		// on non-TTY terminals
-		errLog = util.DisplayLog(false, rd, log.GetStderr(), o.componentName, -1)
+		errLog = util.DisplayLog(false, rd, log.GetStderr(), componentName, -1)
 		if errLog != nil {
 			return fmt.Errorf("unable to log error %v: %w", err, errLog)
 		}
pkg/component/handler.go (new file, 161 lines)
@@ -0,0 +1,161 @@
package component

import (
	"context"

	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
	devfilev1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
	"github.com/devfile/library/v2/pkg/devfile/parser"
	"k8s.io/klog"

	envcontext "github.com/redhat-developer/odo/pkg/config/context"
	"github.com/redhat-developer/odo/pkg/configAutomount"
	"github.com/redhat-developer/odo/pkg/devfile/image"
	"github.com/redhat-developer/odo/pkg/exec"
	"github.com/redhat-developer/odo/pkg/kclient"
	odolabels "github.com/redhat-developer/odo/pkg/labels"
	"github.com/redhat-developer/odo/pkg/libdevfile"
	"github.com/redhat-developer/odo/pkg/log"
	odocontext "github.com/redhat-developer/odo/pkg/odo/context"
	"github.com/redhat-developer/odo/pkg/platform"
	"github.com/redhat-developer/odo/pkg/remotecmd"
	"github.com/redhat-developer/odo/pkg/testingutil/filesystem"
)

type runHandler struct {
	ctx                   context.Context
	platformClient        platform.Client
	execClient            exec.Client
	configAutomountClient configAutomount.Client
	podName               string
	ComponentExists       bool
	containersRunning     []string
	msg                   string

	fs           filesystem.Filesystem
	imageBackend image.Backend

	devfile parser.DevfileObj
	path    string
}

var _ libdevfile.Handler = (*runHandler)(nil)

func NewRunHandler(
	ctx context.Context,
	platformClient platform.Client,
	execClient exec.Client,
	configAutomountClient configAutomount.Client,
	podName string,
	componentExists bool,
	containersRunning []string,
	msg string,

	// For building images
	fs filesystem.Filesystem,
	imageBackend image.Backend,

	// For apply Kubernetes / Openshift
	devfile parser.DevfileObj,
	path string,

) *runHandler {
	return &runHandler{
		ctx:                   ctx,
		platformClient:        platformClient,
		execClient:            execClient,
		configAutomountClient: configAutomountClient,
		podName:               podName,
		ComponentExists:       componentExists,
		containersRunning:     containersRunning,
		msg:                   msg,

		fs:           fs,
		imageBackend: imageBackend,

		devfile: devfile,
		path:    path,
	}
}

func (a *runHandler) ApplyImage(img devfilev1.Component) error {
	return image.BuildPushSpecificImage(a.ctx, a.imageBackend, a.fs, img, envcontext.GetEnvConfig(a.ctx).PushImages)
}

func (a *runHandler) ApplyKubernetes(kubernetes devfilev1.Component, kind v1alpha2.CommandGroupKind) error {
	var (
		componentName = odocontext.GetComponentName(a.ctx)
		appName       = odocontext.GetApplication(a.ctx)
	)
	mode := odolabels.ComponentDevMode
	if kind == v1alpha2.DeployCommandGroupKind {
		mode = odolabels.ComponentDeployMode
	}
	switch platform := a.platformClient.(type) {
	case kclient.ClientInterface:
		return ApplyKubernetes(mode, appName, componentName, a.devfile, kubernetes, platform, a.path)
	default:
		klog.V(4).Info("apply kubernetes/Openshift commands are not implemented on podman")
		log.Warningf("Apply Kubernetes/Openshift components are not supported on Podman. Skipping: %v.", kubernetes.Name)
		return nil
	}
}

func (a *runHandler) ApplyOpenShift(openshift devfilev1.Component, kind v1alpha2.CommandGroupKind) error {
	return a.ApplyKubernetes(openshift, kind)
}

func (a *runHandler) ExecuteNonTerminatingCommand(ctx context.Context, command devfilev1.Command) error {
	var (
		componentName = odocontext.GetComponentName(a.ctx)
		appName       = odocontext.GetApplication(a.ctx)
	)
	if isContainerRunning(command.Exec.Component, a.containersRunning) {
		return ExecuteRunCommand(ctx, a.execClient, a.platformClient, command, a.ComponentExists, a.podName, appName, componentName)
	}
	switch platform := a.platformClient.(type) {
	case kclient.ClientInterface:
		return ExecuteInNewContainer(ctx, platform, a.configAutomountClient, a.devfile, componentName, appName, command)
	default:
		klog.V(4).Info("executing a command in a new container is not implemented on podman")
		log.Warningf("executing a command in a new container is not implemented on podman. Skipping: %v.", command.Id)
		return nil
	}
}

func (a *runHandler) ExecuteTerminatingCommand(ctx context.Context, command devfilev1.Command) error {
	var (
		componentName = odocontext.GetComponentName(a.ctx)
		appName       = odocontext.GetApplication(a.ctx)
	)
	if isContainerRunning(command.Exec.Component, a.containersRunning) {
		return ExecuteTerminatingCommand(ctx, a.execClient, a.platformClient, command, a.ComponentExists, a.podName, appName, componentName, a.msg, false)
	}
	switch platform := a.platformClient.(type) {
	case kclient.ClientInterface:
		return ExecuteInNewContainer(ctx, platform, a.configAutomountClient, a.devfile, componentName, appName, command)
	default:
		klog.V(4).Info("executing a command in a new container is not implemented on podman")
		log.Warningf("executing a command in a new container is not implemented on podman. Skipping: %v.", command.Id)
		return nil
	}
}

// IsRemoteProcessForCommandRunning returns true if the command is running
func (a *runHandler) IsRemoteProcessForCommandRunning(ctx context.Context, command devfilev1.Command, podName string) (bool, error) {
	remoteProcess, err := remotecmd.NewKubeExecProcessHandler(a.execClient).GetProcessInfoForCommand(ctx, remotecmd.CommandDefinition{Id: command.Id}, podName, command.Exec.Component)
	if err != nil {
		return false, err
	}

	return remoteProcess.Status == remotecmd.Running, nil
}

func isContainerRunning(container string, containers []string) bool {
	for _, cnt := range containers {
		if container == cnt {
			return true
		}
	}
	return false
}
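As a usage note, every call site in this commit funnels through the same constructor, passing zero values for the capabilities it does not need (nil filesystem/backend when no image can be built, an empty devfile when Apply commands are unsupported). A condensed, hypothetical dev-mode example follows; the argument values are illustrative, and the real call sites are in the kubedev and podmandev diffs below:

```go
// Hypothetical construction of the shared handler for an inner-loop run.
cmdHandler := component.NewRunHandler(
	ctx,
	kubeClient,            // platform.Client: a kclient on cluster, a podman client on Podman
	execClient,            // executes commands in already-running containers
	configAutomountClient, // mounts automount volumes for commands run in new containers
	pod.Name,              // pod hosting the component ("" when none exists, as in odo deploy)
	false,                 // componentExists
	component.GetContainersNames(pod), // containers currently running in the pod
	"",                    // spinner message override
	fs,                    // filesystem, needed to build images
	image.SelectBackend(ctx),          // image backend; nil disables Apply Image
	devfileObj,            // devfile, needed for Apply Kubernetes/OpenShift and new containers
	path,                  // context directory
)
err := libdevfile.ExecuteCommandByNameAndKind(ctx, devfileObj, cmdName, cmdKind, cmdHandler, false)
```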
pkg/component/handler_test.go (new file, 1532 lines)
(File diff suppressed because it is too large.)
@@ -2,32 +2,18 @@ package deploy

 import (
 	"context"
-	"fmt"
 	"path/filepath"
-	"strings"
-	"time"

 	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
-	"github.com/devfile/library/v2/pkg/devfile/generator"
 	"github.com/devfile/library/v2/pkg/devfile/parser"
-	"github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common"
-	batchv1 "k8s.io/api/batch/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/klog/v2"
-	"k8s.io/utils/pointer"

 	"github.com/redhat-developer/odo/pkg/component"
 	"github.com/redhat-developer/odo/pkg/configAutomount"
-	"github.com/redhat-developer/odo/pkg/dev/kubedev/storage"
 	"github.com/redhat-developer/odo/pkg/devfile/image"
 	"github.com/redhat-developer/odo/pkg/kclient"
-	odolabels "github.com/redhat-developer/odo/pkg/labels"
 	"github.com/redhat-developer/odo/pkg/libdevfile"
-	odogenerator "github.com/redhat-developer/odo/pkg/libdevfile/generator"
-	"github.com/redhat-developer/odo/pkg/log"
 	odocontext "github.com/redhat-developer/odo/pkg/odo/context"
 	"github.com/redhat-developer/odo/pkg/testingutil/filesystem"
-	"github.com/redhat-developer/odo/pkg/util"
 )

 type DeployClient struct {
@@ -48,14 +34,25 @@ func NewDeployClient(kubeClient kclient.ClientInterface, configAutomountClient c

 func (o *DeployClient) Deploy(ctx context.Context) error {
 	var (
-		devfileObj    = odocontext.GetEffectiveDevfileObj(ctx)
-		devfilePath   = odocontext.GetDevfilePath(ctx)
-		path          = filepath.Dir(devfilePath)
-		componentName = odocontext.GetComponentName(ctx)
-		appName       = odocontext.GetApplication(ctx)
+		devfileObj  = odocontext.GetEffectiveDevfileObj(ctx)
+		devfilePath = odocontext.GetDevfilePath(ctx)
+		path        = filepath.Dir(devfilePath)
 	)

-	handler := newDeployHandler(ctx, o.fs, *devfileObj, path, o.kubeClient, o.configAutomountClient, appName, componentName)
+	handler := component.NewRunHandler(
+		ctx,
+		o.kubeClient,
+		nil,
+		o.configAutomountClient,
+		"",
+		false,
+		nil,
+		"",
+		o.fs,
+		image.SelectBackend(ctx),
+		*devfileObj,
+		path,
+	)

 	err := o.buildPushAutoImageComponents(handler, *devfileObj)
 	if err != nil {
@@ -70,7 +67,7 @@ func (o *DeployClient) Deploy(ctx context.Context) error {
 	return libdevfile.Deploy(ctx, *devfileObj, handler)
 }

-func (o *DeployClient) buildPushAutoImageComponents(handler *deployHandler, devfileObj parser.DevfileObj) error {
+func (o *DeployClient) buildPushAutoImageComponents(handler libdevfile.Handler, devfileObj parser.DevfileObj) error {
 	components, err := libdevfile.GetImageComponentsToPushAutomatically(devfileObj)
 	if err != nil {
 		return err
@@ -85,14 +82,14 @@ func (o *DeployClient) buildPushAutoImageComponents(handler *deployHandler, devf
 	return nil
 }

-func (o *DeployClient) applyAutoK8sOrOcComponents(handler *deployHandler, devfileObj parser.DevfileObj) error {
+func (o *DeployClient) applyAutoK8sOrOcComponents(handler libdevfile.Handler, devfileObj parser.DevfileObj) error {
 	components, err := libdevfile.GetK8sAndOcComponentsToPush(devfileObj, false)
 	if err != nil {
 		return err
 	}

 	for _, c := range components {
-		var f func(component2 v1alpha2.Component) error
+		var f func(component2 v1alpha2.Component, kind v1alpha2.CommandGroupKind) error
 		if c.Kubernetes != nil {
 			f = handler.ApplyKubernetes
 		} else if c.Openshift != nil {
@@ -101,197 +98,9 @@ func (o *DeployClient) applyAutoK8sOrOcComponents(handler *deployHandler, devfil
 		if f == nil {
 			continue
 		}
-		if err = f(c); err != nil {
+		if err = f(c, v1alpha2.DeployCommandGroupKind); err != nil {
 			return err
 		}
 	}
 	return nil
 }
-
-type deployHandler struct {
-	ctx                   context.Context
-	fs                    filesystem.Filesystem
-	devfileObj            parser.DevfileObj
-	path                  string
-	kubeClient            kclient.ClientInterface
-	configAutomountClient configAutomount.Client
-	appName               string
-	componentName         string
-}
-
-var _ libdevfile.Handler = (*deployHandler)(nil)
-
-func newDeployHandler(ctx context.Context, fs filesystem.Filesystem, devfileObj parser.DevfileObj, path string, kubeClient kclient.ClientInterface, configAutomountClient configAutomount.Client, appName string, componentName string) *deployHandler {
-	return &deployHandler{
-		ctx:                   ctx,
-		fs:                    fs,
-		devfileObj:            devfileObj,
-		path:                  path,
-		kubeClient:            kubeClient,
-		configAutomountClient: configAutomountClient,
-		appName:               appName,
-		componentName:         componentName,
-	}
-}
-
-// ApplyImage builds and pushes the OCI image to be used on Kubernetes
-func (o *deployHandler) ApplyImage(img v1alpha2.Component) error {
-	return image.BuildPushSpecificImage(o.ctx, o.fs, img, true)
-}
-
-// ApplyKubernetes applies inline Kubernetes YAML from the devfile.yaml file
-func (o *deployHandler) ApplyKubernetes(kubernetes v1alpha2.Component) error {
-	return component.ApplyKubernetes(odolabels.ComponentDeployMode, o.appName, o.componentName, o.devfileObj, kubernetes, o.kubeClient, o.path)
-}
-
-// ApplyOpenShift applies inline OpenShift YAML from the devfile.yaml file
-func (o *deployHandler) ApplyOpenShift(openshift v1alpha2.Component) error {
-	return component.ApplyKubernetes(odolabels.ComponentDeployMode, o.appName, o.componentName, o.devfileObj, openshift, o.kubeClient, o.path)
-}
-
-// Execute will deploy the listed information in the `exec` section of devfile.yaml
-func (o *deployHandler) Execute(ctx context.Context, command v1alpha2.Command) error {
-	policy, err := o.kubeClient.GetCurrentNamespacePolicy()
-	if err != nil {
-		return err
-	}
-	podTemplateSpec, err := generator.GetPodTemplateSpec(o.devfileObj, generator.PodTemplateParams{
-		Options: common.DevfileOptions{
-			FilterByName: command.Exec.Component,
-		},
-		PodSecurityAdmissionPolicy: policy,
-	})
-	if err != nil {
-		return err
-	}
-	// Setting the restart policy to "never" so that pods are kept around after the job finishes execution; this is helpful in obtaining logs to debug.
-	podTemplateSpec.Spec.RestartPolicy = "Never"
-
-	if len(podTemplateSpec.Spec.Containers) != 1 {
-		return fmt.Errorf("could not find the component")
-	}
-
-	podTemplateSpec.Spec.Containers[0].Command = []string{"/bin/sh"}
-	podTemplateSpec.Spec.Containers[0].Args = getCmdline(command)
-
-	volumes, err := storage.GetAutomountVolumes(o.configAutomountClient, podTemplateSpec.Spec.Containers, podTemplateSpec.Spec.InitContainers)
-	if err != nil {
-		return err
-	}
-
-	podTemplateSpec.Spec.Volumes = volumes
-
-	// Create a Kubernetes Job and use the container image referenced by command.Exec.Component
-	// Get the component for the command with command.Exec.Component
-	getJobName := func() string {
-		maxLen := kclient.JobNameOdoMaxLength - len(command.Id)
-		// We ignore the error here because our component name or app name will never be empty; which are the only cases when an error might be raised.
-		name, _ := util.NamespaceKubernetesObjectWithTrim(o.componentName, o.appName, maxLen)
-		name += "-" + command.Id
-		return name
-	}
-	completionMode := batchv1.CompletionMode("Indexed")
-	jobParams := odogenerator.JobParams{
-		TypeMeta: generator.GetTypeMeta(kclient.JobsKind, kclient.JobsAPIVersion),
-		ObjectMeta: metav1.ObjectMeta{
-			Name: getJobName(),
-		},
-		PodTemplateSpec: *podTemplateSpec,
-		SpecParams: odogenerator.JobSpecParams{
-			CompletionMode:          &completionMode,
-			TTLSecondsAfterFinished: pointer.Int32(60),
-			BackOffLimit:            pointer.Int32(1),
-		},
-	}
-	job := odogenerator.GetJob(jobParams)
-	// Set labels and annotations
-	job.SetLabels(odolabels.GetLabels(o.componentName, o.appName, component.GetComponentRuntimeFromDevfileMetadata(o.devfileObj.Data.GetMetadata()), odolabels.ComponentDeployMode, false))
-	job.Annotations = map[string]string{}
-	odolabels.AddCommonAnnotations(job.Annotations)
-	odolabels.SetProjectType(job.Annotations, component.GetComponentTypeFromDevfileMetadata(o.devfileObj.Data.GetMetadata()))
-
-	// Make sure there are no existing jobs
-	checkAndDeleteExistingJob := func() {
-		items, dErr := o.kubeClient.ListJobs(odolabels.GetSelector(o.componentName, o.appName, odolabels.ComponentDeployMode, false))
-		if dErr != nil {
-			klog.V(4).Infof("failed to list jobs; cause: %s", dErr.Error())
-			return
-		}
-		jobName := getJobName()
-		for _, item := range items.Items {
-			if strings.Contains(item.Name, jobName) {
-				dErr = o.kubeClient.DeleteJob(item.Name)
-				if dErr != nil {
-					klog.V(4).Infof("failed to delete job %q; cause: %s", item.Name, dErr.Error())
-				}
-			}
-		}
-	}
-	checkAndDeleteExistingJob()
-
-	log.Sectionf("Executing command:")
-	spinner := log.Spinnerf("Executing command in container (command: %s)", command.Id)
-	defer spinner.End(false)
-
-	var createdJob *batchv1.Job
-	createdJob, err = o.kubeClient.CreateJob(job, "")
-	if err != nil {
-		return err
-	}
-	defer func() {
-		err = o.kubeClient.DeleteJob(createdJob.Name)
-		if err != nil {
-			klog.V(4).Infof("failed to delete job %q; cause: %s", createdJob.Name, err)
-		}
-	}()
-
-	var done = make(chan struct{}, 1)
-	// Print the tip to use `odo logs` if the command is still running after 1 minute
-	go func() {
-		select {
-		case <-time.After(1 * time.Minute):
-			log.Info("\nTip: Run `odo logs --deploy --follow` to get the logs of the command output.")
-		case <-done:
-			return
-		}
-	}()
-
-	// Wait for the command to complete execution
-	_, err = o.kubeClient.WaitForJobToComplete(createdJob)
-	done <- struct{}{}
-
-	spinner.End(err == nil)
-
-	if err != nil {
-		err = fmt.Errorf("failed to execute (command: %s)", command.Id)
-		// Print the job logs if the job failed
-		jobLogs, logErr := o.kubeClient.GetJobLogs(createdJob, command.Exec.Component)
-		if logErr != nil {
-			log.Warningf("failed to fetch the logs of execution; cause: %s", logErr)
-		}
-		fmt.Println("Execution output:")
-		_ = util.DisplayLog(false, jobLogs, log.GetStderr(), o.componentName, 100)
-	}
-
-	return err
-}
-
-func getCmdline(command v1alpha2.Command) []string {
-	// deal with environment variables
-	var cmdLine string
-	setEnvVariable := util.GetCommandStringFromEnvs(command.Exec.Env)
-
-	if setEnvVariable == "" {
-		cmdLine = command.Exec.CommandLine
-	} else {
-		cmdLine = setEnvVariable + " && " + command.Exec.CommandLine
-	}
-	var args []string
-	if command.Exec.WorkingDir != "" {
-		// since we are using /bin/sh -c, the command needs to be within a single double quote instance, for example "cd /tmp && pwd"
-		args = []string{"-c", "cd " + command.Exec.WorkingDir + " && " + cmdLine}
-	} else {
-		args = []string{"-c", cmdLine}
-	}
-	return args
-}
@@ -182,7 +182,8 @@ func (o *DevClient) buildPushAutoImageComponents(ctx context.Context, fs filesys
 			klog.V(1).Infof("Skipping image component %q; already applied and not changed", c.Name)
 			continue
 		}
-		err = image.BuildPushSpecificImage(ctx, fs, c, true)
+
+		err = image.BuildPushSpecificImage(ctx, image.SelectBackend(ctx), fs, c, true)
 		if err != nil {
 			return err
 		}
@@ -1,60 +0,0 @@
-package kubedev
-
-import (
-	"context"
-
-	devfilev1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
-	"github.com/devfile/library/v2/pkg/devfile/parser"
-
-	"github.com/redhat-developer/odo/pkg/component"
-	"github.com/redhat-developer/odo/pkg/devfile/image"
-	"github.com/redhat-developer/odo/pkg/exec"
-	"github.com/redhat-developer/odo/pkg/kclient"
-	odolabels "github.com/redhat-developer/odo/pkg/labels"
-	"github.com/redhat-developer/odo/pkg/libdevfile"
-	"github.com/redhat-developer/odo/pkg/remotecmd"
-	"github.com/redhat-developer/odo/pkg/testingutil/filesystem"
-)
-
-type runHandler struct {
-	fs              filesystem.Filesystem
-	execClient      exec.Client
-	appName         string
-	componentName   string
-	devfile         parser.DevfileObj
-	kubeClient      kclient.ClientInterface
-	path            string
-	componentExists bool
-	podName         string
-
-	ctx context.Context
-}
-
-var _ libdevfile.Handler = (*runHandler)(nil)
-
-func (a *runHandler) ApplyImage(img devfilev1.Component) error {
-	return image.BuildPushSpecificImage(a.ctx, a.fs, img, true)
-}
-
-func (a *runHandler) ApplyKubernetes(kubernetes devfilev1.Component) error {
-	return component.ApplyKubernetes(odolabels.ComponentDevMode, a.appName, a.componentName, a.devfile, kubernetes, a.kubeClient, a.path)
-}
-
-func (a *runHandler) ApplyOpenShift(openshift devfilev1.Component) error {
-	return component.ApplyKubernetes(odolabels.ComponentDevMode, a.appName, a.componentName, a.devfile, openshift, a.kubeClient, a.path)
-}
-
-func (a *runHandler) Execute(ctx context.Context, command devfilev1.Command) error {
-	return component.ExecuteRunCommand(ctx, a.execClient, a.kubeClient, command, a.componentExists, a.podName, a.appName, a.componentName)
-
-}
-
-// IsRemoteProcessForCommandRunning returns true if the command is running
-func (a *runHandler) IsRemoteProcessForCommandRunning(ctx context.Context, command devfilev1.Command, podName string) (bool, error) {
-	remoteProcess, err := remotecmd.NewKubeExecProcessHandler(a.execClient).GetProcessInfoForCommand(ctx, remotecmd.CommandDefinition{Id: command.Id}, podName, command.Exec.Component)
-	if err != nil {
-		return false, err
-	}
-
-	return remoteProcess.Status == remotecmd.Running, nil
-}
@@ -7,10 +7,12 @@ import (
 	"time"

 	devfilev1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
+	"github.com/devfile/library/v2/pkg/devfile/parser"
 	parsercommon "github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common"

 	"github.com/redhat-developer/odo/pkg/component"
 	"github.com/redhat-developer/odo/pkg/dev/common"
+	"github.com/redhat-developer/odo/pkg/devfile/image"
 	"github.com/redhat-developer/odo/pkg/libdevfile"
 	"github.com/redhat-developer/odo/pkg/log"
 	odocontext "github.com/redhat-developer/odo/pkg/odo/context"
@@ -23,7 +25,6 @@ import (

 func (o *DevClient) innerloop(ctx context.Context, parameters common.PushParameters, componentStatus *watch.ComponentStatus) error {
 	var (
-		appName       = odocontext.GetApplication(ctx)
 		componentName = odocontext.GetComponentName(ctx)
 		devfilePath   = odocontext.GetDevfilePath(ctx)
 		path          = filepath.Dir(devfilePath)
@@ -86,10 +87,23 @@ func (o *DevClient) innerloop(ctx context.Context, parameters common.PushParamet
 	}
 	s.End(true)

-	// PostStart events from the devfile will only be executed when the component
-	// didn't previously exist
 	if !componentStatus.PostStartEventsDone && libdevfile.HasPostStartEvents(parameters.Devfile) {
-		err = libdevfile.ExecPostStartEvents(ctx, parameters.Devfile, component.NewExecHandler(o.kubernetesClient, o.execClient, appName, componentName, pod.Name, "Executing post-start command in container", parameters.Show, false))
+		// PostStart events from the devfile will only be executed when the component
+		// didn't previously exist
+		handler := component.NewRunHandler(
+			ctx,
+			o.kubernetesClient,
+			o.execClient,
+			o.configAutomountClient,
+			pod.Name,
+			false,
+			component.GetContainersNames(pod),
+			"Executing post-start command in container",
+
+			// TODO(feloy) set these values when we want to support Apply Image/Kubernetes/OpenShift commands for PostStart commands
+			nil, nil, parser.DevfileObj{}, "",
+		)
+		err = libdevfile.ExecPostStartEvents(ctx, parameters.Devfile, handler)
 		if err != nil {
 			return err
 		}
@@ -107,17 +121,22 @@ func (o *DevClient) innerloop(ctx context.Context, parameters common.PushParamet
 	}
 	var running bool
 	var isComposite bool
-	cmdHandler := runHandler{
-		fs:            o.filesystem,
-		execClient:    o.execClient,
-		kubeClient:    o.kubernetesClient,
-		appName:       appName,
-		componentName: componentName,
-		devfile:       parameters.Devfile,
-		path:          path,
-		podName:       pod.GetName(),
-		ctx:           ctx,
-	}
+
+	cmdHandler := component.NewRunHandler(
+		ctx,
+		o.kubernetesClient,
+		o.execClient,
+		o.configAutomountClient,
+		pod.GetName(),
+		false,
+		component.GetContainersNames(pod),
+		"",
+
+		o.filesystem,
+		image.SelectBackend(ctx),
+		parameters.Devfile,
+		path,
+	)

 	if commandType == devfilev1.ExecCommandType {
 		running, err = cmdHandler.IsRemoteProcessForCommandRunning(ctx, cmd, pod.Name)
@@ -133,7 +152,7 @@ func (o *DevClient) innerloop(ctx context.Context, parameters common.PushParamet
 			commandType, cmd.Id)
 	}

-	cmdHandler.componentExists = running || isComposite
+	cmdHandler.ComponentExists = running || isComposite

 	klog.V(4).Infof("running=%v, execRequired=%v",
 		running, execRequired)
@@ -142,8 +161,19 @@ func (o *DevClient) innerloop(ctx context.Context, parameters common.PushParamet
 		// Invoke the build command once (before calling libdevfile.ExecuteCommandByNameAndKind), as, if cmd is a composite command,
 		// the handler we pass will be called for each command in that composite command.
 		doExecuteBuildCommand := func() error {
-			execHandler := component.NewExecHandler(o.kubernetesClient, o.execClient, appName, componentName, pod.Name,
-				"Building your application in container", parameters.Show, running)
+			execHandler := component.NewRunHandler(
+				ctx,
+				o.kubernetesClient,
+				o.execClient,
+				o.configAutomountClient,
+				pod.Name,
+				running,
+				component.GetContainersNames(pod),
+				"Building your application in container",
+
+				// TODO(feloy) set these values when we want to support Apply Image/Kubernetes/OpenShift commands for PostStart commands
+				nil, nil, parser.DevfileObj{}, "",
+			)
 			return libdevfile.Build(ctx, parameters.Devfile, parameters.StartOptions.BuildCommand, execHandler)
 		}
 		if err = doExecuteBuildCommand(); err != nil {
@@ -151,7 +181,7 @@ func (o *DevClient) innerloop(ctx context.Context, parameters common.PushParamet
 		return err
 	}

-	err = libdevfile.ExecuteCommandByNameAndKind(ctx, parameters.Devfile, cmdName, cmdKind, &cmdHandler, false)
+	err = libdevfile.ExecuteCommandByNameAndKind(ctx, parameters.Devfile, cmdName, cmdKind, cmdHandler, false)
 	if err != nil {
 		return err
 	}
@@ -1,50 +0,0 @@
-package podmandev
-
-import (
-	"context"
-
-	devfilev1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
-	"k8s.io/klog"
-
-	"github.com/redhat-developer/odo/pkg/component"
-	envcontext "github.com/redhat-developer/odo/pkg/config/context"
-	"github.com/redhat-developer/odo/pkg/devfile/image"
-	"github.com/redhat-developer/odo/pkg/exec"
-	"github.com/redhat-developer/odo/pkg/libdevfile"
-	"github.com/redhat-developer/odo/pkg/log"
-	"github.com/redhat-developer/odo/pkg/platform"
-	"github.com/redhat-developer/odo/pkg/testingutil/filesystem"
-)
-
-type commandHandler struct {
-	ctx             context.Context
-	fs              filesystem.Filesystem
-	execClient      exec.Client
-	platformClient  platform.Client
-	componentExists bool
-	podName         string
-	appName         string
-	componentName   string
-}
-
-var _ libdevfile.Handler = (*commandHandler)(nil)
-
-func (a commandHandler) ApplyImage(img devfilev1.Component) error {
-	return image.BuildPushSpecificImage(a.ctx, a.fs, img, envcontext.GetEnvConfig(a.ctx).PushImages)
-}
-
-func (a commandHandler) ApplyKubernetes(kubernetes devfilev1.Component) error {
-	klog.V(4).Info("apply kubernetes commands are not implemented on podman")
-	log.Warningf("Apply Kubernetes components are not supported on Podman. Skipping: %v.", kubernetes.Name)
-	return nil
-}
-
-func (a commandHandler) ApplyOpenShift(openshift devfilev1.Component) error {
-	klog.V(4).Info("apply OpenShift commands are not implemented on podman")
-	log.Warningf("Apply OpenShift components are not supported on Podman. Skipping: %v.", openshift.Name)
-	return nil
-}
-
-func (a commandHandler) Execute(ctx context.Context, command devfilev1.Command) error {
-	return component.ExecuteRunCommand(ctx, a.execClient, a.platformClient, command, a.componentExists, a.podName, a.appName, a.componentName)
-}
@@ -35,7 +35,6 @@ func (o *DevClient) reconcile(
 	componentStatus *watch.ComponentStatus,
 ) error {
 	var (
-		appName       = odocontext.GetApplication(ctx)
 		componentName = odocontext.GetComponentName(ctx)
 		devfilePath   = odocontext.GetDevfilePath(ctx)
 		path          = filepath.Dir(devfilePath)
@@ -65,15 +64,18 @@
 	// PostStart events from the devfile will only be executed when the component
 	// didn't previously exist
 	if !componentStatus.PostStartEventsDone && libdevfile.HasPostStartEvents(devfileObj) {
-		execHandler := component.NewExecHandler(
+		execHandler := component.NewRunHandler(
+			ctx,
 			o.podmanClient,
 			o.execClient,
-			appName,
-			componentName,
+			nil, // TODO(feloy) set this value when we want to support exec on new container on podman
 			pod.Name,
-			"Executing post-start command in container",
-			false, /* TODO */
 			false,
+			component.GetContainersNames(pod),
+			"Executing post-start command in container",
+
+			// TODO(feloy) set these values when we want to support Apply Image/Kubernetes/OpenShift commands for PostStart commands
+			nil, nil, parser.DevfileObj{}, "",
 		)
 		err = libdevfile.ExecPostStartEvents(ctx, devfileObj, execHandler)
 		if err != nil {
@@ -84,15 +86,18 @@

 	if execRequired {
 		doExecuteBuildCommand := func() error {
-			execHandler := component.NewExecHandler(
+			execHandler := component.NewRunHandler(
+				ctx,
 				o.podmanClient,
 				o.execClient,
-				appName,
-				componentName,
+				nil, // TODO(feloy) set this value when we want to support exec on new container on podman
 				pod.Name,
-				"Building your application in container",
-				false, /* TODO */
 				componentStatus.RunExecuted,
+				component.GetContainersNames(pod),
+				"Building your application in container",
+
+				// TODO(feloy) set these values when we want to support Apply Image/Kubernetes/OpenShift commands for PreStop events
+				nil, nil, parser.DevfileObj{}, "",
 			)
 			return libdevfile.Build(ctx, devfileObj, options.BuildCommand, execHandler)
 		}
@@ -108,17 +113,24 @@
 		cmdKind = devfilev1.DebugCommandGroupKind
 		cmdName = options.DebugCommand
 	}
-	cmdHandler := commandHandler{
-		ctx:             ctx,
-		fs:              o.fs,
-		execClient:      o.execClient,
-		platformClient:  o.podmanClient,
-		componentExists: componentStatus.RunExecuted,
-		podName:         pod.Name,
-		appName:         appName,
-		componentName:   componentName,
-	}
-	err = libdevfile.ExecuteCommandByNameAndKind(ctx, devfileObj, cmdName, cmdKind, &cmdHandler, false)
+
+	cmdHandler := component.NewRunHandler(
+		ctx,
+		o.podmanClient,
+		o.execClient,
+		nil, // TODO(feloy) set this value when we want to support exec on new container on podman
+		pod.Name,
+		componentStatus.RunExecuted,
+		component.GetContainersNames(pod),
+		"",
+
+		o.fs,
+		image.SelectBackend(ctx),
+
+		// TODO(feloy) set to deploy Kubernetes/Openshift components
+		parser.DevfileObj{}, "",
+	)
+	err = libdevfile.ExecuteCommandByNameAndKind(ctx, devfileObj, cmdName, cmdKind, cmdHandler, false)
 	if err != nil {
 		return err
 	}
@@ -188,7 +200,7 @@ func (o *DevClient) buildPushAutoImageComponents(ctx context.Context, devfileObj
 	}

 	for _, c := range components {
-		err = image.BuildPushSpecificImage(ctx, o.fs, c, envcontext.GetEnvConfig(ctx).PushImages)
+		err = image.BuildPushSpecificImage(ctx, image.SelectBackend(ctx), o.fs, c, envcontext.GetEnvConfig(ctx).PushImages)
 		if err != nil {
 			return err
 		}
@@ -32,16 +32,17 @@ var lookPathCmd = exec.LookPath
|
||||
|
||||
// BuildPushImages build all images defined in the devfile with the detected backend
|
||||
// If push is true, also push the images to their registries
|
||||
func BuildPushImages(ctx context.Context, fs filesystem.Filesystem, push bool) error {
|
||||
func BuildPushImages(ctx context.Context, backend Backend, fs filesystem.Filesystem, push bool) error {
|
||||
var (
|
||||
devfileObj = odocontext.GetEffectiveDevfileObj(ctx)
|
||||
devfilePath = odocontext.GetDevfilePath(ctx)
|
||||
path = filepath.Dir(devfilePath)
|
||||
)
|
||||
|
||||
backend, err := selectBackend(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
if backend == nil {
|
||||
//revive:disable:error-strings This is a top-level error message displayed as is to the end user
|
||||
return errors.New("odo requires either Podman or Docker to be installed in your environment. Please install one of them and try again.")
|
||||
//revive:enable:error-strings
|
||||
}
|
||||
|
||||
components, err := devfileObj.Data.GetComponents(common.DevfileOptions{
|
||||
@@ -65,15 +66,18 @@ func BuildPushImages(ctx context.Context, fs filesystem.Filesystem, push bool) e

// BuildPushSpecificImage builds an image defined in the devfile present in devfilePath
// If push is true, also pushes the image to its registry
-func BuildPushSpecificImage(ctx context.Context, fs filesystem.Filesystem, component devfile.Component, push bool) error {
+func BuildPushSpecificImage(ctx context.Context, backend Backend, fs filesystem.Filesystem, component devfile.Component, push bool) error {
    var (
        devfilePath = odocontext.GetDevfilePath(ctx)
        path        = filepath.Dir(devfilePath)
    )
-    backend, err := selectBackend(ctx)
-    if err != nil {
-        return err
+
+    if backend == nil {
+        //revive:disable:error-strings This is a top-level error message displayed as is to the end user
+        return errors.New("odo requires either Podman or Docker to be installed in your environment. Please install one of them and try again.")
+        //revive:enable:error-strings
    }

    return buildPushImage(backend, fs, component.Image, path, push)
}
@@ -103,10 +107,10 @@ func buildPushImage(backend Backend, fs filesystem.Filesystem, image *devfile.Im
    return nil
}

-// selectBackend selects the container backend to use for building and pushing images
+// SelectBackend selects the container backend to use for building and pushing images
// It will detect podman and docker CLIs (in this order),
-// or return an error if none are present locally
-func selectBackend(ctx context.Context) (Backend, error) {
+// or return nil if none are present locally
+func SelectBackend(ctx context.Context) Backend {

    podmanCmd := envcontext.GetEnvConfig(ctx).PodmanCmd
    globalExtraArgs := envcontext.GetEnvConfig(ctx).OdoContainerBackendGlobalArgs
@@ -127,14 +131,12 @@ func selectBackend(ctx context.Context) (Backend, error) {
            log.Warning("WARNING: Building images on Apple Silicon / M1 is not (yet) supported natively on Podman")
            log.Warning("There is however a temporary workaround: https://github.com/containers/podman/discussions/12899")
        }
-        return NewDockerCompatibleBackend(podmanCmd, globalExtraArgs, buildExtraArgs), nil
+        return NewDockerCompatibleBackend(podmanCmd, globalExtraArgs, buildExtraArgs)
    }

    dockerCmd := envcontext.GetEnvConfig(ctx).DockerCmd
    if _, err := lookPathCmd(dockerCmd); err == nil {
-        return NewDockerCompatibleBackend(dockerCmd, globalExtraArgs, buildExtraArgs), nil
+        return NewDockerCompatibleBackend(dockerCmd, globalExtraArgs, buildExtraArgs)
    }
-    //revive:disable:error-strings This is a top-level error message displayed as is to the end user
-    return nil, errors.New("odo requires either Podman or Docker to be installed in your environment. Please install one of them and try again.")
-    //revive:enable:error-strings
+    return nil
}
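Since detection goes through the package-level lookPathCmd variable, tests can simulate the presence or absence of the CLIs without touching the host. A hedged sketch of the stubbing pattern exercised by TestSelectBackend below (the stubbed paths are illustrative):

// Pretend podman is installed and docker is not; restore the real
// exec.LookPath when the test ends.
lookPathCmd = func(file string) (string, error) {
    if file == "podman" {
        return "/usr/bin/podman", nil
    }
    return "", errors.New("executable file not found in $PATH")
}
defer func() { lookPathCmd = exec.LookPath }()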
@@ -218,9 +218,9 @@ func TestSelectBackend(t *testing.T) {
            defer func() { lookPathCmd = exec.LookPath }()
            ctx := context.Background()
            ctx = envcontext.WithEnvConfig(ctx, tt.envConfig)
-            backend, err := selectBackend(ctx)
-            if tt.wantErr != (err != nil) {
-                t.Errorf("%s: Error result wanted %v, got %v", tt.name, tt.wantErr, err != nil)
+            backend := SelectBackend(ctx)
+            if tt.wantErr != (backend == nil) {
+                t.Errorf("%s: Error result wanted %v, got %v", tt.name, tt.wantErr, backend == nil)
            }
            if tt.wantErr == false {
                if tt.wantType != backend.String() {
@@ -13,7 +13,7 @@ import (

type command interface {
    CheckValidity() error
-    Execute(ctx context.Context, handler Handler) error
+    Execute(ctx context.Context, handler Handler, parentGroup *v1alpha2.CommandGroup) error
}

// newCommand returns a command implementation, depending on the type of the command

@@ -28,7 +28,7 @@ func (o *applyCommand) CheckValidity() error {
    return nil
}

-func (o *applyCommand) Execute(ctx context.Context, handler Handler) error {
+func (o *applyCommand) Execute(ctx context.Context, handler Handler, parentGroup *v1alpha2.CommandGroup) error {
    devfileComponents, err := o.devfileObj.Data.GetComponents(common.DevfileOptions{
        FilterByName: o.command.Apply.Component,
    })
@@ -49,5 +49,12 @@ func (o *applyCommand) Execute(ctx context.Context, handler Handler) error {
        return err
    }

-    return component.Apply(handler)
+    var kind v1alpha2.CommandGroupKind
+    if o.command.Apply.Group != nil {
+        kind = o.command.Apply.Group.Kind
+    }
+    if parentGroup != nil {
+        kind = parentGroup.Kind
+    }
+    return component.Apply(handler, kind)
}
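The resolution order is therefore: the command's own group kind, overridden by the enclosing composite's kind when one exists. A worked sketch mirroring the guard above (the concrete kinds are illustrative):

// An apply command whose own group is Build, executed from a composite
// whose group is Deploy: the parent's kind wins.
kind := v1alpha2.BuildCommandGroupKind // from o.command.Apply.Group
parentGroup := &v1alpha2.CommandGroup{Kind: v1alpha2.DeployCommandGroupKind}
if parentGroup != nil {
    kind = parentGroup.Kind
}
// kind == v1alpha2.DeployCommandGroupKind, so ApplyKubernetes,
// ApplyOpenShift and ApplyImage are all invoked with the Deploy kind.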
@@ -59,7 +59,7 @@ func Test_applyCommand_Execute(t *testing.T) {
                devfileObj: tt.fields.devfileObj(),
            }
            // TODO handler
-            if err := o.Execute(context.Background(), nil); (err != nil) != tt.wantErr {
+            if err := o.Execute(context.Background(), nil, nil); (err != nil) != tt.wantErr {
                t.Errorf("applyCommand.Execute() error = %v, wantErr %v", err, tt.wantErr)
            }
        })
@@ -40,7 +40,7 @@ func (o *compositeCommand) CheckValidity() error {
}

// Execute loops over each command and executes them serially
-func (o *compositeCommand) Execute(ctx context.Context, handler Handler) error {
+func (o *compositeCommand) Execute(ctx context.Context, handler Handler, parentGroup *v1alpha2.CommandGroup) error {
    allCommands, err := allCommandsMap(o.devfileObj)
    if err != nil {
        return err
@@ -50,7 +50,10 @@ func (o *compositeCommand) Execute(ctx context.Context, handler Handler) error {
        if err != nil {
            return err
        }
-        err = cmd.Execute(ctx, handler)
+        if parentGroup == nil {
+            parentGroup = o.command.Composite.Group
+        }
+        err = cmd.Execute(ctx, handler, parentGroup)
        if err != nil {
            return err
        }
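Because parentGroup is seeded only while it is still nil, the group of the outermost composite propagates unchanged through nested composites. A commented sketch of the rule (command names are illustrative):

// deploy-composite (group: Deploy)
// └── build-composite (group: Build)      parentGroup is already Deploy, kept
//     └── exec build-cmd (group: Build)   executed with kind Deploy
//
// Only the first (outermost) composite with a non-nil group sets the kind;
// inner groups never override it.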
@@ -42,11 +42,14 @@ func (o *parallelCompositeCommand) CheckValidity() error {
}

// Execute loops over each command and executes them in parallel
-func (o *parallelCompositeCommand) Execute(ctx context.Context, handler Handler) error {
+func (o *parallelCompositeCommand) Execute(ctx context.Context, handler Handler, parentGroup *v1alpha2.CommandGroup) error {
    allCommands, err := allCommandsMap(o.devfileObj)
    if err != nil {
        return err
    }
+    if parentGroup == nil {
+        parentGroup = o.command.Composite.Group
+    }
    commandExecs := util.NewConcurrentTasks(len(o.command.Composite.Commands))
    for _, devfileCmd := range o.command.Composite.Commands {
        cmd, err2 := newCommand(o.devfileObj, allCommands[devfileCmd])
@@ -55,7 +58,7 @@ func (o *parallelCompositeCommand) Execute(ctx context.Context, handler Handler)
        }
        commandExecs.Add(util.ConcurrentTask{
            ToRun: func(errChannel chan error) {
-                err3 := cmd.Execute(ctx, handler)
+                err3 := cmd.Execute(ctx, handler, parentGroup)
                if err3 != nil {
                    errChannel <- err3
                }
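util.NewConcurrentTasks collects one closure per sub-command and runs them together. The underlying fan-out/fan-in shape, sketched with the standard library only (this illustrates the pattern, not odo's util implementation):

var wg sync.WaitGroup
errChannel := make(chan error, len(cmds))
for _, c := range cmds {
    c := c // capture the loop variable
    wg.Add(1)
    go func() {
        defer wg.Done()
        if err := c.Execute(ctx, handler, parentGroup); err != nil {
            errChannel <- err
        }
    }()
}
wg.Wait()
close(errChannel)
return <-errChannel // nil if no goroutine failed, otherwise the first buffered error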
@@ -28,6 +28,26 @@ func (o *execCommand) CheckValidity() error {
    return nil
}

-func (o *execCommand) Execute(ctx context.Context, handler Handler) error {
-    return handler.Execute(ctx, o.command)
+func (o *execCommand) Execute(ctx context.Context, handler Handler, parentGroup *v1alpha2.CommandGroup) error {
+    if o.isTerminating(parentGroup) {
+        return handler.ExecuteTerminatingCommand(ctx, o.command)
+    }
+    return handler.ExecuteNonTerminatingCommand(ctx, o.command)
}

+// isTerminating returns true if the command is not part of a Run or Debug group
+func (o *execCommand) isTerminating(parentGroup *v1alpha2.CommandGroup) bool {
+    if parentGroup != nil {
+        kind := parentGroup.Kind
+        return isTerminatingKind(kind)
+    }
+    if o.command.Exec.Group == nil {
+        return true
+    }
+    kind := o.command.Exec.Group.Kind
+    return isTerminatingKind(kind)
+}
+
+func isTerminatingKind(kind v1alpha2.CommandGroupKind) bool {
+    return kind != v1alpha2.RunCommandGroupKind && kind != v1alpha2.DebugCommandGroupKind
+}
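Worked out against the devfile group kinds, the classification reads as follows (a sketch; TestCommandGroupKind is listed for completeness):

isTerminatingKind(v1alpha2.BuildCommandGroupKind)  // true: runs to completion
isTerminatingKind(v1alpha2.TestCommandGroupKind)   // true
isTerminatingKind(v1alpha2.DeployCommandGroupKind) // true
isTerminatingKind(v1alpha2.RunCommandGroupKind)    // false: long-running process
isTerminatingKind(v1alpha2.DebugCommandGroupKind)  // false
// An exec command with no group at all is treated as terminating, and a
// parent composite's group (when present) takes precedence over the
// command's own group.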
@@ -8,7 +8,7 @@ import (

type component interface {
    CheckValidity() error
-    Apply(handler Handler) error
+    Apply(handler Handler, kind v1alpha2.CommandGroupKind) error
}

// newComponent creates a concrete component, based on its type

@@ -24,6 +24,6 @@ func (e *containerComponent) CheckValidity() error {
    return nil
}

-func (e *containerComponent) Apply(handler Handler) error {
+func (e *containerComponent) Apply(handler Handler, kind v1alpha2.CommandGroupKind) error {
    return nil
}

@@ -25,7 +25,7 @@ func (e *imageComponent) CheckValidity() error {
    return nil
}

-func (e *imageComponent) Apply(handler Handler) error {
+func (e *imageComponent) Apply(handler Handler, kind v1alpha2.CommandGroupKind) error {
    return handler.ApplyImage(e.component)
}

@@ -25,8 +25,8 @@ func (e *kubernetesComponent) CheckValidity() error {
    return nil
}

-func (e *kubernetesComponent) Apply(handler Handler) error {
-    return handler.ApplyKubernetes(e.component)
+func (e *kubernetesComponent) Apply(handler Handler, kind v1alpha2.CommandGroupKind) error {
+    return handler.ApplyKubernetes(e.component, kind)
}

// GetK8sAndOcComponentsToPush returns the list of Kubernetes and OpenShift components to push,

@@ -24,6 +24,6 @@ func (e *openshiftComponent) CheckValidity() error {
    return nil
}

-func (e *openshiftComponent) Apply(handler Handler) error {
-    return handler.ApplyOpenShift(e.component)
+func (e *openshiftComponent) Apply(handler Handler, kind v1alpha2.CommandGroupKind) error {
+    return handler.ApplyOpenShift(e.component, kind)
}

@@ -24,6 +24,6 @@ func (e *volumeComponent) CheckValidity() error {
    return nil
}

-func (e *volumeComponent) Apply(handler Handler) error {
+func (e *volumeComponent) Apply(handler Handler, kind v1alpha2.CommandGroupKind) error {
    return nil
}
@@ -50,43 +50,57 @@ func (mr *MockHandlerMockRecorder) ApplyImage(image interface{}) *gomock.Call {
}

// ApplyKubernetes mocks base method.
-func (m *MockHandler) ApplyKubernetes(kubernetes v1alpha2.Component) error {
+func (m *MockHandler) ApplyKubernetes(kubernetes v1alpha2.Component, kind v1alpha2.CommandGroupKind) error {
    m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "ApplyKubernetes", kubernetes)
+    ret := m.ctrl.Call(m, "ApplyKubernetes", kubernetes, kind)
    ret0, _ := ret[0].(error)
    return ret0
}

// ApplyKubernetes indicates an expected call of ApplyKubernetes.
-func (mr *MockHandlerMockRecorder) ApplyKubernetes(kubernetes interface{}) *gomock.Call {
+func (mr *MockHandlerMockRecorder) ApplyKubernetes(kubernetes, kind interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyKubernetes", reflect.TypeOf((*MockHandler)(nil).ApplyKubernetes), kubernetes)
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyKubernetes", reflect.TypeOf((*MockHandler)(nil).ApplyKubernetes), kubernetes, kind)
}

// ApplyOpenShift mocks base method.
-func (m *MockHandler) ApplyOpenShift(openshift v1alpha2.Component) error {
+func (m *MockHandler) ApplyOpenShift(openshift v1alpha2.Component, kind v1alpha2.CommandGroupKind) error {
    m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "ApplyOpenShift", openshift)
+    ret := m.ctrl.Call(m, "ApplyOpenShift", openshift, kind)
    ret0, _ := ret[0].(error)
    return ret0
}

// ApplyOpenShift indicates an expected call of ApplyOpenShift.
-func (mr *MockHandlerMockRecorder) ApplyOpenShift(openshift interface{}) *gomock.Call {
+func (mr *MockHandlerMockRecorder) ApplyOpenShift(openshift, kind interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyOpenShift", reflect.TypeOf((*MockHandler)(nil).ApplyOpenShift), openshift)
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyOpenShift", reflect.TypeOf((*MockHandler)(nil).ApplyOpenShift), openshift, kind)
}

-// Execute mocks base method.
-func (m *MockHandler) Execute(ctx context.Context, command v1alpha2.Command) error {
+// ExecuteNonTerminatingCommand mocks base method.
+func (m *MockHandler) ExecuteNonTerminatingCommand(ctx context.Context, command v1alpha2.Command) error {
    m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "Execute", ctx, command)
+    ret := m.ctrl.Call(m, "ExecuteNonTerminatingCommand", ctx, command)
    ret0, _ := ret[0].(error)
    return ret0
}

-// Execute indicates an expected call of Execute.
-func (mr *MockHandlerMockRecorder) Execute(ctx, command interface{}) *gomock.Call {
+// ExecuteNonTerminatingCommand indicates an expected call of ExecuteNonTerminatingCommand.
+func (mr *MockHandlerMockRecorder) ExecuteNonTerminatingCommand(ctx, command interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Execute", reflect.TypeOf((*MockHandler)(nil).Execute), ctx, command)
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteNonTerminatingCommand", reflect.TypeOf((*MockHandler)(nil).ExecuteNonTerminatingCommand), ctx, command)
}

+// ExecuteTerminatingCommand mocks base method.
+func (m *MockHandler) ExecuteTerminatingCommand(ctx context.Context, command v1alpha2.Command) error {
+    m.ctrl.T.Helper()
+    ret := m.ctrl.Call(m, "ExecuteTerminatingCommand", ctx, command)
+    ret0, _ := ret[0].(error)
+    return ret0
+}
+
+// ExecuteTerminatingCommand indicates an expected call of ExecuteTerminatingCommand.
+func (mr *MockHandlerMockRecorder) ExecuteTerminatingCommand(ctx, command interface{}) *gomock.Call {
+    mr.mock.ctrl.T.Helper()
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteTerminatingCommand", reflect.TypeOf((*MockHandler)(nil).ExecuteTerminatingCommand), ctx, command)
+}
@@ -22,9 +22,10 @@ const DebugEndpointNamePrefix = "debug"

type Handler interface {
    ApplyImage(image v1alpha2.Component) error
-    ApplyKubernetes(kubernetes v1alpha2.Component) error
-    ApplyOpenShift(openshift v1alpha2.Component) error
-    Execute(ctx context.Context, command v1alpha2.Command) error
+    ApplyKubernetes(kubernetes v1alpha2.Component, kind v1alpha2.CommandGroupKind) error
+    ApplyOpenShift(openshift v1alpha2.Component, kind v1alpha2.CommandGroupKind) error
+    ExecuteNonTerminatingCommand(ctx context.Context, command v1alpha2.Command) error
+    ExecuteTerminatingCommand(ctx context.Context, command v1alpha2.Command) error
}

// Deploy executes the default deploy command of the devfile.
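Any type providing these five methods satisfies the interface. A minimal no-op sketch (illustrative only, not the real handler in pkg/component):

type noopHandler struct{}

func (noopHandler) ApplyImage(image v1alpha2.Component) error { return nil }
func (noopHandler) ApplyKubernetes(kubernetes v1alpha2.Component, kind v1alpha2.CommandGroupKind) error {
    return nil
}
func (noopHandler) ApplyOpenShift(openshift v1alpha2.Component, kind v1alpha2.CommandGroupKind) error {
    return nil
}
func (noopHandler) ExecuteNonTerminatingCommand(ctx context.Context, command v1alpha2.Command) error {
    return nil
}
func (noopHandler) ExecuteTerminatingCommand(ctx context.Context, command v1alpha2.Command) error {
    return nil
}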
@@ -71,7 +72,7 @@ func executeCommand(ctx context.Context, devfileObj parser.DevfileObj, command v
    if err != nil {
        return err
    }
-    return cmd.Execute(ctx, handler)
+    return cmd.Execute(ctx, handler, nil)
}

// GetCommand iterates through the devfile commands and returns the devfile command with the specified name and group kind.

@@ -263,7 +264,7 @@ func execDevfileEvent(ctx context.Context, devfileObj parser.DevfileObj, events
        return err
    }
    // Execute command in container
-    err = c.Execute(ctx, handler)
+    err = c.Execute(ctx, handler, nil)
    if err != nil {
        return fmt.Errorf("unable to execute devfile command %q: %w", commandName, err)
    }
@@ -419,8 +419,8 @@ func TestDeploy(t *testing.T) {
            handler: func(ctrl *gomock.Controller) Handler {
                h := NewMockHandler(ctrl)
                h.EXPECT().ApplyImage(imageComponent)
-                h.EXPECT().ApplyKubernetes(deploymentComponent)
-                h.EXPECT().ApplyKubernetes(serviceComponent)
+                h.EXPECT().ApplyKubernetes(deploymentComponent, v1alpha2.DeployCommandGroupKind)
+                h.EXPECT().ApplyKubernetes(serviceComponent, v1alpha2.DeployCommandGroupKind)
                return h
            },
        },
@@ -485,6 +485,19 @@ func TestBuild(t *testing.T) {
        CommandLine: "build my-app",
        Component:   containerComp.Name,
    })
+
+    nonDefaultRunCommand := generator.GetExecCommand(generator.ExecCommandParams{
+        Kind:        v1alpha2.RunCommandGroupKind,
+        Id:          "my-non-default-run-command",
+        CommandLine: "run my-app",
+        Component:   containerComp.Name,
+    })
+    defaultBuildCommandComposite := generator.GetCompositeCommand(generator.CompositeCommandParams{
+        Kind:      v1alpha2.BuildCommandGroupKind,
+        Id:        "my-default-build-command-composite",
+        IsDefault: pointer.Bool(true),
+        Commands:  []string{"my-non-default-run-command"},
+    })
    type args struct {
        devfileObj func() parser.DevfileObj
        handler    func(ctrl *gomock.Controller) Handler
@@ -508,9 +521,9 @@ func TestBuild(t *testing.T) {
            },
            handler: func(ctrl *gomock.Controller) Handler {
                h := NewMockHandler(ctrl)
-                h.EXPECT().Execute(gomock.Any(), gomock.Eq(defaultBuildCommand)).Times(0)
-                h.EXPECT().Execute(gomock.Any(), gomock.Eq(nonDefaultBuildCommandExplicit)).Times(0)
-                h.EXPECT().Execute(gomock.Any(), gomock.Eq(nonDefaultBuildCommandImplicit)).Times(0)
+                h.EXPECT().ExecuteTerminatingCommand(gomock.Any(), gomock.Eq(defaultBuildCommand)).Times(0)
+                h.EXPECT().ExecuteTerminatingCommand(gomock.Any(), gomock.Eq(nonDefaultBuildCommandExplicit)).Times(0)
+                h.EXPECT().ExecuteTerminatingCommand(gomock.Any(), gomock.Eq(nonDefaultBuildCommandImplicit)).Times(0)
                return h
            },
        },
@@ -528,9 +541,9 @@ func TestBuild(t *testing.T) {
            },
            handler: func(ctrl *gomock.Controller) Handler {
                h := NewMockHandler(ctrl)
-                h.EXPECT().Execute(gomock.Any(), gomock.Eq(defaultBuildCommand)).Times(1)
-                h.EXPECT().Execute(gomock.Any(), gomock.Eq(nonDefaultBuildCommandExplicit)).Times(0)
-                h.EXPECT().Execute(gomock.Any(), gomock.Eq(nonDefaultBuildCommandImplicit)).Times(0)
+                h.EXPECT().ExecuteTerminatingCommand(gomock.Any(), gomock.Eq(defaultBuildCommand)).Times(1)
+                h.EXPECT().ExecuteTerminatingCommand(gomock.Any(), gomock.Eq(nonDefaultBuildCommandExplicit)).Times(0)
+                h.EXPECT().ExecuteTerminatingCommand(gomock.Any(), gomock.Eq(nonDefaultBuildCommandImplicit)).Times(0)
                return h
            },
        },
@@ -548,9 +561,9 @@ func TestBuild(t *testing.T) {
            },
            handler: func(ctrl *gomock.Controller) Handler {
                h := NewMockHandler(ctrl)
-                h.EXPECT().Execute(gomock.Any(), gomock.Eq(defaultBuildCommand)).Times(0)
-                h.EXPECT().Execute(gomock.Any(), gomock.Eq(nonDefaultBuildCommandExplicit)).Times(0)
-                h.EXPECT().Execute(gomock.Any(), gomock.Eq(nonDefaultBuildCommandImplicit)).Times(0)
+                h.EXPECT().ExecuteTerminatingCommand(gomock.Any(), gomock.Eq(defaultBuildCommand)).Times(0)
+                h.EXPECT().ExecuteTerminatingCommand(gomock.Any(), gomock.Eq(nonDefaultBuildCommandExplicit)).Times(0)
+                h.EXPECT().ExecuteTerminatingCommand(gomock.Any(), gomock.Eq(nonDefaultBuildCommandImplicit)).Times(0)
                return h
            },
            cmdName: "my-explicit-non-default-build-command",
@@ -570,14 +583,34 @@ func TestBuild(t *testing.T) {
            },
            handler: func(ctrl *gomock.Controller) Handler {
                h := NewMockHandler(ctrl)
-                h.EXPECT().Execute(gomock.Any(), gomock.Eq(defaultBuildCommand)).Times(0)
-                h.EXPECT().Execute(gomock.Any(), gomock.Eq(nonDefaultBuildCommandExplicit)).Times(1)
-                h.EXPECT().Execute(gomock.Any(), gomock.Eq(nonDefaultBuildCommandImplicit)).Times(0)
+                h.EXPECT().ExecuteTerminatingCommand(gomock.Any(), gomock.Eq(defaultBuildCommand)).Times(0)
+                h.EXPECT().ExecuteTerminatingCommand(gomock.Any(), gomock.Eq(nonDefaultBuildCommandExplicit)).Times(1)
+                h.EXPECT().ExecuteTerminatingCommand(gomock.Any(), gomock.Eq(nonDefaultBuildCommandImplicit)).Times(0)
                return h
            },
            cmdName: "my-explicit-non-default-build-command",
        },
    },
+    {
+        name: "with default composite command",
+        args: args{
+            devfileObj: func() parser.DevfileObj {
+                dData, _ := data.NewDevfileData(string(data.APISchemaVersion200))
+                _ = dData.AddCommands([]v1alpha2.Command{defaultBuildCommandComposite, nonDefaultRunCommand})
+                _ = dData.AddComponents([]v1alpha2.Component{containerComp})
+                return parser.DevfileObj{
+                    Data: dData,
+                }
+            },
+            handler: func(ctrl *gomock.Controller) Handler {
+                h := NewMockHandler(ctrl)
+                h.EXPECT().ExecuteTerminatingCommand(gomock.Any(), gomock.Eq(nonDefaultRunCommand)).Times(1)
+                h.EXPECT().ExecuteTerminatingCommand(gomock.Any(), gomock.Eq(nonDefaultBuildCommandExplicit)).Times(0)
+                h.EXPECT().ExecuteTerminatingCommand(gomock.Any(), gomock.Eq(nonDefaultBuildCommandImplicit)).Times(0)
+                return h
+            },
+        },
+    },
} {
    t.Run(tt.name, func(t *testing.T) {
        err := Build(context.Background(), tt.args.devfileObj(), tt.args.cmdName, tt.args.handler(gomock.NewController(t)))
@@ -64,7 +64,7 @@ func (o *BuildImagesOptions) Validate(ctx context.Context) (err error) {

// Run contains the logic for the odo command
func (o *BuildImagesOptions) Run(ctx context.Context) (err error) {
-    return image.BuildPushImages(ctx, o.clientset.FS, o.pushFlag)
+    return image.BuildPushImages(ctx, image.SelectBackend(ctx), o.clientset.FS, o.pushFlag)
}

// NewCmdBuildImages implements the odo command
@@ -94,7 +94,7 @@ const (
var subdeps map[string][]string = map[string][]string{
    ALIZER:           {REGISTRY},
    CONFIG_AUTOMOUNT: {KUBERNETES_NULLABLE, PODMAN_NULLABLE},
-    DELETE_COMPONENT: {KUBERNETES_NULLABLE, PODMAN_NULLABLE, EXEC},
+    DELETE_COMPONENT: {KUBERNETES_NULLABLE, PODMAN_NULLABLE, EXEC, CONFIG_AUTOMOUNT},
    DEPLOY:           {KUBERNETES, FILESYSTEM, CONFIG_AUTOMOUNT},
    DEV: {
        BINDING,
@@ -230,7 +230,7 @@ func Fetch(command *cobra.Command, platform string) (*Clientset, error) {
        }
    }
    if isDefined(command, DELETE_COMPONENT) {
-        dep.DeleteClient = _delete.NewDeleteComponentClient(dep.KubernetesClient, dep.PodmanClient, dep.ExecClient)
+        dep.DeleteClient = _delete.NewDeleteComponentClient(dep.KubernetesClient, dep.PodmanClient, dep.ExecClient, dep.ConfigAutomountClient)
    }
    if isDefined(command, DEPLOY) {
        dep.DeployClient = deploy.NewDeployClient(dep.KubernetesClient, dep.ConfigAutomountClient, dep.FS)
127
pkg/platform/mock.go
Normal file
@@ -0,0 +1,127 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: pkg/platform/interface.go

// Package platform is a generated GoMock package.
package platform

import (
    context "context"
    io "io"
    reflect "reflect"

    gomock "github.com/golang/mock/gomock"
    v1 "k8s.io/api/core/v1"
    unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// MockClient is a mock of Client interface.
type MockClient struct {
    ctrl     *gomock.Controller
    recorder *MockClientMockRecorder
}

// MockClientMockRecorder is the mock recorder for MockClient.
type MockClientMockRecorder struct {
    mock *MockClient
}

// NewMockClient creates a new mock instance.
func NewMockClient(ctrl *gomock.Controller) *MockClient {
    mock := &MockClient{ctrl: ctrl}
    mock.recorder = &MockClientMockRecorder{mock}
    return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockClient) EXPECT() *MockClientMockRecorder {
    return m.recorder
}

// ExecCMDInContainer mocks base method.
func (m *MockClient) ExecCMDInContainer(ctx context.Context, containerName, podName string, cmd []string, stdout, stderr io.Writer, stdin io.Reader, tty bool) error {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "ExecCMDInContainer", ctx, containerName, podName, cmd, stdout, stderr, stdin, tty)
    ret0, _ := ret[0].(error)
    return ret0
}

// ExecCMDInContainer indicates an expected call of ExecCMDInContainer.
func (mr *MockClientMockRecorder) ExecCMDInContainer(ctx, containerName, podName, cmd, stdout, stderr, stdin, tty interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecCMDInContainer", reflect.TypeOf((*MockClient)(nil).ExecCMDInContainer), ctx, containerName, podName, cmd, stdout, stderr, stdin, tty)
}

// GetAllPodsInNamespaceMatchingSelector mocks base method.
func (m *MockClient) GetAllPodsInNamespaceMatchingSelector(selector, ns string) (*v1.PodList, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "GetAllPodsInNamespaceMatchingSelector", selector, ns)
    ret0, _ := ret[0].(*v1.PodList)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}

// GetAllPodsInNamespaceMatchingSelector indicates an expected call of GetAllPodsInNamespaceMatchingSelector.
func (mr *MockClientMockRecorder) GetAllPodsInNamespaceMatchingSelector(selector, ns interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllPodsInNamespaceMatchingSelector", reflect.TypeOf((*MockClient)(nil).GetAllPodsInNamespaceMatchingSelector), selector, ns)
}

// GetAllResourcesFromSelector mocks base method.
func (m *MockClient) GetAllResourcesFromSelector(selector, ns string) ([]unstructured.Unstructured, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "GetAllResourcesFromSelector", selector, ns)
    ret0, _ := ret[0].([]unstructured.Unstructured)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}

// GetAllResourcesFromSelector indicates an expected call of GetAllResourcesFromSelector.
func (mr *MockClientMockRecorder) GetAllResourcesFromSelector(selector, ns interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllResourcesFromSelector", reflect.TypeOf((*MockClient)(nil).GetAllResourcesFromSelector), selector, ns)
}

// GetPodLogs mocks base method.
func (m *MockClient) GetPodLogs(podName, containerName string, followLog bool) (io.ReadCloser, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "GetPodLogs", podName, containerName, followLog)
    ret0, _ := ret[0].(io.ReadCloser)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}

// GetPodLogs indicates an expected call of GetPodLogs.
func (mr *MockClientMockRecorder) GetPodLogs(podName, containerName, followLog interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodLogs", reflect.TypeOf((*MockClient)(nil).GetPodLogs), podName, containerName, followLog)
}

// GetPodsMatchingSelector mocks base method.
func (m *MockClient) GetPodsMatchingSelector(selector string) (*v1.PodList, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "GetPodsMatchingSelector", selector)
    ret0, _ := ret[0].(*v1.PodList)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}

// GetPodsMatchingSelector indicates an expected call of GetPodsMatchingSelector.
func (mr *MockClientMockRecorder) GetPodsMatchingSelector(selector interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodsMatchingSelector", reflect.TypeOf((*MockClient)(nil).GetPodsMatchingSelector), selector)
}

// GetRunningPodFromSelector mocks base method.
func (m *MockClient) GetRunningPodFromSelector(selector string) (*v1.Pod, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "GetRunningPodFromSelector", selector)
    ret0, _ := ret[0].(*v1.Pod)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}

// GetRunningPodFromSelector indicates an expected call of GetRunningPodFromSelector.
func (mr *MockClientMockRecorder) GetRunningPodFromSelector(selector interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRunningPodFromSelector", reflect.TypeOf((*MockClient)(nil).GetRunningPodFromSelector), selector)
}
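The generated mock is used like any other gomock client; a minimal sketch (test name and selector value are illustrative):

func TestWithPlatformClient(t *testing.T) {
    ctrl := gomock.NewController(t)
    client := NewMockClient(ctrl)
    client.EXPECT().
        GetRunningPodFromSelector("app.kubernetes.io/instance=my-component").
        Return(&v1.Pod{}, nil)
    // pass `client` to any code under test that expects a platform.Client
}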
@@ -8,12 +8,19 @@ import (
)

// CreateFakePod creates a fake pod with the given component, pod and container names
-func CreateFakePod(componentName, podName string) *corev1.Pod {
+func CreateFakePod(componentName, podName, containerName string) *corev1.Pod {
    fakePod := &corev1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name:   podName,
            Labels: odolabels.GetLabels(componentName, "app", "", odolabels.ComponentDevMode, false),
        },
+        Spec: corev1.PodSpec{
+            Containers: []corev1.Container{
+                {
+                    Name: containerName,
+                },
+            },
+        },
        Status: corev1.PodStatus{
            Phase: corev1.PodRunning,
        },
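The extra containerName parameter lets tests exercise helpers that inspect a pod's containers; a hedged usage sketch (the testingutil package name is assumed from odo's test helpers):

pod := testingutil.CreateFakePod("my-component", "my-pod", "my-container")
names := component.GetContainersNames(pod) // -> []string{"my-container"}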
@@ -99,3 +99,7 @@ $mockgen -source=pkg/podman/interface.go \
$mockgen -source=pkg/configAutomount/interface.go \
    -package configAutomount \
    -destination pkg/configAutomount/mock.go
+
+$mockgen -source=pkg/platform/interface.go \
+    -package platform \
+    -destination pkg/platform/mock.go

@@ -224,7 +224,7 @@ func CommonBeforeEach() CommonVar {
        }
        Expect(resetErr).ShouldNot(HaveOccurred())
    })
-    Expect(os.Setenv("PODMAN_CMD_INIT_TIMEOUT", "10s")).ShouldNot(HaveOccurred())
+    Expect(os.Setenv("PODMAN_CMD_INIT_TIMEOUT", "30s")).ShouldNot(HaveOccurred())

    // Generate a dedicated containers.conf with a specific namespace
    GenerateAndSetContainersConf(commonVar.ConfigDir)
@@ -47,9 +47,8 @@ func ExtractK8sAndOcComponentsFromOutputOnPodman(out string) []string {
    // Example lines to match:
    // ⚠ Kubernetes components are not supported on Podman. Skipping: k8s-deploybydefault-true-and-referenced, k8s-deploybydefault-true-and-not-referenced.
    // ⚠ OpenShift components are not supported on Podman. Skipping: ocp-deploybydefault-true-and-referenced.
-    // ⚠ Apply OpenShift components are not supported on Podman. Skipping: k8s-deploybydefault-true-and-referenced.
-    // ⚠ Apply OpenShift components are not supported on Podman. Skipping: k8s-deploybydefault-true-and-referenced.
-    re := regexp.MustCompile(`(?:Kubernetes|OpenShift) components are not supported on Podman\.\s*Skipping:\s*([^\n]+)\.`)
+    // ⚠ Apply Kubernetes/Openshift components are not supported on Podman. Skipping: k8s-deploybydefault-true-and-referenced.
+    re := regexp.MustCompile(`(?:Kubernetes|OpenShift|Kubernetes/Openshift) components are not supported on Podman\.\s*Skipping:\s*([^\n]+)\.`)
    for _, l := range lines {
        matches := re.FindStringSubmatch(l)
        if len(matches) > 1 {
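The widened alternation accepts the new "Apply Kubernetes/Openshift" wording as well as the older messages; a quick check (the sample line is illustrative):

re := regexp.MustCompile(`(?:Kubernetes|OpenShift|Kubernetes/Openshift) components are not supported on Podman\.\s*Skipping:\s*([^\n]+)\.`)
line := "⚠ Apply Kubernetes/Openshift components are not supported on Podman. Skipping: deploy-k8s-resource."
fmt.Println(re.FindStringSubmatch(line)[1]) // deploy-k8s-resource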
@@ -3597,7 +3597,7 @@ CMD ["npm", "start"]
    })
    It("should show warning about being unable to create the resource when running odo dev on podman", func() {
-        Expect(string(errContents)).To(ContainSubstring("Kubernetes components are not supported on Podman. Skipping: "))
-        Expect(string(errContents)).To(ContainSubstring("Apply Kubernetes components are not supported on Podman. Skipping: "))
+        Expect(string(errContents)).To(ContainSubstring("Apply Kubernetes/Openshift components are not supported on Podman. Skipping: "))
        helper.MatchAllInOutput(string(errContents), []string{"deploy-k8s-resource", "deploy-a-third-k8s-resource"})
    })