Deploy, Events without devfile/adapters (#5460)
* Execute devfile command
* Undeploy
* cleanup devfile/adapters
* refactor
* Move GetOnePod to component package
* Move DoesComponentExist and Log from devfile/adapter to component package
* Exec without devfile/adapters
* Move Delete from devfile/adapters to component
* Remove old Deploy code
* review
* Add tests for issue 5454
* Review
@@ -3,6 +3,7 @@ package component
import (
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"reflect"
@@ -10,6 +11,7 @@ import (

	"github.com/pkg/errors"

	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
	"github.com/devfile/api/v2/pkg/devfile"
	"github.com/devfile/library/pkg/devfile/parser"
	parsercommon "github.com/devfile/library/pkg/devfile/parser/data/v2/common"
@@ -20,7 +22,9 @@ import (
	"github.com/redhat-developer/odo/pkg/devfile/location"
	"github.com/redhat-developer/odo/pkg/envinfo"
	"github.com/redhat-developer/odo/pkg/kclient"
	"github.com/redhat-developer/odo/pkg/libdevfile"
	"github.com/redhat-developer/odo/pkg/localConfigProvider"
	"github.com/redhat-developer/odo/pkg/log"
	"github.com/redhat-developer/odo/pkg/preference"
	"github.com/redhat-developer/odo/pkg/service"
	urlpkg "github.com/redhat-developer/odo/pkg/url"
@@ -30,6 +34,8 @@ import (

	v1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/klog"
)

const componentRandomNamePartsMaxLen = 12
@@ -503,3 +509,87 @@ func setLinksServiceNames(client kclient.ClientInterface, linkedSecrets []Secret
	}
	return nil
}

// GetOnePod gets a pod using the component and app name
func GetOnePod(client kclient.ClientInterface, componentName string, appName string) (*corev1.Pod, error) {
	return client.GetOnePodFromSelector(componentlabels.GetSelector(componentName, appName))
}

// ComponentExists checks whether a deployment by the given name exists in the given app
func ComponentExists(client kclient.ClientInterface, name string, app string) (bool, error) {
	deployment, err := client.GetOneDeployment(name, app)
	if _, ok := err.(*kclient.DeploymentNotFoundError); ok {
		klog.V(2).Infof("Deployment %s not found for app %s", name, app)
		return false, nil
	}
	return deployment != nil, err
}

// Log returns log from component
func Log(client kclient.ClientInterface, componentName string, appName string, follow bool, command v1alpha2.Command) (io.ReadCloser, error) {

	pod, err := GetOnePod(client, componentName, appName)
	if err != nil {
		return nil, errors.Errorf("the component %s doesn't exist on the cluster", componentName)
	}

	if pod.Status.Phase != corev1.PodRunning {
		return nil, errors.Errorf("unable to show logs, component is not in running state. current status=%v", pod.Status.Phase)
	}

	containerName := command.Exec.Component

	return client.GetPodLogs(pod.Name, containerName, follow)
}

// Delete deletes the component
func Delete(kubeClient kclient.ClientInterface, devfileObj parser.DevfileObj, componentName string, appName string, labels map[string]string, show bool, wait bool) error {
	if labels == nil {
		return fmt.Errorf("cannot delete with labels being nil")
	}
	log.Printf("Gathering information for component: %q", componentName)
	podSpinner := log.Spinner("Checking status for component")
	defer podSpinner.End(false)

	pod, err := GetOnePod(kubeClient, componentName, appName)
	if kerrors.IsForbidden(err) {
		klog.V(2).Infof("Resource for %s forbidden", componentName)
		// log the error if it failed to determine if the component exists due to insufficient RBACs
		podSpinner.End(false)
		log.Warningf("%v", err)
		return nil
	} else if e, ok := err.(*kclient.PodNotFoundError); ok {
		podSpinner.End(false)
		log.Warningf("%v", e)
		return nil
	} else if err != nil {
		return errors.Wrapf(err, "unable to determine if component %s exists", componentName)
	}

	podSpinner.End(true)

	// if there are preStop events, execute them before deleting the deployment
	if libdevfile.HasPreStopEvents(devfileObj) {
		if pod.Status.Phase != corev1.PodRunning {
			return fmt.Errorf("unable to execute preStop events, pod for component %s is not running", componentName)
		}
		log.Infof("\nExecuting %s event commands for component %s", libdevfile.PreStop, componentName)
		err = libdevfile.ExecPreStopEvents(devfileObj, componentName, NewExecHandler(kubeClient, pod.Name, show))
		if err != nil {
			return err
		}
	}

	log.Infof("\nDeleting component %s", componentName)
	spinner := log.Spinner("Deleting Kubernetes resources for component")
	defer spinner.End(false)

	err = kubeClient.Delete(labels, wait)
	if err != nil {
		return err
	}

	spinner.End(true)
	log.Successf("Successfully deleted component")
	return nil
}
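With these helpers moved into the component package, callers no longer need a devfile adapter to inspect or delete a component. A minimal usage sketch (not part of the diff; kubeClient, devfileObj and labels are assumed to already exist in the caller):

	exists, err := component.ComponentExists(kubeClient, "my-component", "app")
	if err != nil {
		return err
	}
	if exists {
		// Delete runs any devfile preStop events first, then removes the
		// Kubernetes resources selected by the given labels.
		if err := component.Delete(kubeClient, devfileObj, "my-component", "app", labels, false, true); err != nil {
			return err
		}
	}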
pkg/component/exec_handler.go (new file, 156 lines)
@@ -0,0 +1,156 @@
package component

import (
	"bufio"
	"fmt"
	"io"
	"os"

	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
	"github.com/pkg/errors"
	"github.com/redhat-developer/odo/pkg/kclient"
	"github.com/redhat-developer/odo/pkg/log"
	"github.com/redhat-developer/odo/pkg/machineoutput"
	"github.com/redhat-developer/odo/pkg/util"
	"k8s.io/klog"
)

type execHandler struct {
	kubeClient kclient.ClientInterface
	podName    string
	show       bool
}

const ShellExecutable string = "/bin/sh"

func NewExecHandler(kubeClient kclient.ClientInterface, podName string, show bool) *execHandler {
	return &execHandler{
		kubeClient: kubeClient,
		podName:    podName,
		show:       show,
	}
}

func (o *execHandler) ApplyImage(image v1alpha2.Component) error {
	return nil
}

func (o *execHandler) ApplyKubernetes(kubernetes v1alpha2.Component) error {
	return nil
}

func (o *execHandler) Execute(command v1alpha2.Command) error {
	msg := fmt.Sprintf("Executing %s command %q on container %q", command.Id, command.Exec.CommandLine, command.Exec.Component)
	spinner := log.Spinner(msg)
	defer spinner.End(false)

	logger := machineoutput.NewMachineEventLoggingClient()
	stdoutWriter, stdoutChannel, stderrWriter, stderrChannel := logger.CreateContainerOutputWriter()

	cmdline := getCmdline(command)
	err := executeCommand(o.kubeClient, command.Exec.Component, o.podName, cmdline, o.show, stdoutWriter, stderrWriter)

	closeWriterAndWaitForAck(stdoutWriter, stdoutChannel, stderrWriter, stderrChannel)

	spinner.End(true)
	return err
}

func getCmdline(command v1alpha2.Command) []string {
	// deal with environment variables
	var cmdLine string
	setEnvVariable := util.GetCommandStringFromEnvs(command.Exec.Env)

	if setEnvVariable == "" {
		cmdLine = command.Exec.CommandLine
	} else {
		cmdLine = setEnvVariable + " && " + command.Exec.CommandLine
	}

	// Change to the workdir and execute the command
	var cmd []string
	if command.Exec.WorkingDir != "" {
		// since we are using /bin/sh -c, the command needs to be within a single double quote instance, for example "cd /tmp && pwd"
		cmd = []string{ShellExecutable, "-c", "cd " + command.Exec.WorkingDir + " && " + cmdLine}
	} else {
		cmd = []string{ShellExecutable, "-c", cmdLine}
	}
	return cmd
}

func closeWriterAndWaitForAck(stdoutWriter *io.PipeWriter, stdoutChannel chan interface{}, stderrWriter *io.PipeWriter, stderrChannel chan interface{}) {
	if stdoutWriter != nil {
		_ = stdoutWriter.Close()
		<-stdoutChannel
	}
	if stderrWriter != nil {
		_ = stderrWriter.Close()
		<-stderrChannel
	}
}

// executeCommand executes the given command in the pod's container
func executeCommand(client kclient.ClientInterface, containerName string, podName string, command []string, show bool, consoleOutputStdout *io.PipeWriter, consoleOutputStderr *io.PipeWriter) (err error) {
	stdoutReader, stdoutWriter := io.Pipe()
	stderrReader, stderrWriter := io.Pipe()

	var cmdOutput string

	klog.V(2).Infof("Executing command %v for pod: %v in container: %v", command, podName, containerName)

	// Read stdout and stderr, store their output in cmdOutput, and also pass output to consoleOutput Writers (if non-nil)
	stdoutCompleteChannel := startReaderGoroutine(stdoutReader, show, &cmdOutput, consoleOutputStdout)
	stderrCompleteChannel := startReaderGoroutine(stderrReader, show, &cmdOutput, consoleOutputStderr)

	err = client.ExecCMDInContainer(containerName, podName, command, stdoutWriter, stderrWriter, nil, false)

	// Block until we have received all the container output from each stream
	_ = stdoutWriter.Close()
	<-stdoutCompleteChannel
	_ = stderrWriter.Close()
	<-stderrCompleteChannel

	if err != nil {
		// It is safe to read from cmdOutput here, as the goroutines are guaranteed to have terminated at this point.
		klog.V(2).Infof("executeCommand returned an err: %v for command '%v'. output: %v", err, command, cmdOutput)

		return errors.Wrapf(err, "unable to exec command %v: \n%v", command, cmdOutput)
	}

	return
}

// startReaderGoroutine pipes the output from the writer (passed into ExecCMDInContainer) to the loggers.
// The returned channel will contain a single nil entry once the reader has closed.
func startReaderGoroutine(reader io.Reader, show bool, cmdOutput *string, consoleOutput *io.PipeWriter) chan interface{} {

	result := make(chan interface{})

	go func() {
		scanner := bufio.NewScanner(reader)
		for scanner.Scan() {
			line := scanner.Text()

			if log.IsDebug() || show {
				_, err := fmt.Fprintln(os.Stdout, line)
				if err != nil {
					log.Errorf("Unable to print to stdout: %s", err.Error())
				}
			}

			*cmdOutput += fmt.Sprintln(line)

			if consoleOutput != nil {
				_, err := consoleOutput.Write([]byte(line + "\n"))
				if err != nil {
					log.Errorf("Error occurred on writing string to consoleOutput writer: %s", err.Error())
				}
			}
		}
		result <- nil
	}()

	return result
}
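getCmdline folds the devfile command's environment variables and working directory into a single /bin/sh -c invocation, so only one exec round-trip to the container is needed. An illustrative (made-up) command and the rough shape of the resulting slice; the exact env prefix comes from util.GetCommandStringFromEnvs:

	cmd := v1alpha2.Command{
		Id: "build",
		CommandUnion: v1alpha2.CommandUnion{
			Exec: &v1alpha2.ExecCommand{
				CommandLine: "mvn package",
				Component:   "tools",
				WorkingDir:  "/projects",
				Env:         []v1alpha2.EnvVar{{Name: "DEBUG", Value: "true"}},
			},
		},
	}
	// getCmdline(cmd) returns roughly:
	// []string{"/bin/sh", "-c", `export DEBUG="true" && cd /projects && mvn package`}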
pkg/deploy/deploy.go (new file, 82 lines)
@@ -0,0 +1,82 @@
package deploy

import (
	"strings"

	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
	"github.com/devfile/library/pkg/devfile/parser"
	devfilefs "github.com/devfile/library/pkg/testingutil/filesystem"

	"github.com/pkg/errors"

	componentlabels "github.com/redhat-developer/odo/pkg/component/labels"
	"github.com/redhat-developer/odo/pkg/devfile/image"
	"github.com/redhat-developer/odo/pkg/kclient"
	"github.com/redhat-developer/odo/pkg/libdevfile"
	"github.com/redhat-developer/odo/pkg/log"
	"github.com/redhat-developer/odo/pkg/service"
)

type DeployClient struct {
	kubeClient kclient.ClientInterface
}

func NewDeployClient(kubeClient kclient.ClientInterface) *DeployClient {
	return &DeployClient{
		kubeClient: kubeClient,
	}
}

func (o *DeployClient) Deploy(devfileObj parser.DevfileObj, path string, appName string) error {
	deployHandler := newDeployHandler(devfileObj, path, o.kubeClient, appName)
	return libdevfile.Deploy(devfileObj, deployHandler)
}

type deployHandler struct {
	devfileObj parser.DevfileObj
	path       string
	kubeClient kclient.ClientInterface
	appName    string
}

func newDeployHandler(devfileObj parser.DevfileObj, path string, kubeClient kclient.ClientInterface, appName string) *deployHandler {
	return &deployHandler{
		devfileObj: devfileObj,
		path:       path,
		kubeClient: kubeClient,
		appName:    appName,
	}
}

func (o *deployHandler) ApplyImage(img v1alpha2.Component) error {
	return image.BuildPushSpecificImage(o.devfileObj, o.path, img, true)
}

func (o *deployHandler) ApplyKubernetes(kubernetes v1alpha2.Component) error {
	// validate if the GVRs represented by Kubernetes inlined components are supported by the underlying cluster
	_, err := service.ValidateResourceExist(o.kubeClient, kubernetes, o.path)
	if err != nil {
		return err
	}

	labels := componentlabels.GetLabels(kubernetes.Name, o.appName, true)
	u, err := service.GetK8sComponentAsUnstructured(kubernetes.Kubernetes, o.path, devfilefs.DefaultFs{})
	if err != nil {
		return err
	}

	log.Infof("\nDeploying Kubernetes %s: %s", u.GetKind(), u.GetName())
	isOperatorBackedService, err := service.PushKubernetesResource(o.kubeClient, u, labels)
	if err != nil {
		return errors.Wrap(err, "failed to create service(s) associated with the component")
	}
	if isOperatorBackedService {
		log.Successf("Kubernetes resource %q created on the cluster; refer to %q to know how to link it to the component", strings.Join([]string{u.GetKind(), u.GetName()}, "/"), "odo link -h")
	}
	return nil
}

func (o *deployHandler) Execute(command v1alpha2.Command) error {
	return errors.New("exec command is not implemented for Deploy")
}
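DeployClient is the single entry point the deploy flow needs: it wraps the cluster client, builds a deployHandler, and lets libdevfile.Deploy walk the devfile's deploy command. A rough wiring sketch (kubeClient, devfileObj and contextDir are assumed to exist in the caller):

	deployClient := deploy.NewDeployClient(kubeClient)
	// contextDir is the directory holding the devfile; "app" is the odo application name.
	if err := deployClient.Deploy(devfileObj, contextDir, "app"); err != nil {
		return err
	}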
pkg/deploy/interface.go (new file, 8 lines)
@@ -0,0 +1,8 @@
package deploy

import "github.com/devfile/library/pkg/devfile/parser"

type Client interface {
	// Deploy resources from a devfile located in path, for the specified appName
	Deploy(devfileObj parser.DevfileObj, path string, appName string) error
}
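The interface is deliberately tiny so that commands depending on it stay testable. The generated mock that follows was produced by MockGen from this file; after interface changes it can presumably be regenerated with something like mockgen -source=pkg/deploy/interface.go -package=deploy -destination=pkg/deploy/mock.go (flags assumed here; adjust to the project's mock generation setup).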
pkg/deploy/mock.go (new file, 49 lines)
@@ -0,0 +1,49 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: pkg/deploy/interface.go

// Package deploy is a generated GoMock package.
package deploy

import (
	reflect "reflect"

	parser "github.com/devfile/library/pkg/devfile/parser"
	gomock "github.com/golang/mock/gomock"
)

// MockClient is a mock of Client interface.
type MockClient struct {
	ctrl     *gomock.Controller
	recorder *MockClientMockRecorder
}

// MockClientMockRecorder is the mock recorder for MockClient.
type MockClientMockRecorder struct {
	mock *MockClient
}

// NewMockClient creates a new mock instance.
func NewMockClient(ctrl *gomock.Controller) *MockClient {
	mock := &MockClient{ctrl: ctrl}
	mock.recorder = &MockClientMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockClient) EXPECT() *MockClientMockRecorder {
	return m.recorder
}

// Deploy mocks base method.
func (m *MockClient) Deploy(devfileObj parser.DevfileObj, path, appName string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Deploy", devfileObj, path, appName)
	ret0, _ := ret[0].(error)
	return ret0
}

// Deploy indicates an expected call of Deploy.
func (mr *MockClientMockRecorder) Deploy(devfileObj, path, appName interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Deploy", reflect.TypeOf((*MockClient)(nil).Deploy), devfileObj, path, appName)
}
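Because MockClient satisfies deploy.Client, command-level unit tests can assert that a deploy happens without touching a cluster. A possible test fragment (the test name and wiring are illustrative, not part of the diff):

	func TestDeployIsCalled(t *testing.T) {
		ctrl := gomock.NewController(t)
		defer ctrl.Finish()

		deployClient := deploy.NewMockClient(ctrl)
		deployClient.EXPECT().
			Deploy(gomock.Any(), gomock.Any(), "app").
			Return(nil)

		// hand deployClient to the code under test wherever a deploy.Client is expected
	}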
@@ -1,7 +0,0 @@
|
||||
package common
|
||||
|
||||
// ApplyClient is a wrapper around ApplyComponent which runs an apply command on a component
|
||||
type ApplyClient interface {
|
||||
ApplyComponent(component string) error
|
||||
UnApplyComponent(component string) error
|
||||
}
|
||||
@@ -43,7 +43,7 @@ func New(devfile devfilev1.Command, knowCommands map[string]devfilev1.Command, e
|
||||
} else if devfile.Exec != nil {
|
||||
return newExecCommand(devfile, executor)
|
||||
} else {
|
||||
return newApplyCommand(devfile, executor)
|
||||
return nil, errors.New("apply command not implemented")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -153,11 +153,6 @@ func GetTestCommand(data data.DevfileData, devfileTestCmd string) (runCommand de
|
||||
return getCommand(data, devfileTestCmd, devfilev1.TestCommandGroupKind)
|
||||
}
|
||||
|
||||
// GetDeployCommand iterates through the components in the devfile and returns the deploy command
|
||||
func GetDeployCommand(data data.DevfileData, devfileDeployCmd string) (deployCommand devfilev1.Command, err error) {
|
||||
return getCommand(data, devfileDeployCmd, devfilev1.DeployCommandGroupKind)
|
||||
}
|
||||
|
||||
// ValidateAndGetPushDevfileCommands validates the build and the run command,
|
||||
// if provided through odo push or else checks the devfile for devBuild and devRun.
|
||||
// It returns the build and run commands if its validated successfully, error otherwise.
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
devfilev1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
)
|
||||
|
||||
// applyCommand is a command implementation for Apply commands
|
||||
type applyCommand struct {
|
||||
adapter commandExecutor
|
||||
id string
|
||||
component string
|
||||
}
|
||||
|
||||
// newApplyCommand creates a new applyCommand instance, adapting the devfile-defined command to run in the target component's container
|
||||
func newApplyCommand(command devfilev1.Command, executor commandExecutor) (command, error) {
|
||||
apply := command.Apply
|
||||
return &applyCommand{
|
||||
adapter: executor,
|
||||
id: command.Id,
|
||||
component: apply.Component,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s applyCommand) Execute(show bool) error {
|
||||
err := s.adapter.ApplyComponent(s.component)
|
||||
return err
|
||||
}
|
||||
|
||||
func (s applyCommand) UnExecute() error {
|
||||
err := s.adapter.UnApplyComponent(s.component)
|
||||
return err
|
||||
}
|
||||
@@ -6,24 +6,6 @@ import (
|
||||
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
)
|
||||
|
||||
// NoDefaultForGroup indicates an error when no default command was found for the given Group
|
||||
type NoDefaultForGroup struct {
|
||||
Group v1alpha2.CommandGroupKind
|
||||
}
|
||||
|
||||
func (n NoDefaultForGroup) Error() string {
|
||||
return fmt.Sprintf("there should be exactly one default command for command group %v, currently there is no default command", n.Group)
|
||||
}
|
||||
|
||||
// MoreDefaultForGroup indicates an error when more than one default command was found for the given Group
|
||||
type MoreDefaultForGroup struct {
|
||||
Group v1alpha2.CommandGroupKind
|
||||
}
|
||||
|
||||
func (m MoreDefaultForGroup) Error() string {
|
||||
return fmt.Sprintf("there should be exactly one default command for command group %v, currently there is more than one default command", m.Group)
|
||||
}
|
||||
|
||||
// NoCommandForGroup indicates an error when no command was found for the given Group
|
||||
type NoCommandForGroup struct {
|
||||
Group v1alpha2.CommandGroupKind
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
// commandExecutor defines the interface adapters must implement to be able to execute commands in a generic way
|
||||
type commandExecutor interface {
|
||||
ExecClient
|
||||
ApplyClient
|
||||
// Logger returns the MachineEventLoggingClient associated with this executor
|
||||
Logger() machineoutput.MachineEventLoggingClient
|
||||
// ComponentInfo retrieves the component information associated with the specified command
|
||||
|
||||
@@ -2,12 +2,10 @@ package common
|
||||
|
||||
import (
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
devfilev1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
"github.com/devfile/library/pkg/devfile/parser/data/v2/common"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/redhat-developer/odo/pkg/log"
|
||||
"github.com/redhat-developer/odo/pkg/machineoutput"
|
||||
"github.com/redhat-developer/odo/pkg/util"
|
||||
"k8s.io/klog"
|
||||
@@ -51,10 +49,6 @@ func (a GenericAdapter) Logger() machineoutput.MachineEventLoggingClient {
|
||||
return a.logger
|
||||
}
|
||||
|
||||
func (a *GenericAdapter) SetLogger(loggingClient machineoutput.MachineEventLoggingClient) {
|
||||
a.logger = loggingClient
|
||||
}
|
||||
|
||||
func (a GenericAdapter) ComponentInfo(command devfilev1.Command) (ComponentInfo, error) {
|
||||
return a.componentInfo(command)
|
||||
}
|
||||
@@ -63,25 +57,6 @@ func (a GenericAdapter) SupervisorComponentInfo(command devfilev1.Command) (Comp
|
||||
return a.supervisordComponentInfo(command)
|
||||
}
|
||||
|
||||
// ExecuteCommand simply calls exec.ExecuteCommand using the GenericAdapter's client
|
||||
func (a GenericAdapter) ExecuteCommand(compInfo ComponentInfo, command []string, show bool, consoleOutputStdout *io.PipeWriter, consoleOutputStderr *io.PipeWriter) (err error) {
|
||||
return ExecuteCommand(a.client, compInfo, command, show, consoleOutputStdout, consoleOutputStderr)
|
||||
}
|
||||
|
||||
// ExecuteDevfileCommand executes the devfile init, build and test command actions synchronously
|
||||
func (a GenericAdapter) ExecuteDevfileCommand(command devfilev1.Command, show bool) error {
|
||||
commands, err := a.Devfile.Data.GetCommands(common.DevfileOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c, err := New(command, GetCommandsMap(commands), a)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return c.Execute(show)
|
||||
}
|
||||
|
||||
// closeWriterAndWaitForAck closes the PipeWriter and then waits for a channel response from the ContainerOutputWriter (indicating that the reader had closed).
|
||||
// This ensures that we always get the full stderr/stdout output from the container process BEFORE we output the devfileCommandExecution event.
|
||||
func closeWriterAndWaitForAck(stdoutWriter *io.PipeWriter, stdoutChannel chan interface{}, stderrWriter *io.PipeWriter, stderrChannel chan interface{}) {
|
||||
@@ -191,44 +166,3 @@ func (a GenericAdapter) addToComposite(commandsMap PushCommandsMap, groupType de
|
||||
}
|
||||
return commands, nil
|
||||
}
|
||||
|
||||
// ExecDevfileEvent receives a Devfile Event (PostStart, PreStop etc.) and loops through them
|
||||
// Each Devfile Command associated with the given event is retrieved, and executed in the container specified
|
||||
// in the command
|
||||
func (a GenericAdapter) ExecDevfileEvent(events []string, eventType DevfileEventType, show bool) error {
|
||||
if len(events) > 0 {
|
||||
log.Infof("\nExecuting %s event commands for component %s", string(eventType), a.ComponentName)
|
||||
commands, err := a.Devfile.Data.GetCommands(common.DevfileOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
commandMap := GetCommandsMap(commands)
|
||||
for _, commandName := range events {
|
||||
// Convert commandName to lower because GetCommands converts Command.Exec.Id's to lower
|
||||
command, ok := commandMap[strings.ToLower(commandName)]
|
||||
if !ok {
|
||||
return errors.New("unable to find devfile command " + commandName)
|
||||
}
|
||||
|
||||
c, err := New(command, commandMap, a)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Execute command in container
|
||||
err = c.Execute(show)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to execute devfile command %s", commandName)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a GenericAdapter) ApplyComponent(component string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a GenericAdapter) UnApplyComponent(component string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,324 +0,0 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/devfile/library/pkg/devfile/parser/data"
|
||||
"github.com/redhat-developer/odo/pkg/util"
|
||||
|
||||
devfilev1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
devfileParser "github.com/devfile/library/pkg/devfile/parser"
|
||||
)
|
||||
|
||||
// Create a simple mock client for the ExecClient interface for the devfile exec unit tests.
|
||||
type mockExecClient struct {
|
||||
}
|
||||
|
||||
type mockExecErrorClient struct {
|
||||
}
|
||||
|
||||
func (fc mockExecClient) ExecCMDInContainer(compInfo ComponentInfo, cmd []string, stdout io.Writer, stderr io.Writer, stdin io.Reader, tty bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fc mockExecErrorClient) ExecCMDInContainer(compInfo ComponentInfo, cmd []string, stdout io.Writer, stderr io.Writer, stdin io.Reader, tty bool) error {
|
||||
return fmt.Errorf("exec error in container %s", compInfo.ContainerName)
|
||||
}
|
||||
|
||||
func TestExecuteDevfileCommand(t *testing.T) {
|
||||
var fakeExecClient mockExecClient
|
||||
var fakeExecErrorClient mockExecErrorClient
|
||||
compInfo := ComponentInfo{
|
||||
ContainerName: "some-container",
|
||||
}
|
||||
cif := func(command devfilev1.Command) (ComponentInfo, error) {
|
||||
return compInfo, nil
|
||||
}
|
||||
|
||||
commands := []string{"command1", "command2", "command3", "command4"}
|
||||
tests := []struct {
|
||||
name string
|
||||
commands []devfilev1.Command
|
||||
cmd devfilev1.Command
|
||||
execClient ExecClient
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "Case 1: Non-parallel, successful exec",
|
||||
commands: []devfilev1.Command{
|
||||
{
|
||||
Id: commands[0],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Exec: &devfilev1.ExecCommand{HotReloadCapable: util.GetBoolPtr(false)},
|
||||
},
|
||||
},
|
||||
{
|
||||
Id: commands[1],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Exec: &devfilev1.ExecCommand{HotReloadCapable: util.GetBoolPtr(false)},
|
||||
},
|
||||
},
|
||||
{
|
||||
Id: commands[2],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Composite: &devfilev1.CompositeCommand{Commands: []string{""}},
|
||||
},
|
||||
},
|
||||
},
|
||||
cmd: createCommandFrom(commands[2],
|
||||
devfilev1.CompositeCommand{
|
||||
Commands: []string{commands[0], commands[1]},
|
||||
Parallel: util.GetBoolPtr(false),
|
||||
}),
|
||||
execClient: fakeExecClient,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "Case 2: Non-parallel, failed exec",
|
||||
commands: []devfilev1.Command{
|
||||
{
|
||||
Id: commands[0],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Exec: &devfilev1.ExecCommand{HotReloadCapable: util.GetBoolPtr(false)},
|
||||
},
|
||||
},
|
||||
{
|
||||
Id: commands[1],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Exec: &devfilev1.ExecCommand{HotReloadCapable: util.GetBoolPtr(false)},
|
||||
},
|
||||
},
|
||||
{
|
||||
Id: commands[2],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Composite: &devfilev1.CompositeCommand{Commands: []string{""}},
|
||||
},
|
||||
},
|
||||
},
|
||||
cmd: createCommandFrom(commands[2], devfilev1.CompositeCommand{
|
||||
Commands: []string{commands[0], commands[1]},
|
||||
Parallel: util.GetBoolPtr(false),
|
||||
}),
|
||||
execClient: fakeExecErrorClient,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Case 3: Parallel, successful exec",
|
||||
commands: []devfilev1.Command{
|
||||
{
|
||||
Id: commands[0],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Exec: &devfilev1.ExecCommand{HotReloadCapable: util.GetBoolPtr(false)},
|
||||
},
|
||||
},
|
||||
{
|
||||
Id: commands[1],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Exec: &devfilev1.ExecCommand{HotReloadCapable: util.GetBoolPtr(false)},
|
||||
},
|
||||
},
|
||||
{
|
||||
Id: commands[2],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Composite: &devfilev1.CompositeCommand{Commands: []string{""}},
|
||||
},
|
||||
},
|
||||
},
|
||||
cmd: createCommandFrom(commands[2], devfilev1.CompositeCommand{
|
||||
Commands: []string{commands[0], commands[1]},
|
||||
Parallel: util.GetBoolPtr(true),
|
||||
}),
|
||||
execClient: fakeExecClient,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "Case 4: Parallel, failed exec",
|
||||
commands: []devfilev1.Command{
|
||||
{
|
||||
Id: commands[0],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Exec: &devfilev1.ExecCommand{HotReloadCapable: util.GetBoolPtr(false)},
|
||||
},
|
||||
},
|
||||
{
|
||||
Id: commands[1],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Exec: &devfilev1.ExecCommand{HotReloadCapable: util.GetBoolPtr(false)},
|
||||
},
|
||||
},
|
||||
{
|
||||
Id: commands[2],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Composite: &devfilev1.CompositeCommand{Commands: []string{""}},
|
||||
},
|
||||
},
|
||||
},
|
||||
cmd: createCommandFrom(commands[2], devfilev1.CompositeCommand{
|
||||
Commands: []string{commands[0], commands[1]},
|
||||
Parallel: util.GetBoolPtr(true),
|
||||
}),
|
||||
execClient: fakeExecErrorClient,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Case 5: Non-Parallel, command not found",
|
||||
commands: []devfilev1.Command{
|
||||
{
|
||||
Id: commands[0],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Exec: &devfilev1.ExecCommand{HotReloadCapable: util.GetBoolPtr(false)},
|
||||
},
|
||||
},
|
||||
{
|
||||
Id: commands[1],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Exec: &devfilev1.ExecCommand{HotReloadCapable: util.GetBoolPtr(false)},
|
||||
},
|
||||
},
|
||||
{
|
||||
Id: commands[2],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Composite: &devfilev1.CompositeCommand{Commands: []string{""}},
|
||||
},
|
||||
},
|
||||
},
|
||||
cmd: createCommandFrom(commands[2], devfilev1.CompositeCommand{
|
||||
Commands: []string{commands[0], "fake-command"},
|
||||
Parallel: util.GetBoolPtr(false),
|
||||
}),
|
||||
execClient: fakeExecClient,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Case 6: Parallel, command not found",
|
||||
commands: []devfilev1.Command{
|
||||
{
|
||||
Id: commands[0],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Exec: &devfilev1.ExecCommand{HotReloadCapable: util.GetBoolPtr(false)},
|
||||
},
|
||||
},
|
||||
{
|
||||
Id: commands[1],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Exec: &devfilev1.ExecCommand{HotReloadCapable: util.GetBoolPtr(false)},
|
||||
},
|
||||
},
|
||||
{
|
||||
Id: commands[2],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Composite: &devfilev1.CompositeCommand{Commands: []string{""}},
|
||||
},
|
||||
},
|
||||
},
|
||||
cmd: createCommandFrom(commands[2], devfilev1.CompositeCommand{
|
||||
Commands: []string{commands[0], "fake-command"},
|
||||
Parallel: util.GetBoolPtr(true),
|
||||
}),
|
||||
execClient: fakeExecClient,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Case 7: Nested composite commands",
|
||||
commands: []devfilev1.Command{
|
||||
{
|
||||
Id: commands[0],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Exec: &devfilev1.ExecCommand{HotReloadCapable: util.GetBoolPtr(false)},
|
||||
},
|
||||
},
|
||||
{
|
||||
Id: commands[1],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Exec: &devfilev1.ExecCommand{HotReloadCapable: util.GetBoolPtr(false)},
|
||||
},
|
||||
},
|
||||
{
|
||||
Id: commands[2],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Composite: &devfilev1.CompositeCommand{Commands: []string{commands[0], commands[1]}},
|
||||
},
|
||||
},
|
||||
{
|
||||
Id: commands[3],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Composite: &devfilev1.CompositeCommand{Commands: []string{""}},
|
||||
},
|
||||
},
|
||||
},
|
||||
cmd: createCommandFrom(commands[3], devfilev1.CompositeCommand{
|
||||
Commands: []string{commands[0], commands[2]},
|
||||
Parallel: util.GetBoolPtr(false),
|
||||
}),
|
||||
execClient: fakeExecClient,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "Case 8: Nested parallel composite commands",
|
||||
commands: []devfilev1.Command{
|
||||
{
|
||||
Id: commands[0],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Exec: &devfilev1.ExecCommand{HotReloadCapable: util.GetBoolPtr(false)},
|
||||
},
|
||||
},
|
||||
{
|
||||
Id: commands[1],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Exec: &devfilev1.ExecCommand{HotReloadCapable: util.GetBoolPtr(false)},
|
||||
},
|
||||
},
|
||||
{
|
||||
Id: commands[2],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Composite: &devfilev1.CompositeCommand{Commands: []string{commands[0], commands[1]}},
|
||||
},
|
||||
},
|
||||
{
|
||||
Id: commands[3],
|
||||
CommandUnion: devfilev1.CommandUnion{
|
||||
Composite: &devfilev1.CompositeCommand{Commands: []string{""}},
|
||||
},
|
||||
},
|
||||
},
|
||||
cmd: createCommandFrom(commands[3], devfilev1.CompositeCommand{
|
||||
Commands: []string{commands[0], commands[2]},
|
||||
Parallel: util.GetBoolPtr(true),
|
||||
}),
|
||||
execClient: fakeExecClient,
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := adapter(tt.execClient, tt.commands, cif).ExecuteDevfileCommand(tt.cmd, false)
|
||||
if !tt.wantErr == (err != nil) {
|
||||
t.Errorf("expected %v, wanted %v", err, tt.wantErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func adapter(fakeExecClient ExecClient, commands []devfilev1.Command, cif func(command devfilev1.Command) (ComponentInfo, error)) *GenericAdapter {
|
||||
data := func() data.DevfileData {
|
||||
devfileData, _ := data.NewDevfileData(string(data.APISchemaVersion200))
|
||||
return devfileData
|
||||
}()
|
||||
_ = data.AddCommands(commands)
|
||||
devObj := devfileParser.DevfileObj{
|
||||
Data: data,
|
||||
}
|
||||
ctx := AdapterContext{
|
||||
Devfile: devObj,
|
||||
}
|
||||
a := NewGenericAdapter(fakeExecClient, ctx)
|
||||
a.supervisordComponentInfo = cif
|
||||
a.componentInfo = cif
|
||||
return a
|
||||
}
|
||||
|
||||
func createCommandFrom(id string, composite devfilev1.CompositeCommand) devfilev1.Command {
|
||||
return devfilev1.Command{CommandUnion: devfilev1.CommandUnion{Composite: &composite}}
|
||||
}
|
||||
@@ -1,23 +1,11 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
devfilev1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
)
|
||||
|
||||
// ComponentAdapter defines the functions that platform-specific adapters must implement
|
||||
type ComponentAdapter interface {
|
||||
commandExecutor
|
||||
Push(parameters PushParameters) error
|
||||
DoesComponentExist(cmpName string, app string) (bool, error)
|
||||
Delete(labels map[string]string, show bool, wait bool) error
|
||||
Test(testCmd string, show bool) error
|
||||
CheckSupervisordCommandStatus(command devfilev1.Command) error
|
||||
StartContainerStatusWatch()
|
||||
StartSupervisordCtlStatusWatch()
|
||||
Log(follow bool, command devfilev1.Command) (io.ReadCloser, error)
|
||||
Exec(command []string) error
|
||||
Deploy() error
|
||||
UnDeploy() error
|
||||
}
|
||||
|
||||
@@ -17,15 +17,7 @@ import (
|
||||
// PredefinedDevfileCommands encapsulates constants for predefined devfile commands
|
||||
type PredefinedDevfileCommands string
|
||||
|
||||
// DevfileEventType encapsulates constants for devfile events
|
||||
type DevfileEventType string
|
||||
|
||||
const (
|
||||
// DefaultDevfileInitCommand is a predefined devfile command for init
|
||||
DefaultDevfileInitCommand PredefinedDevfileCommands = "devinit"
|
||||
|
||||
// DefaultDevfileBuildCommand is a predefined devfile command for build
|
||||
DefaultDevfileBuildCommand PredefinedDevfileCommands = "devbuild"
|
||||
|
||||
// DefaultDevfileRunCommand is a predefined devfile command for run
|
||||
DefaultDevfileRunCommand PredefinedDevfileCommands = "devrun"
|
||||
@@ -40,9 +32,6 @@ const (
|
||||
// use GetBootstrapperImage() function instead of this variable
|
||||
defaultBootstrapperImage = "registry.access.redhat.com/ocp-tools-4/odo-init-container-rhel8:1.1.11"
|
||||
|
||||
// SupervisordControlCommand sub command which stands for control
|
||||
SupervisordControlCommand = "ctl"
|
||||
|
||||
// SupervisordVolumeName Create a custom name and (hope) that users don't use the *exact* same name in their deployment (occlient.go)
|
||||
SupervisordVolumeName = "odo-supervisord-shared-data"
|
||||
|
||||
@@ -61,18 +50,9 @@ const (
|
||||
// ENV variable to overwrite image used to bootstrap SupervisorD in S2I and Devfile builder Image
|
||||
bootstrapperImageEnvName = "ODO_BOOTSTRAPPER_IMAGE"
|
||||
|
||||
// BinBash The path to sh executable
|
||||
BinBash = "/bin/sh"
|
||||
|
||||
// DefaultVolumeSize Default volume size for volumes defined in a devfile
|
||||
DefaultVolumeSize = "1Gi"
|
||||
|
||||
// EnvProjectsRoot is the env defined for project mount in a component container when component's mountSources=true
|
||||
EnvProjectsRoot = "PROJECTS_ROOT"
|
||||
|
||||
// EnvProjectsSrc is the env defined for path to the project source in a component container
|
||||
EnvProjectsSrc = "PROJECT_SOURCE"
|
||||
|
||||
// EnvOdoCommandRunWorkingDir is the env defined in the runtime component container which holds the work dir for the run command
|
||||
EnvOdoCommandRunWorkingDir = "ODO_COMMAND_RUN_WORKING_DIR"
|
||||
|
||||
@@ -93,26 +73,8 @@ const (
|
||||
|
||||
// SupervisordCtlSubCommand is the supervisord sub command ctl
|
||||
SupervisordCtlSubCommand = "ctl"
|
||||
|
||||
// PreStart is a devfile event
|
||||
PreStart DevfileEventType = "preStart"
|
||||
|
||||
// PostStart is a devfile event
|
||||
PostStart DevfileEventType = "postStart"
|
||||
|
||||
// PreStop is a devfile event
|
||||
PreStop DevfileEventType = "preStop"
|
||||
|
||||
// PostStop is a devfile event
|
||||
PostStop DevfileEventType = "postStop"
|
||||
)
|
||||
|
||||
// CommandNames is a struct to store the default and adapter names for devfile commands
|
||||
type CommandNames struct {
|
||||
DefaultName string
|
||||
AdapterName string
|
||||
}
|
||||
|
||||
// GetBootstrapperImage returns the odo-init bootstrapper image
|
||||
func GetBootstrapperImage() string {
|
||||
if env, ok := os.LookupEnv(bootstrapperImageEnvName); ok {
|
||||
|
||||
@@ -1,11 +1,8 @@
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
devfilev1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
"github.com/redhat-developer/odo/pkg/kclient"
|
||||
"github.com/redhat-developer/odo/pkg/machineoutput"
|
||||
"github.com/redhat-developer/odo/pkg/preference"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
@@ -43,14 +40,6 @@ func (k Adapter) Push(parameters common.PushParameters) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k Adapter) Deploy() error {
|
||||
return k.componentAdapter.Deploy()
|
||||
}
|
||||
|
||||
func (k Adapter) UnDeploy() error {
|
||||
return k.componentAdapter.UnDeploy()
|
||||
}
|
||||
|
||||
// CheckSupervisordCommandStatus calls the component adapter's CheckSupervisordCommandStatus
|
||||
func (k Adapter) CheckSupervisordCommandStatus(command devfilev1.Command) error {
|
||||
err := k.componentAdapter.CheckSupervisordCommandStatus(command)
|
||||
@@ -60,67 +49,3 @@ func (k Adapter) CheckSupervisordCommandStatus(command devfilev1.Command) error
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DoesComponentExist returns true if a component with the specified name exists in the given app
|
||||
func (k Adapter) DoesComponentExist(cmpName, appName string) (bool, error) {
|
||||
return k.componentAdapter.DoesComponentExist(cmpName, appName)
|
||||
}
|
||||
|
||||
// Delete deletes the Kubernetes resources that correspond to the devfile
|
||||
func (k Adapter) Delete(labels map[string]string, show bool, wait bool) error {
|
||||
|
||||
err := k.componentAdapter.Delete(labels, show, wait)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Test runs the devfile test command
|
||||
func (k Adapter) Test(testCmd string, show bool) error {
|
||||
return k.componentAdapter.Test(testCmd, show)
|
||||
}
|
||||
|
||||
// Log shows log from component
|
||||
func (k Adapter) Log(follow bool, command devfilev1.Command) (io.ReadCloser, error) {
|
||||
return k.componentAdapter.Log(follow, command)
|
||||
}
|
||||
|
||||
// Exec executes a command in the component
|
||||
func (k Adapter) Exec(command []string) error {
|
||||
return k.componentAdapter.Exec(command)
|
||||
}
|
||||
|
||||
func (k Adapter) ExecCMDInContainer(info common.ComponentInfo, cmd []string, stdOut io.Writer, stdErr io.Writer, stdIn io.Reader, show bool) error {
|
||||
return k.componentAdapter.ExecCMDInContainer(info, cmd, stdOut, stdErr, stdIn, show)
|
||||
}
|
||||
func (k Adapter) Logger() machineoutput.MachineEventLoggingClient {
|
||||
return k.componentAdapter.Logger()
|
||||
}
|
||||
|
||||
func (k Adapter) ComponentInfo(command devfilev1.Command) (common.ComponentInfo, error) {
|
||||
return k.componentAdapter.ComponentInfo(command)
|
||||
}
|
||||
|
||||
func (k Adapter) SupervisorComponentInfo(command devfilev1.Command) (common.ComponentInfo, error) {
|
||||
return k.componentAdapter.SupervisorComponentInfo(command)
|
||||
}
|
||||
|
||||
// StartContainerStatusWatch outputs Kubernetes pod/container status changes to the console, as used by the status command
|
||||
func (k Adapter) StartContainerStatusWatch() {
|
||||
k.componentAdapter.StartContainerStatusWatch()
|
||||
}
|
||||
|
||||
// StartSupervisordCtlStatusWatch outputs supervisord program status changes to the console, as used by the status command
|
||||
func (k Adapter) StartSupervisordCtlStatusWatch() {
|
||||
k.componentAdapter.StartSupervisordCtlStatusWatch()
|
||||
}
|
||||
|
||||
func (k Adapter) ApplyComponent(component string) error {
|
||||
return k.componentAdapter.ApplyComponent(component)
|
||||
}
|
||||
|
||||
func (k Adapter) UnApplyComponent(component string) error {
|
||||
return k.componentAdapter.UnApplyComponent(component)
|
||||
}
|
||||
|
||||
@@ -3,12 +3,13 @@ package component
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"k8s.io/utils/pointer"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/utils/pointer"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/devfile/library/pkg/devfile/generator"
|
||||
@@ -20,6 +21,7 @@ import (
|
||||
"github.com/redhat-developer/odo/pkg/devfile/adapters/kubernetes/utils"
|
||||
"github.com/redhat-developer/odo/pkg/envinfo"
|
||||
"github.com/redhat-developer/odo/pkg/kclient"
|
||||
"github.com/redhat-developer/odo/pkg/libdevfile"
|
||||
"github.com/redhat-developer/odo/pkg/log"
|
||||
"github.com/redhat-developer/odo/pkg/preference"
|
||||
"github.com/redhat-developer/odo/pkg/service"
|
||||
@@ -33,7 +35,6 @@ import (
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
@@ -311,12 +312,11 @@ func (a Adapter) Push(parameters common.PushParameters) (err error) {
|
||||
|
||||
// PostStart events from the devfile will only be executed when the component
|
||||
// didn't previously exist
|
||||
postStartEvents := a.Devfile.Data.GetEvents().PostStart
|
||||
if !componentExists && len(postStartEvents) > 0 {
|
||||
err = a.ExecDevfileEvent(postStartEvents, common.PostStart, parameters.Show)
|
||||
if !componentExists && libdevfile.HasPostStartEvents(a.Devfile) {
|
||||
log.Infof("\nExecuting %s event commands for component %s", string(libdevfile.PostStart), a.ComponentName)
|
||||
err = libdevfile.ExecPostStartEvents(a.Devfile, a.ComponentName, component.NewExecHandler(a.Client, a.pod.Name, parameters.Show))
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@@ -387,7 +387,7 @@ func (a Adapter) CheckSupervisordCommandStatus(command devfilev1.Command) error
|
||||
log.Warningf("devfile command %q exited with error status within %d sec", command.Id, supervisorDStatusWaitTimeInterval)
|
||||
log.Infof("Last %d lines of the component's log:", numberOfLines)
|
||||
|
||||
rd, err := a.Log(false, command)
|
||||
rd, err := component.Log(a.Client, a.ComponentName, a.AppName, false, command)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -402,34 +402,6 @@ func (a Adapter) CheckSupervisordCommandStatus(command devfilev1.Command) error
|
||||
return nil
|
||||
}
|
||||
|
||||
// Test runs the devfile test command
|
||||
func (a Adapter) Test(testCmd string, show bool) (err error) {
|
||||
pod, err := a.Client.GetOnePod(a.ComponentName, a.AppName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error occurred while getting the pod: %w", err)
|
||||
}
|
||||
if pod.Status.Phase != corev1.PodRunning {
|
||||
return fmt.Errorf("pod for component %s is not running", a.ComponentName)
|
||||
}
|
||||
|
||||
log.Infof("\nExecuting devfile test command for component %s", a.ComponentName)
|
||||
|
||||
testCommand, err := common.ValidateAndGetTestDevfileCommands(a.Devfile.Data, testCmd)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to validate devfile test command")
|
||||
}
|
||||
err = a.ExecuteDevfileCommand(testCommand, show, false)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to execute devfile commands for component %s", a.ComponentName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DoesComponentExist returns true if a component with the specified name exists, false otherwise
|
||||
func (a Adapter) DoesComponentExist(cmpName string, appName string) (bool, error) {
|
||||
return utils.ComponentExists(a.Client, cmpName, appName)
|
||||
}
|
||||
|
||||
func (a *Adapter) createOrUpdateComponent(componentExists bool, ei envinfo.EnvSpecificInfo, isMainStorageEphemeral bool) (err error) {
|
||||
ei.SetDevfileObj(a.Devfile)
|
||||
|
||||
@@ -673,111 +645,6 @@ func getFirstContainerWithSourceVolume(containers []corev1.Container) (string, s
|
||||
return "", "", fmt.Errorf("in order to sync files, odo requires at least one component in a devfile to set 'mountSources: true'")
|
||||
}
|
||||
|
||||
// Delete deletes the component
|
||||
func (a Adapter) Delete(labels map[string]string, show bool, wait bool) error {
|
||||
if labels == nil {
|
||||
return fmt.Errorf("cannot delete with labels being nil")
|
||||
}
|
||||
log.Printf("Gathering information for component: %q", a.ComponentName)
|
||||
podSpinner := log.Spinner("Checking status for component")
|
||||
defer podSpinner.End(false)
|
||||
|
||||
pod, err := a.Client.GetOnePod(a.ComponentName, a.AppName)
|
||||
if kerrors.IsForbidden(err) {
|
||||
klog.V(2).Infof("Resource for %s forbidden", a.ComponentName)
|
||||
// log the error if it failed to determine if the component exists due to insufficient RBACs
|
||||
podSpinner.End(false)
|
||||
log.Warningf("%v", err)
|
||||
return nil
|
||||
} else if e, ok := err.(*kclient.PodNotFoundError); ok {
|
||||
podSpinner.End(false)
|
||||
log.Warningf("%v", e)
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return errors.Wrapf(err, "unable to determine if component %s exists", a.ComponentName)
|
||||
}
|
||||
|
||||
podSpinner.End(true)
|
||||
|
||||
// if there are preStop events, execute them before deleting the deployment
|
||||
preStopEvents := a.Devfile.Data.GetEvents().PreStop
|
||||
if len(preStopEvents) > 0 {
|
||||
if pod.Status.Phase != corev1.PodRunning {
|
||||
return fmt.Errorf("unable to execute preStop events, pod for component %s is not running", a.ComponentName)
|
||||
}
|
||||
|
||||
err = a.ExecDevfileEvent(preStopEvents, common.PreStop, show)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
log.Infof("\nDeleting component %s", a.ComponentName)
|
||||
spinner := log.Spinner("Deleting Kubernetes resources for component")
|
||||
defer spinner.End(false)
|
||||
|
||||
err = a.Client.Delete(labels, wait)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
spinner.End(true)
|
||||
log.Successf("Successfully deleted component")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Log returns log from component
|
||||
func (a Adapter) Log(follow bool, command devfilev1.Command) (io.ReadCloser, error) {
|
||||
|
||||
pod, err := a.Client.GetOnePod(a.ComponentName, a.AppName)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("the component %s doesn't exist on the cluster", a.ComponentName)
|
||||
}
|
||||
|
||||
if pod.Status.Phase != corev1.PodRunning {
|
||||
return nil, errors.Errorf("unable to show logs, component is not in running state. current status=%v", pod.Status.Phase)
|
||||
}
|
||||
|
||||
containerName := command.Exec.Component
|
||||
|
||||
return a.Client.GetPodLogs(pod.Name, containerName, follow)
|
||||
}
|
||||
|
||||
// Exec executes a command in the component
|
||||
func (a Adapter) Exec(command []string) error {
|
||||
exists, err := utils.ComponentExists(a.Client, a.ComponentName, a.AppName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return errors.Errorf("the component %s doesn't exist on the cluster", a.ComponentName)
|
||||
}
|
||||
|
||||
runCommand, err := common.GetRunCommand(a.Devfile.Data, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
containerName := runCommand.Exec.Component
|
||||
|
||||
// get the pod
|
||||
pod, err := a.Client.GetOnePod(a.ComponentName, a.AppName)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to get pod for component %s", a.ComponentName)
|
||||
}
|
||||
|
||||
if pod.Status.Phase != corev1.PodRunning {
|
||||
return fmt.Errorf("unable to exec as the component is not running. Current status=%v", pod.Status.Phase)
|
||||
}
|
||||
|
||||
componentInfo := common.ComponentInfo{
|
||||
PodName: pod.Name,
|
||||
ContainerName: containerName,
|
||||
}
|
||||
|
||||
return a.ExecuteCommand(componentInfo, command, true, nil, nil)
|
||||
}
|
||||
|
||||
func (a Adapter) ExecCMDInContainer(componentInfo common.ComponentInfo, cmd []string, stdout io.Writer, stderr io.Writer, stdin io.Reader, tty bool) error {
|
||||
return a.Client.ExecCMDInContainer(componentInfo.ContainerName, componentInfo.PodName, cmd, stdout, stderr, stdin, tty)
|
||||
}
|
||||
@@ -786,102 +653,3 @@ func (a Adapter) ExecCMDInContainer(componentInfo common.ComponentInfo, cmd []st
|
||||
func (a Adapter) ExtractProjectToComponent(componentInfo common.ComponentInfo, targetPath string, stdin io.Reader) error {
|
||||
return a.Client.ExtractProjectToComponent(componentInfo.ContainerName, componentInfo.PodName, targetPath, stdin)
|
||||
}
|
||||
|
||||
// Deploy executes the 'deploy' command defined in a devfile
|
||||
func (a Adapter) Deploy() error {
|
||||
deployCmd, err := a.getDeployCommand()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return a.ExecuteDevfileCommand(deployCmd, true, false)
|
||||
}
|
||||
|
||||
// UnDeploy reverses the effect of the 'deploy' command defined in a devfile
|
||||
func (a Adapter) UnDeploy() error {
|
||||
deployCmd, err := a.getDeployCommand()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return a.ExecuteDevfileCommand(deployCmd, true, true)
|
||||
}
|
||||
|
||||
// ExecuteDevfileCommand executes the devfile command; if unexecute is set to true, it reverses the effect of Execute
|
||||
func (a Adapter) ExecuteDevfileCommand(command devfilev1.Command, show, unexecute bool) error {
|
||||
commands, err := a.Devfile.Data.GetCommands(parsercommon.DevfileOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c, err := common.New(command, common.GetCommandsMap(commands), &a)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if unexecute {
|
||||
return c.UnExecute()
|
||||
}
|
||||
return c.Execute(show)
|
||||
}
|
||||
|
||||
// ApplyComponent 'applies' a devfile component
|
||||
func (a Adapter) ApplyComponent(componentName string) error {
|
||||
cmp, err := a.getApplyComponent(componentName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return cmp.Apply(a.Devfile, a.Context)
|
||||
}
|
||||
|
||||
// UnApplyComponent un-'applies' a devfile component
|
||||
func (a Adapter) UnApplyComponent(componentName string) error {
|
||||
cmp, err := a.getApplyComponent(componentName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmp.UnApply(a.Context)
|
||||
}
|
||||
|
||||
// getDeployCommand validates the deploy command and returns it
|
||||
func (a Adapter) getDeployCommand() (devfilev1.Command, error) {
|
||||
deployGroupCmd, err := a.Devfile.Data.GetCommands(parsercommon.DevfileOptions{
|
||||
CommandOptions: parsercommon.CommandOptions{
|
||||
CommandGroupKind: devfilev1.DeployCommandGroupKind,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return devfilev1.Command{}, err
|
||||
}
|
||||
if len(deployGroupCmd) == 0 {
|
||||
return devfilev1.Command{}, &NoDefaultDeployCommandFoundError{}
|
||||
}
|
||||
if len(deployGroupCmd) > 1 {
|
||||
return devfilev1.Command{}, &MoreThanOneDefaultDeployCommandFoundError{}
|
||||
}
|
||||
return deployGroupCmd[0], nil
|
||||
}
|
||||
|
||||
// getApplyComponent returns the 'Apply' command's component(kubernetes/image)
|
||||
func (a Adapter) getApplyComponent(componentName string) (componentToApply, error) {
|
||||
components, err := a.Devfile.Data.GetComponents(parsercommon.DevfileOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var component devfilev1.Component
|
||||
var found bool
|
||||
for _, component = range components {
|
||||
if component.Name == componentName {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return nil, fmt.Errorf("component %q not found", componentName)
|
||||
}
|
||||
|
||||
cmp, err := createComponent(a, component)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cmp, nil
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
|
||||
"github.com/devfile/library/pkg/devfile/generator"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/redhat-developer/odo/pkg/component"
|
||||
"github.com/redhat-developer/odo/pkg/envinfo"
|
||||
"github.com/redhat-developer/odo/pkg/preference"
|
||||
"github.com/redhat-developer/odo/pkg/util"
|
||||
@@ -374,7 +375,7 @@ func TestDoesComponentExist(t *testing.T) {
|
||||
})
|
||||
|
||||
// Verify that a component with the specified name exists
|
||||
componentExists, err := componentAdapter.DoesComponentExist(tt.getComponentName, "")
|
||||
componentExists, err := component.ComponentExists(fkclient, tt.getComponentName, "")
|
||||
if !tt.wantErr && err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
} else if !tt.wantErr && componentExists != tt.want {
|
||||
@@ -460,6 +461,7 @@ func TestWaitAndGetComponentPod(t *testing.T) {
|
||||
|
||||
}
|
||||
|
||||
// TODO move to pkg/component
|
||||
func TestAdapterDelete(t *testing.T) {
|
||||
type args struct {
|
||||
labels map[string]string
|
||||
@@ -569,8 +571,6 @@ func TestAdapterDelete(t *testing.T) {
|
||||
|
||||
fkclient, fkclientset := kclient.FakeNew()
|
||||
|
||||
a := New(adapterCtx, fkclient, nil)
|
||||
|
||||
fkclientset.Kubernetes.PrependReactor("delete-collection", "deployments", func(action ktesting.Action) (bool, runtime.Object, error) {
|
||||
if util.ConvertLabelsToSelector(tt.args.labels) != action.(ktesting.DeleteCollectionAction).GetListRestrictions().Labels.String() {
|
||||
return true, nil, errors.Errorf("collection labels are not matching, wanted: %v, got: %v", util.ConvertLabelsToSelector(tt.args.labels), action.(ktesting.DeleteCollectionAction).GetListRestrictions().Labels.String())
|
||||
@@ -589,7 +589,7 @@ func TestAdapterDelete(t *testing.T) {
|
||||
return true, tt.existingPod, nil
|
||||
})
|
||||
|
||||
if err := a.Delete(tt.args.labels, false, false); (err != nil) != tt.wantErr {
|
||||
if err := component.Delete(fkclient, devObj, tt.componentName, "app", tt.args.labels, false, false); (err != nil) != tt.wantErr {
|
||||
t.Errorf("Delete() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
})
|
||||
|
||||
@@ -1,24 +0,0 @@
package component

import (
	"fmt"

	devfilev1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
	"github.com/devfile/library/pkg/devfile/parser"
)

// componentToApply represents a devfile component that can be applied
type componentToApply interface {
	Apply(devfileObj parser.DevfileObj, devfilePath string) error
	UnApply(devfilePath string) error
}

// createComponent returns an instance of a devfile component specific to its type (image, kubernetes, etc)
func createComponent(adapter Adapter, component devfilev1.Component) (componentToApply, error) {
	if component.Image != nil {
		return newComponentImage(component), nil
	} else if component.Kubernetes != nil {
		return newComponentKubernetes(adapter.Client, component, adapter.ComponentName, adapter.AppName), nil
	}
	return nil, fmt.Errorf("component type not supported for component %q", component.Name)
}
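The interface above is the small seam the deploy/undeploy code is built around. A minimal sketch of how a caller is expected to drive it (illustrative only; `adapter`, `devfileObj`, `devfilePath` and `components` are assumed to be in scope, and errors for unsupported component types are simply propagated):

for _, c := range components {
	cmp, err := createComponent(adapter, c)
	if err != nil {
		return err // e.g. a container component, which cannot be applied here
	}
	if err := cmp.Apply(devfileObj, devfilePath); err != nil {
		return err
	}
}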
|
||||
@@ -1,25 +0,0 @@
|
||||
package component
|
||||
|
||||
import (
|
||||
devfilev1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
"github.com/devfile/library/pkg/devfile/parser"
|
||||
"github.com/redhat-developer/odo/pkg/devfile/image"
|
||||
)
|
||||
|
||||
// componentImage represents a devfile component of type Image
|
||||
type componentImage struct {
|
||||
component devfilev1.Component
|
||||
}
|
||||
|
||||
func newComponentImage(component devfilev1.Component) componentImage {
|
||||
return componentImage{component: component}
|
||||
}
|
||||
|
||||
// Apply a component of type Image by building and pushing the image
|
||||
func (o componentImage) Apply(devfileObj parser.DevfileObj, devfilePath string) error {
|
||||
return image.BuildPushSpecificImage(devfileObj, devfilePath, o.component, true)
|
||||
}
|
||||
|
||||
func (o componentImage) UnApply(devfilePath string) error {
|
||||
return nil
|
||||
}
|
||||
@@ -1,74 +0,0 @@
|
||||
package component
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
devfilev1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
"github.com/devfile/library/pkg/devfile/parser"
|
||||
devfilefs "github.com/devfile/library/pkg/testingutil/filesystem"
|
||||
"github.com/pkg/errors"
|
||||
componentlabels "github.com/redhat-developer/odo/pkg/component/labels"
|
||||
"github.com/redhat-developer/odo/pkg/kclient"
|
||||
"github.com/redhat-developer/odo/pkg/log"
|
||||
"github.com/redhat-developer/odo/pkg/service"
|
||||
)
|
||||
|
||||
// componentKubernetes represents a devfile component of type Kubernetes
|
||||
type componentKubernetes struct {
|
||||
client kclient.ClientInterface
|
||||
component devfilev1.Component
|
||||
componentName string
|
||||
appName string
|
||||
}
|
||||
|
||||
func newComponentKubernetes(client kclient.ClientInterface, component devfilev1.Component, componentName string, appName string) componentKubernetes {
|
||||
return componentKubernetes{
|
||||
client: client,
|
||||
component: component,
|
||||
componentName: componentName,
|
||||
appName: appName,
|
||||
}
|
||||
}
|
||||
|
||||
// Apply a component of type Kubernetes by creating resources into a Kubernetes cluster
|
||||
func (o componentKubernetes) Apply(devfileObj parser.DevfileObj, devfilePath string) error {
|
||||
// validate if the GVRs represented by Kubernetes inlined components are supported by the underlying cluster
|
||||
_, err := service.ValidateResourceExist(o.client, o.component, devfilePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
labels := componentlabels.GetLabels(o.componentName, o.appName, true)
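// (Note: these labels mark the created resource as belonging to this component and app,
// which is presumably how odo finds the resource again at delete/un-deploy time.)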
|
||||
u, err := service.GetK8sComponentAsUnstructured(o.component.Kubernetes, devfilePath, devfilefs.DefaultFs{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Infof("\nDeploying Kubernetes %s: %s", u.GetKind(), u.GetName())
|
||||
isOperatorBackedService, err := service.PushKubernetesResource(o.client, u, labels)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create service(s) associated with the component")
|
||||
}
|
||||
if isOperatorBackedService {
|
||||
log.Successf("Kubernetes resource %q on the cluster; refer %q to know how to link it to the component", strings.Join([]string{u.GetKind(), u.GetName()}, "/"), "odo link -h")
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o componentKubernetes) UnApply(devfilePath string) error {
|
||||
// Parse the component's Kubernetes manifest
|
||||
u, err := service.GetK8sComponentAsUnstructured(o.component.Kubernetes, devfilePath, devfilefs.DefaultFs{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get the REST mappings
|
||||
gvr, err := o.client.GetRestMappingFromUnstructured(u)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Printf("Un-deploying the Kubernetes %s: %s", u.GetKind(), u.GetName())
|
||||
// Un-deploy the K8s manifest
|
||||
return o.client.DeleteDynamicResource(u.GetName(), gvr.Resource.Group, gvr.Resource.Version, gvr.Resource.Resource)
|
||||
}
|
||||
@@ -1,13 +0,0 @@
package component

type NoDefaultDeployCommandFoundError struct{}

func (e NoDefaultDeployCommandFoundError) Error() string {
	return "error deploying, no default deploy command found in devfile"
}

type MoreThanOneDefaultDeployCommandFoundError struct{}

func (e MoreThanOneDefaultDeployCommandFoundError) Error() string {
	return "more than one default deploy command found in devfile, should not happen"
}
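Both error types are plain sentinel-style values; a caller that wants to distinguish the two failure modes of getDeployCommand can do so with errors.As from the standard library. A hedged sketch, assuming an Adapter value `a` and the standard "errors" import are in scope:

cmd, err := a.getDeployCommand()
if err != nil {
	var missing *NoDefaultDeployCommandFoundError
	var tooMany *MoreThanOneDefaultDeployCommandFoundError
	switch {
	case errors.As(err, &missing):
		// the devfile defines no deploy command group at all
	case errors.As(err, &tooMany):
		// the devfile defines more than one default deploy command
	}
	return err
}
_ = cmd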
|
||||
@@ -1,442 +0,0 @@
|
||||
package component
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/redhat-developer/odo/pkg/machineoutput"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
const (
|
||||
// KubernetesResourceFailureInterval is the time between attempts to acquire needed k8s resources
|
||||
KubernetesResourceFailureInterval = time.Duration(5) * time.Second
|
||||
)
|
||||
|
||||
// podWatcher is responsible for watching for changes to odo-managed Pods, and reporting those changes to the console, as used by the status command
|
||||
type podWatcher struct {
|
||||
adapter *Adapter
|
||||
statusReconcilerChan chan statusReconcilerChannelEntry
|
||||
}
|
||||
|
||||
// StartContainerStatusWatch outputs Kubernetes pod/container status changes to the console, as used by the status command
|
||||
func (a Adapter) StartContainerStatusWatch() {
|
||||
|
||||
pw := newPodWatcher(&a)
|
||||
pw.startPodWatcher()
|
||||
|
||||
}
|
||||
|
||||
func newPodWatcher(adapter *Adapter) *podWatcher {
|
||||
return &podWatcher{
|
||||
adapter: adapter,
|
||||
statusReconcilerChan: createStatusReconciler(adapter),
|
||||
}
|
||||
}
|
||||
|
||||
func (pw *podWatcher) startPodWatcher() {
|
||||
pw.startWatchThread(pw.adapter)
|
||||
}
|
||||
|
||||
// statusReconcilerChannelEntry is the message sent over the go channel from the watcher to the status reconciler
|
||||
type statusReconcilerChannelEntry struct {
|
||||
|
||||
// If isCompleteListOfPods is true: a list of all component pods in the workspace
|
||||
// If isCompleteListOfPods is false: a single component pod in the workspace
|
||||
pods []*corev1.Pod
|
||||
|
||||
err error
|
||||
|
||||
// isCompleteListOfPods is true if the pods came from getLatestContainerStatus(), false otherwise
|
||||
isCompleteListOfPods bool
|
||||
|
||||
// isDeleteEventFromWatch is true if this entry was generated by a watch.Deleted event, false otherwise
|
||||
isDeleteEventFromWatch bool
|
||||
|
||||
// isWatchThreadRestarted is true if the watch thread died (for example, due to losing the network connection) and had to be re-established
|
||||
isWatchThreadRestarted bool
|
||||
}
|
||||
|
||||
// getLatestContainerStatus returns a KubernetesDeploymentStatus for the given component; this function blocks until it is available
|
||||
func getLatestContainerStatus(adapter *Adapter) *KubernetesDeploymentStatus {
|
||||
|
||||
// Keep trying to acquire the Deployment and ReplicaSet of the component, so that we can reliably find its pods
|
||||
for {
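// Block in this loop until both the Deployment UID and its ReplicaSet UID are known;
// without them the watcher cannot tell which pods belong to the component.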
|
||||
containerStatus, err := adapter.getDeploymentStatus()
|
||||
if err == nil {
|
||||
|
||||
if containerStatus.DeploymentUID == "" || containerStatus.ReplicaSetUID == "" {
|
||||
adapter.Logger().ReportError(fmt.Errorf("unable to retrieve component deployment and replica set, trying again in a few moments"), machineoutput.TimestampNow())
|
||||
time.Sleep(KubernetesResourceFailureInterval)
|
||||
continue
|
||||
}
|
||||
|
||||
return containerStatus
|
||||
}
|
||||
|
||||
adapter.Logger().ReportError(errors.Wrapf(err, "unable to retrieve component deployment and replica set, trying again in a few moments"), machineoutput.TimestampNow())
|
||||
time.Sleep(KubernetesResourceFailureInterval)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (pw *podWatcher) startWatchThread(adapter *Adapter) {
|
||||
|
||||
// Kick off the goroutine then return execution
|
||||
go func() {
|
||||
|
||||
watchAttempts := 1
|
||||
|
||||
var w watch.Interface = nil
|
||||
for {
|
||||
|
||||
klog.V(4).Infof("Attempting to acquire watch, attempt #%d", watchAttempts)
|
||||
|
||||
var err error
|
||||
w, err = adapter.Client.GetClient().CoreV1().Pods(adapter.Client.GetCurrentNamespace()).Watch(context.TODO(), metav1.ListOptions{})
|
||||
|
||||
if err != nil || w == nil {
|
||||
|
||||
if err != nil {
|
||||
adapter.Logger().ReportError(err, machineoutput.TimestampNow())
|
||||
}
|
||||
|
||||
klog.V(4).Infof("Unable to establish watch, trying again in a few moments seconds. Error was: %v", err)
|
||||
|
||||
time.Sleep(KubernetesResourceFailureInterval)
|
||||
watchAttempts++
|
||||
} else {
|
||||
// Success!
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
klog.V(4).Infof("Watch is successfully established.")
|
||||
|
||||
kubeContainerStatus := getLatestContainerStatus(adapter)
|
||||
|
||||
// After the watch is established, provide the reconciler with a list of all the current pods in the namespace (not just delta), so that
|
||||
// old pods may be deleted from the reconciler (eg those pods that were deleted in the namespace while the watch was dead).
|
||||
// (This prevents a race condition where pods deleted during a watch outage might be missed forever).
|
||||
pw.statusReconcilerChan <- statusReconcilerChannelEntry{
|
||||
pods: kubeContainerStatus.Pods,
|
||||
isCompleteListOfPods: true,
|
||||
isDeleteEventFromWatch: false,
|
||||
err: nil,
|
||||
}
|
||||
|
||||
// We have successfully established the watch, so kick off the watch event listener
|
||||
go pw.watchEventListener(w, kubeContainerStatus.ReplicaSetUID)
|
||||
|
||||
}()
|
||||
|
||||
}
|
||||
|
||||
// This function runs in a goroutine for each watch. This goroutine exits if the watch dies (for example due to network disconnect),
|
||||
// at which point the watch acquisition process begins again.
|
||||
func (pw *podWatcher) watchEventListener(w watch.Interface, replicaSetUID types.UID) {
|
||||
for {
|
||||
|
||||
// Retrieve watch event
|
||||
entry := <-w.ResultChan()
|
||||
|
||||
// Restart the watch acquisition process on death, then exit
|
||||
if entry.Object == nil && entry.Type == "" {
|
||||
klog.V(4).Infof("Watch has died; initiating re-establish.")
|
||||
pw.statusReconcilerChan <- statusReconcilerChannelEntry{
|
||||
isWatchThreadRestarted: true,
|
||||
}
|
||||
pw.startWatchThread(pw.adapter)
|
||||
return
|
||||
}
|
||||
|
||||
// We only care about watch events that are related to Pods
|
||||
if pod, ok := entry.Object.(*corev1.Pod); ok && pod != nil {
|
||||
|
||||
// Look for pods that are owned by the replicaset of our deployment
|
||||
ownerRefMatches := false
|
||||
for _, ownerRef := range pod.OwnerReferences {
|
||||
if ownerRef.UID == replicaSetUID {
|
||||
ownerRefMatches = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !ownerRefMatches {
|
||||
continue
|
||||
}
|
||||
|
||||
// We located the component pod, so now pass it to our status reconciler to report to the console (if required)
|
||||
pw.statusReconcilerChan <- statusReconcilerChannelEntry{
|
||||
pods: []*corev1.Pod{pod},
|
||||
err: nil,
|
||||
isCompleteListOfPods: false, // only a delta
|
||||
isDeleteEventFromWatch: entry.Type == watch.Deleted,
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// createStatusReconciler kicks off a goroutine which receives messages containing updates to odo-managed k8s Pod resources.
|
||||
// For each message received, this function must determine if that resource has changed (in a way that we care about), and
|
||||
// if so, report that as a change event.
|
||||
func createStatusReconciler(adapter *Adapter) chan statusReconcilerChannelEntry {
|
||||
|
||||
senderChannel := make(chan statusReconcilerChannelEntry)
|
||||
|
||||
go func() {
|
||||
|
||||
// This map is the single source of truth re: what odo expects the cluster namespace to look like; when
|
||||
// new events are received that contain pod data that differs from this, the user should be informed of the delta
|
||||
// (and this 'truth' should be updated.)
|
||||
//
|
||||
// Map key is pod UID
|
||||
mostRecentPodStatus := map[string]*KubernetesPodStatus{}
|
||||
|
||||
for {
|
||||
|
||||
entry := <-senderChannel
|
||||
|
||||
if entry.isWatchThreadRestarted {
|
||||
// On network disconnect, clear the status map
|
||||
mostRecentPodStatus = map[string]*KubernetesPodStatus{}
|
||||
}
|
||||
|
||||
if entry.err != nil {
|
||||
adapter.Logger().ReportError(entry.err, machineoutput.TimestampNow())
|
||||
klog.V(4).Infof("Error received on status reconciler channel %v", entry.err)
|
||||
continue
|
||||
}
|
||||
|
||||
if entry.pods == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Map key is pod UID (we don't use the map value)
|
||||
entryPodUIDs := map[string]string{}
|
||||
for _, pod := range entry.pods {
|
||||
entryPodUIDs[string(pod.UID)] = string(pod.UID)
|
||||
}
|
||||
|
||||
changeDetected := false
|
||||
|
||||
// This section of the algorithm only works if the entry was from a podlist (which contains the full list
|
||||
// of all pods that exist in the namespace), rather than the watch (which contains only one pod in
|
||||
// the namespace.)
|
||||
if entry.isCompleteListOfPods {
|
||||
// Detect if there exists a UID in mostRecentPodStatus that is not in entry; if so, one or more previous
|
||||
// pods have disappeared, so set changeDetected to true.
|
||||
for mostRecentPodUID := range mostRecentPodStatus {
|
||||
if _, exists := entryPodUIDs[mostRecentPodUID]; !exists {
|
||||
klog.V(4).Infof("Status change detected: Could not find previous pod %s in most recent pod status", mostRecentPodUID)
|
||||
delete(mostRecentPodStatus, mostRecentPodUID)
|
||||
changeDetected = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !changeDetected {
|
||||
|
||||
// For each pod we received a status for, determine if it is a change, and if so, update mostRecentPodStatus
|
||||
for _, pod := range entry.pods {
|
||||
podVal := CreateKubernetesPodStatusFromPod(*pod)
|
||||
|
||||
if entry.isDeleteEventFromWatch {
|
||||
delete(mostRecentPodStatus, string(pod.UID))
|
||||
klog.V(4).Infof("Removing deleted pod %s", pod.UID)
|
||||
changeDetected = true
|
||||
continue
|
||||
}
|
||||
|
||||
// If a pod exists in the new pod status, that we have not seen before, then a change is detected.
|
||||
prevValue, exists := mostRecentPodStatus[string(pod.UID)]
|
||||
if !exists {
|
||||
mostRecentPodStatus[string(pod.UID)] = &podVal
|
||||
klog.V(4).Infof("Adding new pod to most recent pod status %s", pod.UID)
|
||||
changeDetected = true
|
||||
|
||||
} else {
|
||||
// If the pod exists in both the old and new status, then do a deep comparison
|
||||
areEqual := areEqual(&podVal, prevValue)
|
||||
if areEqual != "" {
|
||||
mostRecentPodStatus[string(pod.UID)] = &podVal
|
||||
klog.V(4).Infof("Pod value %s has changed: %s", pod.UID, areEqual)
|
||||
changeDetected = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// On change: output all pods (our full knowledge of the odo-managed components in the namespace) as a single JSON event
|
||||
if changeDetected {
|
||||
|
||||
podStatuses := []machineoutput.KubernetesPodStatusEntry{}
|
||||
|
||||
for _, v := range mostRecentPodStatus {
|
||||
|
||||
startTime := ""
|
||||
if v.StartTime != nil {
|
||||
startTime = machineoutput.FormatTime(*v.StartTime)
|
||||
}
|
||||
|
||||
podStatuses = append(podStatuses, machineoutput.KubernetesPodStatusEntry{
|
||||
Name: v.Name,
|
||||
Containers: v.Containers,
|
||||
InitContainers: v.InitContainers,
|
||||
Labels: v.Labels,
|
||||
Phase: v.Phase,
|
||||
UID: v.UID,
|
||||
StartTime: startTime,
|
||||
})
|
||||
}
|
||||
|
||||
adapter.Logger().KubernetesPodStatus(podStatuses, machineoutput.TimestampNow())
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return senderChannel
|
||||
}
|
||||
|
||||
// areEqual compares two KubernetesPodStatus and returns a non-empty string if the two are not equivalent.
|
||||
// Note: returned strings are for logging/debug purposes only.
|
||||
func areEqual(one *KubernetesPodStatus, two *KubernetesPodStatus) string {
|
||||
|
||||
if one.UID != two.UID {
|
||||
return fmt.Sprintf("UIDs differ %s %s", one.UID, two.UID)
|
||||
}
|
||||
|
||||
if one.Name != two.Name {
|
||||
return fmt.Sprintf("Names differ %s %s", one.Name, two.Name)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(one.StartTime, two.StartTime) {
|
||||
return fmt.Sprintf("Start times differ %v %v", one.StartTime, two.StartTime)
|
||||
}
|
||||
|
||||
if one.Phase != two.Phase {
|
||||
return fmt.Sprintf("Pod phase differs %s %s", one.Phase, two.Phase)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(one.Labels, two.Labels) {
|
||||
return fmt.Sprintf("Labels differ %v %v", one.Labels, two.Labels)
|
||||
}
|
||||
|
||||
initContainerComparison := compareCoreContainerStatusList(one.InitContainers, two.InitContainers)
|
||||
if initContainerComparison != "" {
|
||||
return fmt.Sprintf("Init containers differ: %s", initContainerComparison)
|
||||
}
|
||||
|
||||
containerComparison := compareCoreContainerStatusList(one.Containers, two.Containers)
|
||||
if containerComparison != "" {
|
||||
return fmt.Sprintf("Containers differ %s", containerComparison)
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// compareCoreContainerStatusList compares two ContainerStatus arrays and returns a non-empty string if the two are not equivalent.
|
||||
// Note: returned strings are for logging/debug purposes only.
|
||||
func compareCoreContainerStatusList(oneParam []corev1.ContainerStatus, twoParam []corev1.ContainerStatus) string {
|
||||
|
||||
// One-way list compare, using container name to identify individual entries
|
||||
compareFunc := func(paramA []corev1.ContainerStatus, paramB []corev1.ContainerStatus) string {
|
||||
|
||||
// key: container name
|
||||
oneMap := map[string]*corev1.ContainerStatus{}
|
||||
|
||||
// Populate oneMap
|
||||
for index, one := range paramA {
|
||||
oneMap[one.Name] = ¶mA[index]
|
||||
}
|
||||
|
||||
// Iterate through paramB and compare with the corresponding container name in paramA
|
||||
for index, two := range paramB {
|
||||
|
||||
oneEntry, exists := oneMap[two.Name]
|
||||
|
||||
// If an entry is present in two but not one
|
||||
if !exists || oneEntry == nil {
|
||||
return fmt.Sprintf("Container with id %s was present in one state but not the other", two.Name)
|
||||
}
|
||||
|
||||
comparison := areCoreContainerStatusesEqual(oneEntry, ¶mB[index])
|
||||
|
||||
if comparison != "" {
|
||||
return comparison
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// Since compareFunc is unidirectional, we do it twice
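// (A single pass can miss containers that exist only in the first list: comparing, say, a list
// containing containers {web, sidecar} against one containing only {web} reports no difference in
// that direction, because only the second list's entries are looked up in the map built from the
// first; running the comparison both ways catches entries present in only one of the lists.)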
|
||||
result := compareFunc(oneParam, twoParam)
|
||||
if result != "" {
|
||||
return result
|
||||
}
|
||||
|
||||
result = compareFunc(twoParam, oneParam)
|
||||
return result
|
||||
|
||||
}
|
||||
|
||||
// areCoreContainerStatusesEqual compares two ContainerStatus and returns a non-empty string if the two are not equivalent.
|
||||
// Note: returned strings are for logging/debug purposes only.
|
||||
func areCoreContainerStatusesEqual(one *corev1.ContainerStatus, two *corev1.ContainerStatus) string {
|
||||
|
||||
if one.Name != two.Name {
|
||||
return fmt.Sprintf("Core status names differ [%s] [%s]", one.Name, two.Name)
|
||||
}
|
||||
|
||||
if one.ContainerID != two.ContainerID {
|
||||
return fmt.Sprintf("Core status container IDs differ: [%s] [%s]", one.ContainerID, two.ContainerID)
|
||||
}
|
||||
|
||||
compareStates := compareCoreContainerState(one.State, two.State)
|
||||
if compareStates != "" {
|
||||
return fmt.Sprintf("Core status states differ %s", compareStates)
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// compareCoreContainerState compares two ContainerState and returns a non-empty string if the two are not equivalent.
|
||||
// Note: returned strings are for logging/debug purposes only.
|
||||
func compareCoreContainerState(oneParam corev1.ContainerState, twoParam corev1.ContainerState) string {
|
||||
|
||||
// At present, we only compare the state, and not the state contents, so convert the state to a string and
|
||||
// discard the other information.
|
||||
toString := func(one corev1.ContainerState) string {
|
||||
if one.Running != nil {
|
||||
return "Running"
|
||||
}
|
||||
|
||||
if one.Terminated != nil {
|
||||
return "Terminated"
|
||||
}
|
||||
|
||||
if one.Waiting != nil {
|
||||
return "Waiting"
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
oneParamState := toString(oneParam)
|
||||
twoParamState := toString(twoParam)
|
||||
|
||||
if oneParamState != twoParamState {
|
||||
return "Core container states different: " + oneParamState + " " + twoParamState
|
||||
}
|
||||
|
||||
return ""
|
||||
|
||||
}
|
||||
@@ -1,481 +0,0 @@
|
||||
package component
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/devfile/library/pkg/devfile/parser/data"
|
||||
|
||||
devfileParser "github.com/devfile/library/pkg/devfile/parser"
|
||||
adaptersCommon "github.com/redhat-developer/odo/pkg/devfile/adapters/common"
|
||||
"github.com/redhat-developer/odo/pkg/kclient"
|
||||
"github.com/redhat-developer/odo/pkg/machineoutput"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func TestStatusReconciler(t *testing.T) {
|
||||
componentName := "my-component"
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pre []testReconcilerEntry
|
||||
expectedPreEvents int
|
||||
post []testReconcilerEntry
|
||||
successFn func(lfo *logFuncOutput) string
|
||||
}{
|
||||
{
|
||||
name: "a new pod should trigger a status update",
|
||||
pre: []testReconcilerEntry{},
|
||||
expectedPreEvents: 0,
|
||||
post: []testReconcilerEntry{
|
||||
{
|
||||
pods: []*corev1.Pod{
|
||||
createFakePod(componentName, componentName, nil),
|
||||
},
|
||||
},
|
||||
},
|
||||
successFn: func(lfo *logFuncOutput) string {
|
||||
|
||||
latestPodStatus := lfo.getMostRecentKubernetesPodStatus()
|
||||
if latestPodStatus == nil {
|
||||
return "pod not found"
|
||||
}
|
||||
if len(latestPodStatus.Pods) != 1 {
|
||||
return fmt.Sprintf("unexpected pod size, %v", lfo.debugSprintAll())
|
||||
}
|
||||
|
||||
if latestPodStatus.Pods[0].Name != "my-component" {
|
||||
return fmt.Sprintf("mismatching component %v", lfo.debugSprintAll())
|
||||
}
|
||||
|
||||
return ""
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "if a pod is deleted, trigger a status update",
|
||||
pre: []testReconcilerEntry{
|
||||
{
|
||||
pods: []*corev1.Pod{createFakePod(componentName, componentName, func(pod *corev1.Pod) {
|
||||
pod.UID = "one"
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
expectedPreEvents: 1,
|
||||
post: []testReconcilerEntry{
|
||||
{
|
||||
pods: []*corev1.Pod{createFakePod(componentName, componentName, func(pod *corev1.Pod) {
|
||||
pod.UID = "one"
|
||||
}),
|
||||
},
|
||||
isDeleteEventFromWatch: true,
|
||||
},
|
||||
},
|
||||
successFn: func(lfo *logFuncOutput) string {
|
||||
latestPodStatus := lfo.getMostRecentKubernetesPodStatus()
|
||||
if latestPodStatus == nil {
|
||||
return "pod not found"
|
||||
}
|
||||
|
||||
if len(latestPodStatus.Pods) != 0 {
|
||||
return fmt.Sprintf("Unexpected number of pods: %v", lfo.debugSprintAll())
|
||||
}
|
||||
|
||||
return ""
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "if a pod is updated, trigger a status update",
|
||||
pre: []testReconcilerEntry{
|
||||
{
|
||||
pods: []*corev1.Pod{
|
||||
createFakePod(componentName, componentName, func(pod *corev1.Pod) {
|
||||
pod.UID = "one"
|
||||
pod.Status.Phase = corev1.PodPending
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
expectedPreEvents: 1,
|
||||
post: []testReconcilerEntry{
|
||||
{
|
||||
pods: []*corev1.Pod{
|
||||
createFakePod(componentName, componentName, func(pod *corev1.Pod) {
|
||||
pod.UID = "one"
|
||||
pod.Status.Phase = corev1.PodRunning
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
successFn: func(lfo *logFuncOutput) string {
|
||||
latestPodStatus := lfo.getMostRecentKubernetesPodStatus()
|
||||
if latestPodStatus == nil {
|
||||
return "pod not found"
|
||||
}
|
||||
|
||||
if len(latestPodStatus.Pods) != 1 {
|
||||
return fmt.Sprintf("unexpected pod size, %v", lfo.debugSprintAll())
|
||||
}
|
||||
|
||||
if latestPodStatus.Pods[0].Name != "my-component" {
|
||||
return fmt.Sprintf("mismatching component, %v", lfo.debugSprintAll())
|
||||
}
|
||||
|
||||
if latestPodStatus.Pods[0].Phase != string(corev1.PodRunning) {
|
||||
return fmt.Sprintf("unexpected pod phase, %v", lfo.debugSprintAll())
|
||||
}
|
||||
|
||||
return ""
|
||||
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "if a pod fails and is replaced by another, but both temporarily exist together",
|
||||
pre: []testReconcilerEntry{
|
||||
{
|
||||
pods: []*corev1.Pod{
|
||||
createFakePod(componentName, componentName, func(pod *corev1.Pod) {
|
||||
pod.UID = "one"
|
||||
pod.Status.Phase = corev1.PodPending
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedPreEvents: 1,
|
||||
post: []testReconcilerEntry{
|
||||
{
|
||||
pods: []*corev1.Pod{
|
||||
createFakePod(componentName, componentName, func(pod *corev1.Pod) {
|
||||
pod.UID = "one"
|
||||
pod.Status.Phase = corev1.PodFailed
|
||||
}),
|
||||
},
|
||||
},
|
||||
{
|
||||
pods: []*corev1.Pod{
|
||||
createFakePod(componentName, componentName, func(pod *corev1.Pod) {
|
||||
pod.UID = "two"
|
||||
pod.Status.Phase = corev1.PodRunning
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
successFn: func(lfo *logFuncOutput) string {
|
||||
latestPodStatus := lfo.getMostRecentKubernetesPodStatus()
|
||||
if latestPodStatus == nil {
|
||||
return "pod not found"
|
||||
}
|
||||
|
||||
if len(latestPodStatus.Pods) != 2 {
|
||||
return fmt.Sprintf("unexpected pod size, %v", lfo.debugSprintAll())
|
||||
}
|
||||
|
||||
for _, pod := range latestPodStatus.Pods {
|
||||
|
||||
if pod.Name != "my-component" {
|
||||
return fmt.Sprintf("mismatching component, %v", lfo.debugSprintAll())
|
||||
}
|
||||
|
||||
if pod.UID == "one" {
|
||||
if pod.Phase != string(corev1.PodFailed) {
|
||||
return fmt.Sprintf("unexpected pod phase, %v", lfo.debugSprintAll())
|
||||
}
|
||||
}
|
||||
|
||||
if pod.UID == "two" {
|
||||
if pod.Phase != string(corev1.PodRunning) {
|
||||
return fmt.Sprintf("unexpected pod phase, %v", lfo.debugSprintAll())
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return ""
|
||||
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "if a pod fails, and is fully replaced (one and new pod don't co-exist at the same time)",
|
||||
|
||||
pre: []testReconcilerEntry{
|
||||
{
|
||||
pods: []*corev1.Pod{
|
||||
createFakePod(componentName, componentName, func(pod *corev1.Pod) {
|
||||
pod.UID = "one"
|
||||
pod.Status.Phase = corev1.PodRunning
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
expectedPreEvents: 1,
|
||||
post: []testReconcilerEntry{
|
||||
{
|
||||
pods: []*corev1.Pod{
|
||||
createFakePod(componentName, componentName, func(pod *corev1.Pod) {
|
||||
pod.UID = "one"
|
||||
pod.Status.Phase = corev1.PodFailed
|
||||
}),
|
||||
},
|
||||
isDeleteEventFromWatch: true,
|
||||
},
|
||||
{
|
||||
pods: []*corev1.Pod{
|
||||
createFakePod(componentName, componentName, func(pod *corev1.Pod) {
|
||||
pod.UID = "two"
|
||||
pod.Status.Phase = corev1.PodRunning
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
successFn: func(lfo *logFuncOutput) string {
|
||||
latestPodStatus := lfo.getMostRecentKubernetesPodStatus()
|
||||
if latestPodStatus == nil {
|
||||
return "pod not found"
|
||||
}
|
||||
|
||||
if len(latestPodStatus.Pods) != 1 {
|
||||
return fmt.Sprintf("unexpected pod size, %v", lfo.debugSprintAll())
|
||||
}
|
||||
|
||||
if latestPodStatus.Pods[0].Name != "my-component" {
|
||||
return fmt.Sprintf("mismatching component, %v", lfo.debugSprintAll())
|
||||
}
|
||||
|
||||
if latestPodStatus.Pods[0].UID != "two" {
|
||||
return fmt.Sprintf("unexpected pod UID, %v", lfo.debugSprintAll())
|
||||
}
|
||||
return ""
|
||||
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
name: "no changes should trigger no events",
|
||||
pre: []testReconcilerEntry{},
|
||||
expectedPreEvents: 0,
|
||||
post: []testReconcilerEntry{},
|
||||
successFn: func(lfo *logFuncOutput) string {
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
if lfo.listSize() > 0 {
|
||||
return fmt.Sprintf("unexpected events in output %v", lfo.debugSprintAll())
|
||||
}
|
||||
|
||||
return ""
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
devObj := devfileParser.DevfileObj{
|
||||
Data: func() data.DevfileData {
|
||||
devfileData, err := data.NewDevfileData(string(data.APISchemaVersion200))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
return devfileData
|
||||
}(),
|
||||
}
|
||||
|
||||
adapterCtx := adaptersCommon.AdapterContext{
|
||||
ComponentName: componentName,
|
||||
Devfile: devObj,
|
||||
}
|
||||
|
||||
fkclient, _ := kclient.FakeNew()
|
||||
|
||||
adapter := New(adapterCtx, fkclient, nil)
|
||||
|
||||
lfo := logFuncOutput{}
|
||||
adapter.GenericAdapter.SetLogger(machineoutput.NewConsoleMachineEventLoggingClientWithFunction(lfo.logFunc))
|
||||
|
||||
reconcilerChannel := createStatusReconciler(&adapter)
|
||||
|
||||
// Initialize with an empty list
|
||||
reconcilerChannel <- statusReconcilerChannelEntry{
|
||||
pods: []*corev1.Pod{},
|
||||
err: nil,
|
||||
isCompleteListOfPods: true,
|
||||
isDeleteEventFromWatch: false,
|
||||
}
|
||||
|
||||
for _, fauxReconcilerEntry := range tt.pre {
|
||||
// Send the initial simulated cluster status before the test runs
|
||||
reconcilerChannel <- statusReconcilerChannelEntry{
|
||||
pods: fauxReconcilerEntry.pods,
|
||||
err: nil,
|
||||
isCompleteListOfPods: fauxReconcilerEntry.isCompleteListOfPods,
|
||||
isDeleteEventFromWatch: fauxReconcilerEntry.isDeleteEventFromWatch,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Wait for the expected number of events that will be generated by sending the initial faux cluster status
|
||||
expireTime := time.Now().Add(5 * time.Second)
|
||||
for lfo.listSize()-tt.expectedPreEvents != 0 {
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
|
||||
if time.Now().After(expireTime) {
|
||||
t.Fatalf("unexpected number of pre events: %v", lfo.debugSprintAll())
|
||||
}
|
||||
}
|
||||
|
||||
// Clear the expected events
|
||||
lfo.clearList()
|
||||
|
||||
for _, fauxReconcilerEntry := range tt.post {
|
||||
// Send the test's simulated cluster status
|
||||
reconcilerChannel <- statusReconcilerChannelEntry{
|
||||
pods: fauxReconcilerEntry.pods,
|
||||
err: nil,
|
||||
isCompleteListOfPods: fauxReconcilerEntry.isCompleteListOfPods,
|
||||
isDeleteEventFromWatch: fauxReconcilerEntry.isDeleteEventFromWatch,
|
||||
}
|
||||
}
|
||||
|
||||
// Wait up to 10 seconds for the test to signal success (an empty string, indicating no errors)
|
||||
expireTime = time.Now().Add(10 * time.Second)
|
||||
mostRecentError := ""
|
||||
for {
|
||||
failureReason := tt.successFn(&lfo)
|
||||
|
||||
mostRecentError = failureReason
|
||||
|
||||
if failureReason == "" {
|
||||
break
|
||||
}
|
||||
|
||||
if time.Now().After(expireTime) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if mostRecentError != "" {
|
||||
t.Fatal(mostRecentError)
|
||||
}
|
||||
|
||||
if lfo.errorOccurred != nil {
|
||||
t.Fatalf("error occurred during test case run %v", lfo.errorOccurred)
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// testReconcilerEntry simulates a channel message sent to the status reconciler. See 'statusReconcilerChannelEntry' for field details
|
||||
type testReconcilerEntry struct {
|
||||
pods []*corev1.Pod
|
||||
|
||||
isCompleteListOfPods bool
|
||||
|
||||
isDeleteEventFromWatch bool
|
||||
}
|
||||
|
||||
// getMostRecentKubernetesPodStatus is a test convenience method to retrieve the most recent pod status
|
||||
func (lfo *logFuncOutput) getMostRecentKubernetesPodStatus() *machineoutput.KubernetesPodStatus {
|
||||
|
||||
lfo.listMutex.Lock()
|
||||
defer lfo.listMutex.Unlock()
|
||||
|
||||
var podStatus *machineoutput.KubernetesPodStatus
|
||||
|
||||
for _, entry := range lfo.jsonList {
|
||||
|
||||
if entry.GetType() == machineoutput.TypeKubernetesPodStatus {
|
||||
podStatus = entry.(*machineoutput.KubernetesPodStatus)
|
||||
}
|
||||
}
|
||||
|
||||
return podStatus
|
||||
}
|
||||
|
||||
// listSize is a simple thread-safe wrapper around the length of the internal event list
|
||||
func (lfo *logFuncOutput) listSize() int {
|
||||
lfo.listMutex.Lock()
|
||||
defer lfo.listMutex.Unlock()
|
||||
|
||||
return len(lfo.jsonList)
|
||||
}
|
||||
|
||||
// debugSprintAll returns, as a single string, all machine-readable JSON events that have been output thus far
|
||||
func (lfo *logFuncOutput) debugSprintAll() string {
|
||||
|
||||
lfo.listMutex.Lock()
|
||||
defer lfo.listMutex.Unlock()
|
||||
|
||||
result := ""
|
||||
|
||||
for _, entry := range lfo.jsonList {
|
||||
jsonVal, err := json.Marshal(entry)
|
||||
if err != nil {
|
||||
lfo.errorOccurred = err
|
||||
return fmt.Sprint(err)
|
||||
}
|
||||
result += string(jsonVal)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// clearList clears the internal list of received machine readable JSON events
|
||||
func (lfo *logFuncOutput) clearList() {
|
||||
lfo.listMutex.Lock()
|
||||
defer lfo.listMutex.Unlock()
|
||||
|
||||
lfo.jsonList = []machineoutput.MachineEventLogEntry{}
|
||||
}
|
||||
|
||||
// logFunc receives every machine-readable JSON event output by odo and
// adds it to an internal list, for test verification
|
||||
func (lfo *logFuncOutput) logFunc(wrapper machineoutput.MachineEventWrapper) {
|
||||
|
||||
lfo.listMutex.Lock()
|
||||
defer lfo.listMutex.Unlock()
|
||||
|
||||
json, err := wrapper.GetEntry()
|
||||
if err != nil {
|
||||
lfo.errorOccurred = err
|
||||
return
|
||||
}
|
||||
|
||||
machineoutput.OutputSuccessUnindented(wrapper)
|
||||
|
||||
lfo.jsonList = append(lfo.jsonList, json)
|
||||
}
|
||||
|
||||
type logFuncOutput struct {
|
||||
jsonList []machineoutput.MachineEventLogEntry
|
||||
listMutex sync.Mutex
|
||||
errorOccurred error
|
||||
}
|
||||
|
||||
func createFakePod(componentName, podName string, fn func(*corev1.Pod)) *corev1.Pod {
|
||||
fakePod := &corev1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: map[string]string{
|
||||
"component": componentName,
|
||||
},
|
||||
},
|
||||
Status: corev1.PodStatus{
|
||||
Phase: corev1.PodRunning,
|
||||
},
|
||||
}
|
||||
|
||||
if fn != nil {
|
||||
fn(fakePod)
|
||||
}
|
||||
|
||||
return fakePod
|
||||
}
|
||||
@@ -1,330 +1,14 @@
|
||||
package component
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/klog"
|
||||
|
||||
"github.com/redhat-developer/odo/pkg/devfile/adapters/common"
|
||||
"github.com/redhat-developer/odo/pkg/machineoutput"
|
||||
)
|
||||
|
||||
// KubernetesDeploymentStatus is a simplified representation of the component's cluster resources
|
||||
type KubernetesDeploymentStatus struct {
|
||||
DeploymentUID types.UID
|
||||
ReplicaSetUID types.UID
|
||||
Pods []*corev1.Pod
|
||||
}
|
||||
|
||||
// KubernetesPodStatus is a representation of corev1.Pod, but only containing the fields we are interested in (for later marshalling to JSON)
|
||||
type KubernetesPodStatus struct {
|
||||
Name string
|
||||
UID string
|
||||
Phase string
|
||||
Labels map[string]string
|
||||
StartTime *time.Time
|
||||
Containers []corev1.ContainerStatus
|
||||
InitContainers []corev1.ContainerStatus
|
||||
}
|
||||
|
||||
// getDeploymentStatus finds the deployment, replica set, and pods for the component and converts them to a KubernetesDeploymentStatus
|
||||
func (a Adapter) getDeploymentStatus() (*KubernetesDeploymentStatus, error) {
|
||||
|
||||
// 1) Retrieve the deployment
|
||||
deployment, err := a.Client.GetOneDeployment(a.ComponentName, a.AppName)
|
||||
if err != nil {
|
||||
klog.V(4).Infof("Unable to retrieve deployment %s in %s ", a.ComponentName, a.Client.GetCurrentNamespace())
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if deployment == nil {
|
||||
return nil, errors.New("deployment status from Kubernetes API was nil")
|
||||
}
|
||||
|
||||
deploymentUID := deployment.UID
|
||||
|
||||
// 2) Retrieve the replica set that is owned by the deployment; if there are multiple, go with the one with the largest generation
|
||||
replicaSetList, err := a.Client.GetClient().AppsV1().ReplicaSets(a.Client.GetCurrentNamespace()).List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
matchingReplicaSets := []v1.ReplicaSet{}
|
||||
sort.Slice(replicaSetList.Items, func(i, j int) bool {
|
||||
iGen := replicaSetList.Items[i].Generation
|
||||
jGen := replicaSetList.Items[j].Generation
|
||||
|
||||
// Sort descending by generation
|
||||
return iGen > jGen
|
||||
})
|
||||
|
||||
// Locate the first matching replica, after above sort
|
||||
outer:
|
||||
for _, replicaSet := range replicaSetList.Items {
|
||||
for _, ownerRef := range replicaSet.OwnerReferences {
|
||||
if ownerRef.UID == deploymentUID {
|
||||
matchingReplicaSets = append(matchingReplicaSets, replicaSet)
|
||||
break outer
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(matchingReplicaSets) == 0 {
|
||||
return nil, errors.New("no replica sets found")
|
||||
}
|
||||
|
||||
replicaSetUID := matchingReplicaSets[0].UID
|
||||
|
||||
// 3) Retrieve the pods that are owned by the ReplicaSet and return them
|
||||
podList, err := a.Client.GetClient().CoreV1().Pods(a.Client.GetCurrentNamespace()).List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
matchingPods := []*corev1.Pod{}
|
||||
for i, podItem := range podList.Items {
|
||||
for _, ownerRef := range podItem.OwnerReferences {
|
||||
|
||||
if string(ownerRef.UID) == string(replicaSetUID) {
|
||||
matchingPods = append(matchingPods, &podList.Items[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
result := KubernetesDeploymentStatus{}
|
||||
result.Pods = append(result.Pods, matchingPods...)
|
||||
|
||||
result.DeploymentUID = deploymentUID
|
||||
result.ReplicaSetUID = replicaSetUID
|
||||
|
||||
return &result, nil
|
||||
|
||||
}
|
||||
|
||||
// CreateKubernetesPodStatusFromPod extracts only the fields we are interested in from corev1.Pod
|
||||
func CreateKubernetesPodStatusFromPod(pod corev1.Pod) KubernetesPodStatus {
|
||||
podStatus := KubernetesPodStatus{
|
||||
Name: pod.Name,
|
||||
UID: string(pod.UID),
|
||||
Phase: string(pod.Status.Phase),
|
||||
Labels: pod.Labels,
|
||||
InitContainers: []corev1.ContainerStatus{},
|
||||
Containers: []corev1.ContainerStatus{},
|
||||
}
|
||||
|
||||
if pod.Status.StartTime != nil {
|
||||
podStatus.StartTime = &pod.Status.StartTime.Time
|
||||
}
|
||||
|
||||
podStatus.InitContainers = pod.Status.InitContainerStatuses
|
||||
|
||||
podStatus.Containers = pod.Status.ContainerStatuses
|
||||
|
||||
return podStatus
|
||||
|
||||
}
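A hedged illustration of how this reduced status is meant to be consumed: since the struct only contains exported fields, it can be marshalled directly to JSON for the machine-readable status output. The helper name and the encoding/json and fmt imports below are assumptions made for the sketch.

// debugPrintPodStatus converts a pod into the trimmed-down status and
// prints it as indented JSON (illustrative only).
func debugPrintPodStatus(pod corev1.Pod) error {
	status := CreateKubernetesPodStatusFromPod(pod)
	out, err := json.MarshalIndent(status, "", "  ")
	if err != nil {
		return err
	}
	fmt.Println(string(out))
	return nil
}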
|
||||
|
||||
const (
|
||||
// SupervisordCheckInterval is the interval between successive supervisord status checks, after the first check
|
||||
SupervisordCheckInterval = time.Duration(10) * time.Second
|
||||
)
|
||||
|
||||
// StartSupervisordCtlStatusWatch kicks off a goroutine which calls 'supervisord ctl status' within every odo-managed container, every X seconds,
|
||||
// and reports the result to the console.
|
||||
func (a Adapter) StartSupervisordCtlStatusWatch() {
|
||||
|
||||
watcher := newSupervisordStatusWatch(a.Logger())
|
||||
|
||||
ticker := time.NewTicker(SupervisordCheckInterval)
|
||||
|
||||
go func() {
|
||||
|
||||
for {
|
||||
// On initial goroutine start, perform a query
|
||||
watcher.querySupervisordStatusFromContainers(a)
|
||||
<-ticker.C
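// ...then block until the next tick, so the containers are re-queried every SupervisordCheckInterval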
|
||||
}
|
||||
|
||||
}()
|
||||
|
||||
}
|
||||
|
||||
type supervisordStatusWatcher struct {
|
||||
// See 'createSupervisordStatusReconciler' for a description of the reconciler
|
||||
statusReconcilerChannel chan supervisordStatusEvent
|
||||
}
|
||||
|
||||
func newSupervisordStatusWatch(loggingClient machineoutput.MachineEventLoggingClient) *supervisordStatusWatcher {
|
||||
inputChan := createSupervisordStatusReconciler(loggingClient)
|
||||
|
||||
return &supervisordStatusWatcher{
|
||||
statusReconcilerChannel: inputChan,
|
||||
}
|
||||
}
|
||||
|
||||
// createSupervisordStatusReconciler contains the status reconciler implementation.
|
||||
// The reconciler receives (is sent) channel messages that contain the 'supervisord ctl status' values for each odo-managed container,
|
||||
// with the result reported to the console.
|
||||
func createSupervisordStatusReconciler(loggingClient machineoutput.MachineEventLoggingClient) chan supervisordStatusEvent {
|
||||
|
||||
senderChannel := make(chan supervisordStatusEvent)
|
||||
|
||||
go func() {
|
||||
// Map key: 'podUID:containerName' (within pod) -> list of statuses from 'supervisord ctl status'
|
||||
lastContainerStatus := map[string][]supervisordStatus{}
|
||||
|
||||
for {
|
||||
|
||||
event := <-senderChannel
|
||||
|
||||
key := event.podUID + ":" + event.containerName
|
||||
|
||||
previousStatus, hasLastContainerStatus := lastContainerStatus[key]
|
||||
lastContainerStatus[key] = event.status
|
||||
|
||||
reportChange := false
|
||||
|
||||
if hasLastContainerStatus {
|
||||
// If we saw a status for this container previously...
|
||||
if !supervisordStatusesEqual(previousStatus, event.status) {
|
||||
reportChange = true
|
||||
} else {
|
||||
reportChange = false
|
||||
}
|
||||
|
||||
} else {
|
||||
// No status from the container previously...
|
||||
reportChange = true
|
||||
}
|
||||
|
||||
entries := []machineoutput.SupervisordStatusEntry{}
|
||||
|
||||
for _, status := range event.status {
|
||||
entries = append(entries, machineoutput.SupervisordStatusEntry{
|
||||
Program: status.program,
|
||||
Status: status.status,
|
||||
})
|
||||
}
|
||||
|
||||
loggingClient.SupervisordStatus(entries, machineoutput.TimestampNow())
|
||||
|
||||
if reportChange {
|
||||
klog.V(4).Infof("Ccontainer %v status has changed - is: %v", event.containerName, event.status)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}()
|
||||
|
||||
return senderChannel
|
||||
}
|
||||
|
||||
// querySupervisordStatusFromContainers locates the correct component's pod, and for each container within the pod queries the supervisord ctl status.
|
||||
// The status results are sent to the reconciler.
|
||||
func (sw *supervisordStatusWatcher) querySupervisordStatusFromContainers(a Adapter) {
|
||||
|
||||
status, err := a.getDeploymentStatus()
|
||||
if err != nil {
|
||||
a.Logger().ReportError(errors.Wrap(err, "unable to retrieve container status"), machineoutput.TimestampNow())
|
||||
return
|
||||
}
|
||||
|
||||
if status == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Given a list of odo-managed pods, we want to find the newest; if there are multiple with the same age,
// then pick the one that is most alive, based on its pod phase.
|
||||
var podPhaseSortOrder = map[corev1.PodPhase]int{
|
||||
corev1.PodFailed: 0,
|
||||
corev1.PodSucceeded: 1,
|
||||
corev1.PodUnknown: 2,
|
||||
corev1.PodPending: 3,
|
||||
corev1.PodRunning: 4,
|
||||
}
|
||||
sort.Slice(status.Pods, func(i, j int) bool {
|
||||
|
||||
iPod := status.Pods[i]
|
||||
jPod := status.Pods[j]
|
||||
|
||||
iTime := iPod.CreationTimestamp.Time
|
||||
jTime := jPod.CreationTimestamp.Time
|
||||
|
||||
if !jTime.Equal(iTime) {
|
||||
// Sort descending by creation timestamp
|
||||
return jTime.After(iTime)
|
||||
}
|
||||
|
||||
// Next, sort descending to find the pod with most successful pod phase:
|
||||
// PodRunning > PodPending > PodUnknown > PodSucceeded > PodFailed
|
||||
return podPhaseSortOrder[jPod.Status.Phase] > podPhaseSortOrder[iPod.Status.Phase]
|
||||
})
|
||||
|
||||
if len(status.Pods) < 1 {
|
||||
return
|
||||
}
|
||||
|
||||
// Retrieve the first pod, which post-sort should be the most recent and most alive
|
||||
pod := status.Pods[0]
|
||||
|
||||
debugCommand, err := common.GetDebugCommand(a.Devfile.Data, a.devfileDebugCmd)
|
||||
if err != nil {
|
||||
a.Logger().ReportError(errors.Wrap(err, "unable to retrieve debug command"), machineoutput.TimestampNow())
|
||||
return
|
||||
}
|
||||
|
||||
runCommand, err := common.GetRunCommand(a.Devfile.Data, a.devfileRunCmd)
|
||||
if err != nil {
|
||||
a.Logger().ReportError(errors.Wrap(err, "unable to retrieve run command"), machineoutput.TimestampNow())
|
||||
return
|
||||
}
|
||||
|
||||
// For each of the containers, retrieve the status of the tasks and send that status back to the status reconciler
|
||||
for _, container := range pod.Status.ContainerStatuses {
|
||||
|
||||
if (runCommand.Exec != nil && container.Name == runCommand.Exec.Component) || (debugCommand.Exec != nil && container.Name == debugCommand.Exec.Component) {
|
||||
status := getSupervisordStatusInContainer(pod.Name, container.Name, a)
|
||||
|
||||
sw.statusReconcilerChannel <- supervisordStatusEvent{
|
||||
containerName: container.Name,
|
||||
status: status,
|
||||
podUID: string(pod.UID),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// supervisordStatusesEqual is a simple comparison of []supervisordStatus that ignores slice element order
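// (For example, entries {run: RUNNING}, {debug: STOPPED} and the same two entries in the opposite
// order compare as equal; the program/status values here are only illustrative.)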
|
||||
func supervisordStatusesEqual(one []supervisordStatus, two []supervisordStatus) bool {
|
||||
if len(one) != len(two) {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, oneVal := range one {
|
||||
|
||||
match := false
|
||||
for _, twoVal := range two {
|
||||
|
||||
if reflect.DeepEqual(oneVal, twoVal) {
|
||||
match = true
|
||||
}
|
||||
}
|
||||
if !match {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// getSupervisordStatusInContainer executes 'supervisord ctl status' within the pod and container, parses the output,
|
||||
// and returns the status for the container
|
||||
func getSupervisordStatusInContainer(podName string, containerName string, a Adapter) []supervisordStatus {
|
||||
@@ -375,10 +59,3 @@ type supervisordStatus struct {
|
||||
program string
|
||||
status string
|
||||
}
|
||||
|
||||
// All statuses seen within the container
|
||||
type supervisordStatusEvent struct {
|
||||
containerName string
|
||||
podUID string
|
||||
status []supervisordStatus
|
||||
}
|
||||
|
||||
@@ -1,284 +0,0 @@
|
||||
package component
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/redhat-developer/odo/pkg/util"
|
||||
|
||||
"github.com/devfile/library/pkg/devfile/parser/data"
|
||||
|
||||
"github.com/redhat-developer/odo/pkg/envinfo"
|
||||
|
||||
devfilev1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
devfileParser "github.com/devfile/library/pkg/devfile/parser"
|
||||
"github.com/devfile/library/pkg/testingutil"
|
||||
applabels "github.com/redhat-developer/odo/pkg/application/labels"
|
||||
componentlabels "github.com/redhat-developer/odo/pkg/component/labels"
|
||||
adaptersCommon "github.com/redhat-developer/odo/pkg/devfile/adapters/common"
|
||||
"github.com/redhat-developer/odo/pkg/kclient"
|
||||
|
||||
v1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
ktesting "k8s.io/client-go/testing"
|
||||
)
|
||||
|
||||
func TestGetDeploymentStatus(t *testing.T) {
|
||||
|
||||
testComponentName := "component"
|
||||
testAppName := "app"
|
||||
|
||||
deploymentName, err := util.NamespaceKubernetesObject(testComponentName, testAppName)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
envInfo envinfo.EnvSpecificInfo
|
||||
running bool
|
||||
wantErr bool
|
||||
deployment v1.Deployment
|
||||
replicaSet v1.ReplicaSetList
|
||||
podSet corev1.PodList
|
||||
expectedDeploymentUID string
|
||||
expectedReplicaSetUID string
|
||||
expectedPodUID string
|
||||
}{
|
||||
{
|
||||
name: "Case 1: A single deployment, matching replica, and matching pod",
|
||||
envInfo: envinfo.EnvSpecificInfo{},
|
||||
running: false,
|
||||
wantErr: false,
|
||||
deployment: v1.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: kclient.DeploymentKind,
|
||||
APIVersion: kclient.DeploymentAPIVersion,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: deploymentName,
|
||||
UID: types.UID("deployment-uid"),
|
||||
Labels: map[string]string{
|
||||
componentlabels.ComponentLabel: testComponentName,
|
||||
applabels.ApplicationLabel: testAppName,
|
||||
},
|
||||
},
|
||||
},
|
||||
replicaSet: v1.ReplicaSetList{
|
||||
Items: []v1.ReplicaSet{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "replica-set-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
UID: types.UID("deployment-uid"),
|
||||
},
|
||||
},
|
||||
},
|
||||
Spec: v1.ReplicaSetSpec{
|
||||
Template: corev1.PodTemplateSpec{
|
||||
Spec: corev1.PodSpec{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
podSet: corev1.PodList{
|
||||
Items: []corev1.Pod{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "pod-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
UID: types.UID("replica-set-uid"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedDeploymentUID: "deployment-uid",
|
||||
expectedReplicaSetUID: "replica-set-uid",
|
||||
expectedPodUID: "pod-uid",
|
||||
},
|
||||
{
|
||||
name: "Case 2: A single deployment, multiple replicas with different generations, and a single matching pod",
|
||||
envInfo: envinfo.EnvSpecificInfo{},
|
||||
running: false,
|
||||
wantErr: false,
|
||||
deployment: v1.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: kclient.DeploymentKind,
|
||||
APIVersion: kclient.DeploymentAPIVersion,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: deploymentName,
|
||||
UID: types.UID("deployment-uid"),
|
||||
Labels: map[string]string{
|
||||
componentlabels.ComponentLabel: testComponentName,
|
||||
applabels.ApplicationLabel: testAppName,
|
||||
},
|
||||
},
|
||||
},
|
||||
replicaSet: v1.ReplicaSetList{
|
||||
Items: []v1.ReplicaSet{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "replica-set-uid1",
|
||||
Generation: 1,
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
UID: types.UID("deployment-uid"),
|
||||
},
|
||||
},
|
||||
},
|
||||
Spec: v1.ReplicaSetSpec{
|
||||
Template: corev1.PodTemplateSpec{
|
||||
Spec: corev1.PodSpec{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "replica-set-uid2",
|
||||
Generation: 2,
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
UID: types.UID("deployment-uid"),
|
||||
},
|
||||
},
|
||||
},
|
||||
Spec: v1.ReplicaSetSpec{
|
||||
Template: corev1.PodTemplateSpec{
|
||||
Spec: corev1.PodSpec{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
podSet: corev1.PodList{
|
||||
Items: []corev1.Pod{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "pod-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
UID: types.UID("replica-set-uid2"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedDeploymentUID: "deployment-uid",
|
||||
expectedReplicaSetUID: "replica-set-uid2",
|
||||
expectedPodUID: "pod-uid",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
comp := testingutil.GetFakeContainerComponent(testComponentName)
|
||||
devObj := devfileParser.DevfileObj{
|
||||
Data: func() data.DevfileData {
|
||||
devfileData, err := data.NewDevfileData(string(data.APISchemaVersion200))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
err = devfileData.AddComponents([]devfilev1.Component{comp})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
err = devfileData.AddCommands([]devfilev1.Command{getExecCommand("run", devfilev1.RunCommandGroupKind)})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
return devfileData
|
||||
}(),
|
||||
}
|
||||
|
||||
adapterCtx := adaptersCommon.AdapterContext{
|
||||
ComponentName: testComponentName,
|
||||
AppName: testAppName,
|
||||
Devfile: devObj,
|
||||
}
|
||||
|
||||
fkclient, fkclientset := kclient.FakeNew()
|
||||
|
||||
// Return test case's deployment, when requested
|
||||
fkclientset.Kubernetes.PrependReactor("get", "*", func(action ktesting.Action) (bool, runtime.Object, error) {
|
||||
if getAction, is := action.(ktesting.GetAction); is && getAction.GetName() == deploymentName {
|
||||
return true, &tt.deployment, nil
|
||||
}
|
||||
return false, nil, nil
|
||||
})
|
||||
|
||||
// Return test case's deployment, when requested
|
||||
fkclientset.Kubernetes.PrependReactor("patch", "*", func(action ktesting.Action) (bool, runtime.Object, error) {
|
||||
if patchAction, is := action.(ktesting.PatchAction); is && patchAction.GetName() == deploymentName {
|
||||
return true, &tt.deployment, nil
|
||||
}
|
||||
return false, nil, nil
|
||||
})
|
||||
|
||||
// Return test case's deployment, when requested
|
||||
fkclientset.Kubernetes.PrependReactor("apply", "*", func(action ktesting.Action) (bool, runtime.Object, error) {
|
||||
if patchAction, is := action.(ktesting.PatchAction); is && patchAction.GetName() == deploymentName {
|
||||
return true, &tt.deployment, nil
|
||||
}
|
||||
return false, nil, nil
|
||||
})
|
||||
|
||||
// Return the test case's replica sets or pods, when requested
|
||||
fkclientset.Kubernetes.PrependReactor("list", "*", func(action ktesting.Action) (bool, runtime.Object, error) {
|
||||
switch action.GetResource().Resource {
|
||||
case "replicasets":
|
||||
return true, &tt.replicaSet, nil
|
||||
case "pods":
|
||||
return true, &tt.podSet, nil
|
||||
case "deployments":
|
||||
return true, &v1.DeploymentList{Items: []v1.Deployment{tt.deployment}}, nil
|
||||
}
|
||||
return false, nil, nil
|
||||
})
|
||||
|
||||
tt.envInfo.EnvInfo = *envinfo.GetFakeEnvInfo(envinfo.ComponentSettings{
|
||||
Name: testComponentName,
|
||||
AppName: testAppName,
|
||||
})
|
||||
|
||||
componentAdapter := New(adapterCtx, fkclient, nil)
|
||||
fkclient.Namespace = componentAdapter.Client.GetCurrentNamespace()
|
||||
err := componentAdapter.createOrUpdateComponent(tt.running, tt.envInfo, false)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Call the function to test
|
||||
result, err := componentAdapter.getDeploymentStatus()
|
||||
// Checks for unexpected error cases
|
||||
if !tt.wantErr == (err != nil) {
|
||||
t.Fatalf("unexpected error %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
if string(result.DeploymentUID) != tt.expectedDeploymentUID {
|
||||
t.Fatalf("could not find expected deployment UID %s %s", string(result.DeploymentUID), tt.expectedDeploymentUID)
|
||||
}
|
||||
|
||||
if string(result.ReplicaSetUID) != tt.expectedReplicaSetUID {
|
||||
t.Fatalf("could not find expected replica set UID %s %s", string(result.ReplicaSetUID), tt.expectedReplicaSetUID)
|
||||
}
|
||||
|
||||
if result.Pods == nil || len(result.Pods) != 1 {
|
||||
t.Fatalf("results of this test should match 1 pod")
|
||||
}
|
||||
|
||||
if string(result.Pods[0].UID) != tt.expectedPodUID {
|
||||
t.Fatalf("pod UID did not match expected pod UID: %s %s", string(result.Pods[0].UID), tt.expectedPodUID)
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
|
||||
devfileParser "github.com/devfile/library/pkg/devfile/parser"
|
||||
adaptersCommon "github.com/redhat-developer/odo/pkg/devfile/adapters/common"
|
||||
"github.com/redhat-developer/odo/pkg/kclient"
|
||||
"github.com/redhat-developer/odo/pkg/util"
|
||||
|
||||
"github.com/redhat-developer/odo/pkg/storage"
|
||||
@@ -43,16 +42,6 @@ func GetOdoContainerVolumes(sourcePVCName string) []corev1.Volume {
|
||||
}
|
||||
}
|
||||
|
||||
// ComponentExists checks whether a deployment by the given name exists in the given app
|
||||
func ComponentExists(client kclient.ClientInterface, name string, app string) (bool, error) {
|
||||
deployment, err := client.GetOneDeployment(name, app)
|
||||
if _, ok := err.(*kclient.DeploymentNotFoundError); ok {
|
||||
klog.V(2).Infof("Deployment %s not found for belonging to the %s app ", name, app)
|
||||
return false, nil
|
||||
}
|
||||
return deployment != nil, err
|
||||
}
|
||||
|
||||
// isEnvPresent checks if the env variable is present in an array of env variables
|
||||
func isEnvPresent(EnvVars []corev1.EnvVar, envVarName string) bool {
|
||||
isPresent := false
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/devfile/library/pkg/devfile/parser/data"
|
||||
"github.com/redhat-developer/odo/pkg/component"
|
||||
"github.com/redhat-developer/odo/pkg/storage"
|
||||
"github.com/redhat-developer/odo/pkg/util"
|
||||
|
||||
@@ -86,7 +87,7 @@ func TestComponentExists(t *testing.T) {
|
||||
})
|
||||
|
||||
// Verify that a component with the specified name exists
|
||||
componentExists, err := ComponentExists(fkclient, tt.getComponentName, tt.appName)
|
||||
componentExists, err := component.ComponentExists(fkclient, tt.getComponentName, tt.appName)
|
||||
if !tt.wantErr && err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
} else if !tt.wantErr && componentExists != tt.want {
|
||||
|
||||
@@ -5,12 +5,10 @@ import (
|
||||
"errors"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
|
||||
devfile "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
"github.com/devfile/library/pkg/devfile/parser"
|
||||
"github.com/devfile/library/pkg/devfile/parser/data/v2/common"
|
||||
"github.com/redhat-developer/odo/pkg/odo/genericclioptions"
|
||||
)
|
||||
|
||||
// Backend is an interface that must be implemented by container runtimes
|
||||
@@ -27,14 +25,13 @@ var lookPathCmd = exec.LookPath
|
||||
|
||||
// BuildPushImages builds all images defined in the devfile with the detected backend
|
||||
// If push is true, also push the images to their registries
|
||||
func BuildPushImages(ctx *genericclioptions.Context, push bool) error {
|
||||
func BuildPushImages(devfileObj parser.DevfileObj, path string, push bool) error {
|
||||
|
||||
backend, err := selectBackend()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
devfileObj := ctx.EnvSpecificInfo.GetDevfileObj()
|
||||
components, err := devfileObj.Data.GetComponents(common.DevfileOptions{
|
||||
ComponentOptions: common.ComponentOptions{ComponentType: devfile.ImageComponentType},
|
||||
})
|
||||
@@ -42,10 +39,8 @@ func BuildPushImages(ctx *genericclioptions.Context, push bool) error {
|
||||
return err
|
||||
}
|
||||
|
||||
devfilePath := filepath.Dir(ctx.EnvSpecificInfo.GetDevfilePath())
|
||||
|
||||
for _, component := range components {
|
||||
err = buildPushImage(backend, component.Image, devfilePath, push)
|
||||
err = buildPushImage(backend, component.Image, path, push)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -38,11 +38,3 @@ type CompositeRunKindError struct {
|
||||
func (e *CompositeRunKindError) Error() string {
|
||||
return "composite commands of run kind are not supported currently"
|
||||
}
|
||||
|
||||
type UnsupportedFieldError struct {
|
||||
fieldName string
|
||||
}
|
||||
|
||||
func (e *UnsupportedFieldError) Error() string {
|
||||
return fmt.Sprintf("%q is not supported in odo", e.fieldName)
|
||||
}
|
||||
|
||||
@@ -99,7 +99,6 @@ type ClientInterface interface {
|
||||
WaitAndGetPodWithEvents(selector string, desiredPhase corev1.PodPhase, waitMessage string, pushTimeout time.Duration) (*corev1.Pod, error)
|
||||
ExecCMDInContainer(containerName, podName string, cmd []string, stdout io.Writer, stderr io.Writer, stdin io.Reader, tty bool) error
|
||||
ExtractProjectToComponent(containerName, podName string, targetPath string, stdin io.Reader) error
|
||||
GetOnePod(componentName, appName string) (*corev1.Pod, error)
|
||||
GetPodUsingComponentName(componentName string) (*corev1.Pod, error)
|
||||
GetOnePodFromSelector(selector string) (*corev1.Pod, error)
|
||||
GetPodLogs(podName, containerName string, followLog bool) (io.ReadCloser, error)
|
||||
|
||||
@@ -734,21 +734,6 @@ func (mr *MockClientInterfaceMockRecorder) GetOneIngressFromSelector(selector in
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOneIngressFromSelector", reflect.TypeOf((*MockClientInterface)(nil).GetOneIngressFromSelector), selector)
|
||||
}
|
||||
|
||||
// GetOnePod mocks base method.
|
||||
func (m *MockClientInterface) GetOnePod(componentName, appName string) (*v12.Pod, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetOnePod", componentName, appName)
|
||||
ret0, _ := ret[0].(*v12.Pod)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetOnePod indicates an expected call of GetOnePod.
|
||||
func (mr *MockClientInterfaceMockRecorder) GetOnePod(componentName, appName interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOnePod", reflect.TypeOf((*MockClientInterface)(nil).GetOnePod), componentName, appName)
|
||||
}
|
||||
|
||||
// GetOnePodFromSelector mocks base method.
|
||||
func (m *MockClientInterface) GetOnePodFromSelector(selector string) (*v12.Pod, error) {
|
||||
m.ctrl.T.Helper()
|
||||
|
||||
@@ -15,7 +15,6 @@ import (
|
||||
|
||||
// api resource types
|
||||
|
||||
componentlabels "github.com/redhat-developer/odo/pkg/component/labels"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
@@ -186,11 +185,6 @@ func (c *Client) ExtractProjectToComponent(containerName, podName string, target
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetOnePod gets a pod using the component and app name
|
||||
func (c *Client) GetOnePod(componentName, appName string) (*corev1.Pod, error) {
|
||||
return c.GetOnePodFromSelector(componentlabels.GetSelector(componentName, appName))
|
||||
}
|
||||
|
||||
// GetPodUsingComponentName gets a pod using the component name
|
||||
func (c *Client) GetPodUsingComponentName(componentName string) (*corev1.Pod, error) {
|
||||
podSelector := fmt.Sprintf("component=%s", componentName)
|
||||
|
||||
60
pkg/libdevfile/command.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package libdevfile
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
"github.com/devfile/library/pkg/devfile/parser"
|
||||
"github.com/devfile/library/pkg/devfile/parser/data/v2/common"
|
||||
"github.com/redhat-developer/odo/pkg/util"
|
||||
)
|
||||
|
||||
type command interface {
|
||||
CheckValidity() error
|
||||
Execute(handler Handler) error
|
||||
}
|
||||
|
||||
// newCommand returns a command implementation, depending on the type of the command
|
||||
func newCommand(devfileObj parser.DevfileObj, devfileCmd v1alpha2.Command) (command, error) {
|
||||
var cmd command
|
||||
|
||||
commandType, err := common.GetCommandType(devfileCmd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch commandType {
|
||||
|
||||
case v1alpha2.ApplyCommandType:
|
||||
cmd = newApplyCommand(devfileObj, devfileCmd)
|
||||
|
||||
case v1alpha2.CompositeCommandType:
|
||||
if util.SafeGetBool(devfileCmd.Composite.Parallel) {
cmd = newParallelCompositeCommand(devfileObj, devfileCmd)
} else {
cmd = newCompositeCommand(devfileObj, devfileCmd)
}
|
||||
|
||||
case v1alpha2.ExecCommandType:
|
||||
cmd = newExecCommand(devfileObj, devfileCmd)
|
||||
}
|
||||
|
||||
if err = cmd.CheckValidity(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cmd, nil
|
||||
}
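// Illustrative only (not part of this change): using the generator test helpers
// added in this PR, a parallel composite command that newCommand would route to
// newParallelCompositeCommand can be constructed as:
//
//	cmd := generator.GetCompositeCommand(generator.CompositeCommandParams{
//		Id:       "deploy",
//		Commands: []string{"build-image", "k8s-deployment"},
//		Parallel: pointer.BoolPtr(true),
//		Kind:     v1alpha2.DeployCommandGroupKind,
//	})
//
// With Parallel unset or false, the same definition is handled by the serial
// newCompositeCommand implementation.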
|
||||
|
||||
// allCommandsMap returns a map of all commands in the devfile, indexed by Id
|
||||
func allCommandsMap(devfileObj parser.DevfileObj) (map[string]v1alpha2.Command, error) {
|
||||
commands, err := devfileObj.Data.GetCommands(common.DevfileOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
commandMap := make(map[string]v1alpha2.Command, len(commands))
|
||||
for _, command := range commands {
|
||||
commandMap[strings.ToLower(command.Id)] = command
|
||||
}
|
||||
|
||||
return commandMap, nil
|
||||
}
|
||||
49
pkg/libdevfile/command_apply.go
Normal file
@@ -0,0 +1,49 @@
|
||||
package libdevfile
|
||||
|
||||
import (
|
||||
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
"github.com/devfile/library/pkg/devfile/parser"
|
||||
"github.com/devfile/library/pkg/devfile/parser/data/v2/common"
|
||||
)
|
||||
|
||||
// applyCommand is a command implementation for Apply commands
|
||||
type applyCommand struct {
|
||||
command v1alpha2.Command
|
||||
devfileObj parser.DevfileObj
|
||||
}
|
||||
|
||||
// newApplyCommand creates a new applyCommand instance
|
||||
func newApplyCommand(devfileObj parser.DevfileObj, command v1alpha2.Command) *applyCommand {
|
||||
return &applyCommand{
|
||||
command: command,
|
||||
devfileObj: devfileObj,
|
||||
}
|
||||
}
|
||||
|
||||
func (o *applyCommand) CheckValidity() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *applyCommand) Execute(handler Handler) error {
|
||||
devfileComponents, err := o.devfileObj.Data.GetComponents(common.DevfileOptions{
|
||||
FilterByName: o.command.Apply.Component,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(devfileComponents) == 0 {
|
||||
return NewComponentNotExistError(o.command.Apply.Component)
|
||||
}
|
||||
|
||||
if len(devfileComponents) != 1 {
|
||||
return NewComponentsWithSameNameError()
|
||||
}
|
||||
|
||||
component, err := newComponent(o.devfileObj, devfileComponents[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return component.Apply(handler)
|
||||
}
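// Illustrative flow (not part of this change): an apply command whose Component
// field references an image component resolves via newComponent to imageComponent,
// whose Apply calls handler.ApplyImage; a Kubernetes component routes to
// handler.ApplyKubernetes; container, openshift and volume components are
// currently no-ops for apply.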
|
||||
65
pkg/libdevfile/command_apply_test.go
Normal file
@@ -0,0 +1,65 @@
|
||||
package libdevfile
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
"github.com/devfile/library/pkg/devfile/parser"
|
||||
"github.com/devfile/library/pkg/devfile/parser/data"
|
||||
"github.com/redhat-developer/odo/pkg/libdevfile/generator"
|
||||
)
|
||||
|
||||
func Test_applyCommand_Execute(t *testing.T) {
|
||||
|
||||
command1 := generator.GetApplyCommand(generator.ApplyCommandParams{
|
||||
Id: "command1",
|
||||
Component: "component",
|
||||
})
|
||||
component := generator.GetContainerComponent(generator.ContainerComponentParams{
|
||||
Name: "component",
|
||||
})
|
||||
component1 := generator.GetContainerComponent(generator.ContainerComponentParams{
|
||||
Name: "component1",
|
||||
})
|
||||
component2 := generator.GetContainerComponent(generator.ContainerComponentParams{
|
||||
Name: "component2",
|
||||
})
|
||||
|
||||
type fields struct {
|
||||
command v1alpha2.Command
|
||||
devfileObj func() parser.DevfileObj
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "execute an apply command",
|
||||
fields: fields{
|
||||
command: command1,
|
||||
devfileObj: func() parser.DevfileObj {
|
||||
data, _ := data.NewDevfileData(string(data.APISchemaVersion200))
|
||||
_ = data.AddCommands([]v1alpha2.Command{command1})
|
||||
_ = data.AddComponents([]v1alpha2.Component{component, component1, component2})
|
||||
return parser.DevfileObj{
|
||||
Data: data,
|
||||
}
|
||||
},
|
||||
},
|
||||
},
|
||||
// TODO: Add test cases.
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
o := &applyCommand{
|
||||
command: tt.fields.command,
|
||||
devfileObj: tt.fields.devfileObj(),
|
||||
}
|
||||
// TODO handler
|
||||
if err := o.Execute(nil); (err != nil) != tt.wantErr {
|
||||
t.Errorf("applyCommand.Execute() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
56
pkg/libdevfile/command_composite.go
Normal file
@@ -0,0 +1,56 @@
|
||||
package libdevfile
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
"github.com/devfile/library/pkg/devfile/parser"
|
||||
)
|
||||
|
||||
// compositeCommand is a command implementation that represents non-parallel composite commands
|
||||
type compositeCommand struct {
|
||||
command v1alpha2.Command
|
||||
devfileObj parser.DevfileObj
|
||||
}
|
||||
|
||||
// newCompositeCommand creates a new command implementation which will execute the provided commands in the specified order
|
||||
func newCompositeCommand(devfileObj parser.DevfileObj, command v1alpha2.Command) *compositeCommand {
|
||||
return &compositeCommand{
|
||||
command: command,
|
||||
devfileObj: devfileObj,
|
||||
}
|
||||
}
|
||||
|
||||
func (o *compositeCommand) CheckValidity() error {
|
||||
allCommands, err := allCommandsMap(o.devfileObj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cmds := o.command.Composite.Commands
|
||||
for _, cmd := range cmds {
|
||||
if _, ok := allCommands[strings.ToLower(cmd)]; !ok {
|
||||
return fmt.Errorf("composite command %q references command %q not found in devfile", o.command.Id, cmd)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Execute loops over each command and executes them serially
|
||||
func (o *compositeCommand) Execute(handler Handler) error {
|
||||
allCommands, err := allCommandsMap(o.devfileObj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, devfileCmd := range o.command.Composite.Commands {
|
||||
cmd, err := newCommand(o.devfileObj, allCommands[strings.ToLower(devfileCmd)])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = cmd.Execute(handler)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
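// Note (illustrative): sub-commands run strictly in the order they are listed in
// the devfile, and the first failure aborts the remaining ones; for
// Commands: ["build", "push", "deploy"], an error in "push" prevents "deploy"
// from running.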
|
||||
66
pkg/libdevfile/command_composite_parallel.go
Normal file
@@ -0,0 +1,66 @@
|
||||
package libdevfile
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
"github.com/devfile/library/pkg/devfile/parser"
|
||||
"github.com/redhat-developer/odo/pkg/util"
|
||||
)
|
||||
|
||||
// parallelCompositeCommand is a command implementation that represents parallel composite commands
|
||||
type parallelCompositeCommand struct {
|
||||
command v1alpha2.Command
|
||||
devfileObj parser.DevfileObj
|
||||
}
|
||||
|
||||
// newParallelCompositeCommand creates a new command implementation which will execute the provided commands in parallel
|
||||
func newParallelCompositeCommand(devfileObj parser.DevfileObj, command v1alpha2.Command) *parallelCompositeCommand {
|
||||
return ¶llelCompositeCommand{
|
||||
command: command,
|
||||
devfileObj: devfileObj,
|
||||
}
|
||||
}
|
||||
|
||||
func (o *parallelCompositeCommand) CheckValidity() error {
|
||||
allCommands, err := allCommandsMap(o.devfileObj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cmds := o.command.Composite.Commands
|
||||
for _, cmd := range cmds {
|
||||
if _, ok := allCommands[strings.ToLower(cmd)]; !ok {
|
||||
return fmt.Errorf("composite command %q has command %v not found in devfile", cmd, o.command.Id)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Execute loops over each command and executes them in parallel
|
||||
func (o *parallelCompositeCommand) Execute(handler Handler) error {
|
||||
allCommands, err := allCommandsMap(o.devfileObj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
commandExecs := util.NewConcurrentTasks(len(o.command.Composite.Commands))
|
||||
for _, devfileCmd := range o.command.Composite.Commands {
|
||||
cmd, err2 := newCommand(o.devfileObj, allCommands[devfileCmd])
|
||||
if err2 != nil {
|
||||
return err2
|
||||
}
|
||||
commandExecs.Add(util.ConcurrentTask{
|
||||
ToRun: func(errChannel chan error) {
|
||||
err3 := cmd.Execute(handler)
|
||||
if err3 != nil {
|
||||
errChannel <- err3
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
err = commandExecs.Run()
|
||||
if err != nil {
|
||||
return fmt.Errorf("parallel command execution failed: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
29
pkg/libdevfile/command_exec.go
Normal file
@@ -0,0 +1,29 @@
|
||||
package libdevfile
|
||||
|
||||
import (
|
||||
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
"github.com/devfile/library/pkg/devfile/parser"
|
||||
)
|
||||
|
||||
// execCommand is a command implementation for exec commands
|
||||
type execCommand struct {
|
||||
command v1alpha2.Command
|
||||
devfileObj parser.DevfileObj
|
||||
}
|
||||
|
||||
// newExecCommand creates a new execCommand instance; the exec command itself is
// run by the Handler passed to Execute, in the container of the component
// referenced by the command.
|
||||
func newExecCommand(devfileObj parser.DevfileObj, command v1alpha2.Command) *execCommand {
|
||||
return &execCommand{
|
||||
command: command,
|
||||
devfileObj: devfileObj,
|
||||
}
|
||||
}
|
||||
|
||||
func (o *execCommand) CheckValidity() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *execCommand) Execute(handler Handler) error {
|
||||
return handler.Execute(o.command)
|
||||
}
|
||||
88
pkg/libdevfile/command_test.go
Normal file
@@ -0,0 +1,88 @@
|
||||
package libdevfile
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
"github.com/devfile/library/pkg/devfile/parser"
|
||||
"github.com/devfile/library/pkg/devfile/parser/data"
|
||||
"github.com/redhat-developer/odo/pkg/libdevfile/generator"
|
||||
"k8s.io/utils/pointer"
|
||||
)
|
||||
|
||||
func Test_newCommand(t *testing.T) {
|
||||
|
||||
execCommand := generator.GetExecCommand(generator.ExecCommandParams{
|
||||
Kind: v1alpha2.RunCommandGroupKind,
|
||||
Id: "exec-command",
|
||||
IsDefault: pointer.BoolPtr(true),
|
||||
})
|
||||
compositeCommand := generator.GetCompositeCommand(generator.CompositeCommandParams{
|
||||
Kind: v1alpha2.DeployCommandGroupKind,
|
||||
Id: "composite-command",
|
||||
IsDefault: pointer.BoolPtr(true),
|
||||
})
|
||||
applyCommand := generator.GetApplyCommand(generator.ApplyCommandParams{
|
||||
Kind: v1alpha2.DeployCommandGroupKind,
|
||||
Id: "apply-command",
|
||||
IsDefault: pointer.BoolPtr(false),
|
||||
})
|
||||
|
||||
data, _ := data.NewDevfileData(string(data.APISchemaVersion200))
|
||||
_ = data.AddCommands([]v1alpha2.Command{execCommand, compositeCommand, applyCommand})
|
||||
devfileObj := parser.DevfileObj{
|
||||
Data: data,
|
||||
}
|
||||
|
||||
type args struct {
|
||||
devfileObj parser.DevfileObj
|
||||
devfileCmd v1alpha2.Command
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantType string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "exec command",
|
||||
args: args{
|
||||
devfileObj: devfileObj,
|
||||
devfileCmd: execCommand,
|
||||
},
|
||||
wantType: "*libdevfile.execCommand",
|
||||
},
|
||||
{
|
||||
name: "composite command",
|
||||
args: args{
|
||||
devfileObj: devfileObj,
|
||||
devfileCmd: compositeCommand,
|
||||
},
|
||||
wantType: "*libdevfile.compositeCommand",
|
||||
},
|
||||
{
|
||||
name: "apply command",
|
||||
args: args{
|
||||
devfileObj: devfileObj,
|
||||
devfileCmd: applyCommand,
|
||||
},
|
||||
wantType: "*libdevfile.applyCommand",
|
||||
},
|
||||
// TODO: Add test cases.
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := newCommand(tt.args.devfileObj, tt.args.devfileCmd)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("newCommand() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
gotType := fmt.Sprintf("%T", got)
|
||||
if gotType != tt.wantType {
|
||||
t.Errorf("newCommand() type = %v, want %v", got, tt.wantType)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
44
pkg/libdevfile/component.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package libdevfile
|
||||
|
||||
import (
|
||||
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
"github.com/devfile/library/pkg/devfile/parser"
|
||||
"github.com/devfile/library/pkg/devfile/parser/data/v2/common"
|
||||
)
|
||||
|
||||
type component interface {
|
||||
CheckValidity() error
|
||||
Apply(handler Handler) error
|
||||
}
|
||||
|
||||
// newComponent creates a concrete component, based on its type
|
||||
func newComponent(devfileObj parser.DevfileObj, devfileCmp v1alpha2.Component) (component, error) {
|
||||
var cmp component
|
||||
|
||||
componentType, err := common.GetComponentType(devfileCmp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch componentType {
|
||||
|
||||
case v1alpha2.ContainerComponentType:
|
||||
cmp = newContainerComponent(devfileObj, devfileCmp)
|
||||
|
||||
case v1alpha2.KubernetesComponentType:
|
||||
cmp = newKubernetesComponent(devfileObj, devfileCmp)
|
||||
|
||||
case v1alpha2.OpenshiftComponentType:
|
||||
cmp = newOpenshiftComponent(devfileObj, devfileCmp)
|
||||
|
||||
case v1alpha2.VolumeComponentType:
|
||||
cmp = newVolumeComponent(devfileObj, devfileCmp)
|
||||
|
||||
case v1alpha2.ImageComponentType:
|
||||
cmp = newImageComponent(devfileObj, devfileCmp)
|
||||
}
|
||||
|
||||
if err := cmp.CheckValidity(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cmp, nil
|
||||
}
|
||||
27
pkg/libdevfile/component_container.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package libdevfile
|
||||
|
||||
import (
|
||||
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
"github.com/devfile/library/pkg/devfile/parser"
|
||||
)
|
||||
|
||||
// containerComponent implements the component interface
|
||||
type containerComponent struct {
|
||||
component v1alpha2.Component
|
||||
devfileObj parser.DevfileObj
|
||||
}
|
||||
|
||||
func newContainerComponent(devfileObj parser.DevfileObj, component v1alpha2.Component) *containerComponent {
|
||||
return &containerComponent{
|
||||
component: component,
|
||||
devfileObj: devfileObj,
|
||||
}
|
||||
}
|
||||
|
||||
func (e *containerComponent) CheckValidity() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *containerComponent) Apply(handler Handler) error {
|
||||
return nil
|
||||
}
|
||||
27
pkg/libdevfile/component_image.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package libdevfile
|
||||
|
||||
import (
|
||||
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
"github.com/devfile/library/pkg/devfile/parser"
|
||||
)
|
||||
|
||||
// imageComponent implements the component interface
|
||||
type imageComponent struct {
|
||||
component v1alpha2.Component
|
||||
devfileObj parser.DevfileObj
|
||||
}
|
||||
|
||||
func newImageComponent(devfileObj parser.DevfileObj, component v1alpha2.Component) *imageComponent {
|
||||
return &imageComponent{
|
||||
component: component,
|
||||
devfileObj: devfileObj,
|
||||
}
|
||||
}
|
||||
|
||||
func (e *imageComponent) CheckValidity() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *imageComponent) Apply(handler Handler) error {
|
||||
return handler.ApplyImage(e.component)
|
||||
}
|
||||
27
pkg/libdevfile/component_kubernetes.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package libdevfile
|
||||
|
||||
import (
|
||||
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
"github.com/devfile/library/pkg/devfile/parser"
|
||||
)
|
||||
|
||||
// kubernetesComponent implements the component interface
|
||||
type kubernetesComponent struct {
|
||||
component v1alpha2.Component
|
||||
devfileObj parser.DevfileObj
|
||||
}
|
||||
|
||||
func newKubernetesComponent(devfileObj parser.DevfileObj, component v1alpha2.Component) *kubernetesComponent {
|
||||
return &kubernetesComponent{
|
||||
component: component,
|
||||
devfileObj: devfileObj,
|
||||
}
|
||||
}
|
||||
|
||||
func (e *kubernetesComponent) CheckValidity() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *kubernetesComponent) Apply(handler Handler) error {
|
||||
return handler.ApplyKubernetes(e.component)
|
||||
}
|
||||
27
pkg/libdevfile/component_openshift.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package libdevfile
|
||||
|
||||
import (
|
||||
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
"github.com/devfile/library/pkg/devfile/parser"
|
||||
)
|
||||
|
||||
// openshiftComponent implements the component interface
|
||||
type openshiftComponent struct {
|
||||
component v1alpha2.Component
|
||||
devfileObj parser.DevfileObj
|
||||
}
|
||||
|
||||
func newOpenshiftComponent(devfileObj parser.DevfileObj, component v1alpha2.Component) *openshiftComponent {
|
||||
return &openshiftComponent{
|
||||
component: component,
|
||||
devfileObj: devfileObj,
|
||||
}
|
||||
}
|
||||
|
||||
func (e *openshiftComponent) CheckValidity() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *openshiftComponent) Apply(handler Handler) error {
|
||||
return nil
|
||||
}
|
||||
27
pkg/libdevfile/component_volume.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package libdevfile
|
||||
|
||||
import (
|
||||
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
"github.com/devfile/library/pkg/devfile/parser"
|
||||
)
|
||||
|
||||
// volumeComponent implements the component interface
|
||||
type volumeComponent struct {
|
||||
component v1alpha2.Component
|
||||
devfileObj parser.DevfileObj
|
||||
}
|
||||
|
||||
func newVolumeComponent(devfileObj parser.DevfileObj, component v1alpha2.Component) *volumeComponent {
|
||||
return &volumeComponent{
|
||||
component: component,
|
||||
devfileObj: devfileObj,
|
||||
}
|
||||
}
|
||||
|
||||
func (e *volumeComponent) CheckValidity() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *volumeComponent) Apply(handler Handler) error {
|
||||
return nil
|
||||
}
|
||||
75
pkg/libdevfile/errors.go
Normal file
@@ -0,0 +1,75 @@
|
||||
package libdevfile
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
)
|
||||
|
||||
// NoCommandFoundError is returned when no command of the specified kind is found in devfile
|
||||
type NoCommandFoundError struct {
|
||||
kind v1alpha2.CommandGroupKind
|
||||
}
|
||||
|
||||
func NewNoCommandFoundError(kind v1alpha2.CommandGroupKind) NoCommandFoundError {
|
||||
return NoCommandFoundError{
|
||||
kind: kind,
|
||||
}
|
||||
}
|
||||
func (e NoCommandFoundError) Error() string {
|
||||
return fmt.Sprintf("no %s command found in devfile", e.kind)
|
||||
}
|
||||
|
||||
// NoDefaultCommandFoundError is returned when several commands of the specified kind exist
|
||||
// but none of them is marked as the default one
|
||||
type NoDefaultCommandFoundError struct {
|
||||
kind v1alpha2.CommandGroupKind
|
||||
}
|
||||
|
||||
func NewNoDefaultCommandFoundError(kind v1alpha2.CommandGroupKind) NoDefaultCommandFoundError {
|
||||
return NoDefaultCommandFoundError{
|
||||
kind: kind,
|
||||
}
|
||||
}
|
||||
func (e NoDefaultCommandFoundError) Error() string {
|
||||
return fmt.Sprintf("no default %s command found in devfile", e.kind)
|
||||
}
|
||||
|
||||
// MoreThanOneDefaultCommandFoundError is returned when several default commands of the specified kind exist
|
||||
type MoreThanOneDefaultCommandFoundError struct {
|
||||
kind v1alpha2.CommandGroupKind
|
||||
}
|
||||
|
||||
func NewMoreThanOneDefaultCommandFoundError(kind v1alpha2.CommandGroupKind) MoreThanOneDefaultCommandFoundError {
|
||||
return MoreThanOneDefaultCommandFoundError{
|
||||
kind: kind,
|
||||
}
|
||||
}
|
||||
func (e MoreThanOneDefaultCommandFoundError) Error() string {
|
||||
return fmt.Sprintf("more than one default %s command found in devfile, this should not happen", e.kind)
|
||||
}
|
||||
|
||||
// ComponentNotExistError is returned when a component referenced in a command or component does not exist
|
||||
type ComponentNotExistError struct {
|
||||
name string
|
||||
}
|
||||
|
||||
func NewComponentNotExistError(name string) ComponentNotExistError {
|
||||
return ComponentNotExistError{
|
||||
name: name,
|
||||
}
|
||||
}
|
||||
|
||||
func (e ComponentNotExistError) Error() string {
|
||||
return fmt.Sprintf("component %q does not exists", e.name)
|
||||
}
|
||||
|
||||
type ComponentsWithSameNameError struct{}
|
||||
|
||||
func NewComponentsWithSameNameError() ComponentsWithSameNameError {
|
||||
return ComponentsWithSameNameError{}
|
||||
}
|
||||
|
||||
func (e ComponentsWithSameNameError) Error() string {
|
||||
return "more than one component with the same name, should not happen"
|
||||
}
|
||||
127
pkg/libdevfile/generator/command.go
Normal file
@@ -0,0 +1,127 @@
|
||||
package generator
|
||||
|
||||
import (
|
||||
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
"github.com/devfile/api/v2/pkg/attributes"
|
||||
)
|
||||
|
||||
type CompositeCommandParams struct {
|
||||
Id string
|
||||
Attributes *attributes.Attributes
|
||||
|
||||
Commands []string
|
||||
Parallel *bool
|
||||
|
||||
Label *string
|
||||
Kind v1alpha2.CommandGroupKind
|
||||
IsDefault *bool
|
||||
}
|
||||
|
||||
func GetCompositeCommand(params CompositeCommandParams) v1alpha2.Command {
|
||||
cmd := v1alpha2.Command{
|
||||
Id: params.Id,
|
||||
CommandUnion: v1alpha2.CommandUnion{
|
||||
Composite: &v1alpha2.CompositeCommand{
|
||||
LabeledCommand: v1alpha2.LabeledCommand{
|
||||
BaseCommand: v1alpha2.BaseCommand{
|
||||
Group: &v1alpha2.CommandGroup{
|
||||
Kind: params.Kind,
|
||||
IsDefault: params.IsDefault,
|
||||
},
|
||||
},
|
||||
},
|
||||
Commands: params.Commands,
|
||||
Parallel: params.Parallel,
|
||||
},
|
||||
},
|
||||
}
|
||||
if params.Attributes != nil {
|
||||
cmd.Attributes = *params.Attributes
|
||||
}
|
||||
if params.Label != nil {
|
||||
cmd.Composite.Label = *params.Label
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
type ExecCommandParams struct {
|
||||
Id string
|
||||
Attributes *attributes.Attributes
|
||||
|
||||
CommandLine string
|
||||
Component string
|
||||
WorkingDir string
|
||||
Env []v1alpha2.EnvVar
|
||||
HotReloadCapable *bool
|
||||
|
||||
Label *string
|
||||
Kind v1alpha2.CommandGroupKind
|
||||
IsDefault *bool
|
||||
}
|
||||
|
||||
func GetExecCommand(params ExecCommandParams) v1alpha2.Command {
|
||||
cmd := v1alpha2.Command{
|
||||
Id: params.Id,
|
||||
CommandUnion: v1alpha2.CommandUnion{
|
||||
Exec: &v1alpha2.ExecCommand{
|
||||
LabeledCommand: v1alpha2.LabeledCommand{
|
||||
BaseCommand: v1alpha2.BaseCommand{
|
||||
Group: &v1alpha2.CommandGroup{
|
||||
Kind: params.Kind,
|
||||
IsDefault: params.IsDefault,
|
||||
},
|
||||
},
|
||||
},
|
||||
CommandLine: params.CommandLine,
|
||||
Component: params.Component,
|
||||
WorkingDir: params.WorkingDir,
|
||||
Env: params.Env,
|
||||
HotReloadCapable: params.HotReloadCapable,
|
||||
},
|
||||
},
|
||||
}
|
||||
if params.Attributes != nil {
|
||||
cmd.Attributes = *params.Attributes
|
||||
}
|
||||
if params.Label != nil {
|
||||
cmd.Exec.Label = *params.Label
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
type ApplyCommandParams struct {
|
||||
Id string
|
||||
Attributes *attributes.Attributes
|
||||
|
||||
Component string
|
||||
|
||||
Label *string
|
||||
Kind v1alpha2.CommandGroupKind
|
||||
IsDefault *bool
|
||||
}
|
||||
|
||||
func GetApplyCommand(params ApplyCommandParams) v1alpha2.Command {
|
||||
cmd := v1alpha2.Command{
|
||||
Id: params.Id,
|
||||
CommandUnion: v1alpha2.CommandUnion{
|
||||
Apply: &v1alpha2.ApplyCommand{
|
||||
LabeledCommand: v1alpha2.LabeledCommand{
|
||||
BaseCommand: v1alpha2.BaseCommand{
|
||||
Group: &v1alpha2.CommandGroup{
|
||||
Kind: params.Kind,
|
||||
IsDefault: params.IsDefault,
|
||||
},
|
||||
},
|
||||
},
|
||||
Component: params.Component,
|
||||
},
|
||||
},
|
||||
}
|
||||
if params.Attributes != nil {
|
||||
cmd.Attributes = *params.Attributes
|
||||
}
|
||||
if params.Label != nil {
|
||||
cmd.Apply.Label = *params.Label
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
72
pkg/libdevfile/generator/component.go
Normal file
@@ -0,0 +1,72 @@
|
||||
package generator
|
||||
|
||||
import (
|
||||
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
"github.com/devfile/api/v2/pkg/attributes"
|
||||
)
|
||||
|
||||
type ContainerComponentParams struct {
|
||||
Name string
|
||||
Attributes *attributes.Attributes
|
||||
|
||||
Container v1alpha2.Container
|
||||
Endpoints []v1alpha2.Endpoint
|
||||
}
|
||||
|
||||
func GetContainerComponent(params ContainerComponentParams) v1alpha2.Component {
|
||||
cmp := v1alpha2.Component{
|
||||
Name: params.Name,
|
||||
ComponentUnion: v1alpha2.ComponentUnion{
|
||||
Container: &v1alpha2.ContainerComponent{
|
||||
Container: params.Container,
|
||||
Endpoints: params.Endpoints,
|
||||
},
|
||||
},
|
||||
}
|
||||
if params.Attributes != nil {
|
||||
cmp.Attributes = *params.Attributes
|
||||
}
|
||||
return cmp
|
||||
}
|
||||
|
||||
type ImageComponentParams struct {
|
||||
Name string
|
||||
Attributes *attributes.Attributes
|
||||
|
||||
Image v1alpha2.Image
|
||||
}
|
||||
|
||||
func GetImageComponent(params ImageComponentParams) v1alpha2.Component {
|
||||
cmp := v1alpha2.Component{
|
||||
Name: params.Name,
|
||||
ComponentUnion: v1alpha2.ComponentUnion{
|
||||
Image: &v1alpha2.ImageComponent{
|
||||
Image: params.Image,
|
||||
},
|
||||
},
|
||||
}
|
||||
if params.Attributes != nil {
|
||||
cmp.Attributes = *params.Attributes
|
||||
}
|
||||
return cmp
|
||||
}
|
||||
|
||||
type KubernetesComponentParams struct {
|
||||
Name string
|
||||
Attributes *attributes.Attributes
|
||||
|
||||
Kubernetes *v1alpha2.KubernetesComponent
|
||||
}
|
||||
|
||||
func GetKubernetesComponent(params KubernetesComponentParams) v1alpha2.Component {
|
||||
cmp := v1alpha2.Component{
|
||||
Name: params.Name,
|
||||
ComponentUnion: v1alpha2.ComponentUnion{
|
||||
Kubernetes: params.Kubernetes,
|
||||
},
|
||||
}
|
||||
if params.Attributes != nil {
|
||||
cmp.Attributes = *params.Attributes
|
||||
}
|
||||
return cmp
|
||||
}
|
||||
77
pkg/libdevfile/handler_mock.go
Normal file
@@ -0,0 +1,77 @@
|
||||
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: pkg/libdevfile/libdevfile.go
|
||||
|
||||
// Package libdevfile is a generated GoMock package.
|
||||
package libdevfile
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
|
||||
v1alpha2 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
gomock "github.com/golang/mock/gomock"
|
||||
)
|
||||
|
||||
// MockHandler is a mock of Handler interface.
|
||||
type MockHandler struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockHandlerMockRecorder
|
||||
}
|
||||
|
||||
// MockHandlerMockRecorder is the mock recorder for MockHandler.
|
||||
type MockHandlerMockRecorder struct {
|
||||
mock *MockHandler
|
||||
}
|
||||
|
||||
// NewMockHandler creates a new mock instance.
|
||||
func NewMockHandler(ctrl *gomock.Controller) *MockHandler {
|
||||
mock := &MockHandler{ctrl: ctrl}
|
||||
mock.recorder = &MockHandlerMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||
func (m *MockHandler) EXPECT() *MockHandlerMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// ApplyImage mocks base method.
|
||||
func (m *MockHandler) ApplyImage(image v1alpha2.Component) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ApplyImage", image)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// ApplyImage indicates an expected call of ApplyImage.
|
||||
func (mr *MockHandlerMockRecorder) ApplyImage(image interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyImage", reflect.TypeOf((*MockHandler)(nil).ApplyImage), image)
|
||||
}
|
||||
|
||||
// ApplyKubernetes mocks base method.
|
||||
func (m *MockHandler) ApplyKubernetes(kubernetes v1alpha2.Component) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ApplyKubernetes", kubernetes)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// ApplyKubernetes indicates an expected call of ApplyKubernetes.
|
||||
func (mr *MockHandlerMockRecorder) ApplyKubernetes(kubernetes interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyKubernetes", reflect.TypeOf((*MockHandler)(nil).ApplyKubernetes), kubernetes)
|
||||
}
|
||||
|
||||
// Execute mocks base method.
|
||||
func (m *MockHandler) Execute(command v1alpha2.Command) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Execute", command)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Execute indicates an expected call of Execute.
|
||||
func (mr *MockHandlerMockRecorder) Execute(command interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Execute", reflect.TypeOf((*MockHandler)(nil).Execute), command)
|
||||
}
|
||||
121
pkg/libdevfile/libdevfile.go
Normal file
@@ -0,0 +1,121 @@
|
||||
package libdevfile
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
"github.com/devfile/library/pkg/devfile/parser"
|
||||
"github.com/devfile/library/pkg/devfile/parser/data/v2/common"
|
||||
)
|
||||
|
||||
type Handler interface {
|
||||
ApplyImage(image v1alpha2.Component) error
|
||||
ApplyKubernetes(kubernetes v1alpha2.Component) error
|
||||
Execute(command v1alpha2.Command) error
|
||||
}
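// The Handler interface above is the single extension point callers of libdevfile
// implement. As an illustration only (not part of this change, the name logHandler
// is hypothetical), a no-op handler that merely reports what it is asked to do
// could look like:
//
//	type logHandler struct{}
//
//	func (h logHandler) ApplyImage(image v1alpha2.Component) error {
//		fmt.Printf("would build/push image component %q\n", image.Name)
//		return nil
//	}
//
//	func (h logHandler) ApplyKubernetes(kubernetes v1alpha2.Component) error {
//		fmt.Printf("would apply Kubernetes component %q\n", kubernetes.Name)
//		return nil
//	}
//
//	func (h logHandler) Execute(command v1alpha2.Command) error {
//		fmt.Printf("would exec command %q\n", command.Id)
//		return nil
//	}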
|
||||
|
||||
// Deploy executes the default Deploy command of the devfile
|
||||
func Deploy(devfileObj parser.DevfileObj, handler Handler) error {
|
||||
deployCommand, err := getDefaultCommand(devfileObj, v1alpha2.DeployCommandGroupKind)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return executeCommand(devfileObj, deployCommand, handler)
|
||||
}
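// Hedged usage sketch (the devfileObj and handler values are assumed to exist):
//
//	// devfileObj := ... parsed devfile
//	// handler := ... a Handler implementation, e.g. odo's deploy or undeploy handler
//	if err := Deploy(devfileObj, handler); err != nil {
//		return err
//	}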
|
||||
|
||||
// getDefaultCommand returns the default command of the given kind in the devfile.
|
||||
// If only one command of the kind exists, it is returned, even if it is not marked as default
|
||||
func getDefaultCommand(devfileObj parser.DevfileObj, kind v1alpha2.CommandGroupKind) (v1alpha2.Command, error) {
|
||||
groupCmds, err := devfileObj.Data.GetCommands(common.DevfileOptions{
|
||||
CommandOptions: common.CommandOptions{
|
||||
CommandGroupKind: kind,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return v1alpha2.Command{}, err
|
||||
}
|
||||
if len(groupCmds) == 0 {
|
||||
return v1alpha2.Command{}, NewNoCommandFoundError(kind)
|
||||
}
|
||||
if len(groupCmds) > 1 {
|
||||
var found bool
|
||||
var foundGroupCmd v1alpha2.Command
|
||||
for _, groupCmd := range groupCmds {
|
||||
group := common.GetGroup(groupCmd)
|
||||
if group == nil {
|
||||
continue
|
||||
}
|
||||
if group.IsDefault != nil && *group.IsDefault {
|
||||
if found {
|
||||
return v1alpha2.Command{}, NewMoreThanOneDefaultCommandFoundError(kind)
|
||||
}
|
||||
found = true
|
||||
foundGroupCmd = groupCmd
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return v1alpha2.Command{}, NewNoDefaultCommandFoundError(kind)
|
||||
}
|
||||
return foundGroupCmd, nil
|
||||
}
|
||||
return groupCmds[0], nil
|
||||
}
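// Worked example of the selection rules above (illustrative): for deploy commands
// {a (default), b, c} the result is a; for {a} alone it is a, even without
// isDefault; for {a, b} with no default, NoDefaultCommandFoundError is returned;
// for {a (default), b (default)}, MoreThanOneDefaultCommandFoundError is returned.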
|
||||
|
||||
// executeCommand executes a specific command of a devfile using handler as backend
|
||||
func executeCommand(devfileObj parser.DevfileObj, command v1alpha2.Command, handler Handler) error {
|
||||
cmd, err := newCommand(devfileObj, command)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmd.Execute(handler)
|
||||
}
|
||||
|
||||
func HasPostStartEvents(devfileObj parser.DevfileObj) bool {
|
||||
postStartEvents := devfileObj.Data.GetEvents().PostStart
|
||||
return len(postStartEvents) > 0
|
||||
}
|
||||
|
||||
func HasPreStopEvents(devfileObj parser.DevfileObj) bool {
|
||||
preStopEvents := devfileObj.Data.GetEvents().PreStop
|
||||
return len(preStopEvents) > 0
|
||||
}
|
||||
|
||||
func ExecPostStartEvents(devfileObj parser.DevfileObj, componentName string, handler Handler) error {
|
||||
postStartEvents := devfileObj.Data.GetEvents().PostStart
|
||||
return execDevfileEvent(devfileObj, postStartEvents, handler)
|
||||
}
|
||||
|
||||
func ExecPreStopEvents(devfileObj parser.DevfileObj, componentName string, handler Handler) error {
|
||||
preStopEvents := devfileObj.Data.GetEvents().PreStop
|
||||
return execDevfileEvent(devfileObj, preStopEvents, handler)
|
||||
}
|
||||
|
||||
// execDevfileEvent receives the list of commands bound to a devfile event (postStart, preStop, etc.) and loops through them
|
||||
// Each Devfile Command associated with the given event is retrieved, and executed in the container specified
|
||||
// in the command
|
||||
func execDevfileEvent(devfileObj parser.DevfileObj, events []string, handler Handler) error {
|
||||
if len(events) > 0 {
|
||||
commandMap, err := allCommandsMap(devfileObj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, commandName := range events {
|
||||
command, ok := commandMap[commandName]
|
||||
if !ok {
|
||||
return fmt.Errorf("unable to find devfile command %q", commandName)
|
||||
}
|
||||
|
||||
c, err := newCommand(devfileObj, command)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Execute command in container
|
||||
err = c.Execute(handler)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to execute devfile command %q: %w", commandName, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
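// Illustrative usage (hedged; devfileObj, componentName and handler are assumed to
// exist): running the postStart events of a component once its containers are up,
// where handler executes each command in the container named by the command.
//
//	if libdevfile.HasPostStartEvents(devfileObj) {
//		if err := libdevfile.ExecPostStartEvents(devfileObj, componentName, handler); err != nil {
//			return err
//		}
//	}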
|
||||
250
pkg/libdevfile/libdevfile_test.go
Normal file
@@ -0,0 +1,250 @@
|
||||
package libdevfile
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
"github.com/devfile/library/pkg/devfile/parser"
|
||||
"github.com/devfile/library/pkg/devfile/parser/data"
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/redhat-developer/odo/pkg/libdevfile/generator"
|
||||
"k8s.io/utils/pointer"
|
||||
)
|
||||
|
||||
func Test_getDefaultCommand(t *testing.T) {
|
||||
|
||||
runDefault1 := generator.GetExecCommand(generator.ExecCommandParams{
|
||||
Kind: v1alpha2.RunCommandGroupKind,
|
||||
Id: "run-default-1",
|
||||
IsDefault: pointer.BoolPtr(true),
|
||||
})
|
||||
deployDefault1 := generator.GetCompositeCommand(generator.CompositeCommandParams{
|
||||
Kind: v1alpha2.DeployCommandGroupKind,
|
||||
Id: "deploy-default-1",
|
||||
IsDefault: pointer.BoolPtr(true),
|
||||
})
|
||||
deployDefault2 := generator.GetExecCommand(generator.ExecCommandParams{
|
||||
Kind: v1alpha2.DeployCommandGroupKind,
|
||||
Id: "deploy-default-2",
|
||||
IsDefault: pointer.BoolPtr(true),
|
||||
})
|
||||
deployNoDefault1 := generator.GetApplyCommand(generator.ApplyCommandParams{
|
||||
Kind: v1alpha2.DeployCommandGroupKind,
|
||||
Id: "deploy-no-default-1",
|
||||
IsDefault: pointer.BoolPtr(false),
|
||||
})
|
||||
deployUnspecDefault1 := generator.GetCompositeCommand(generator.CompositeCommandParams{
|
||||
Kind: v1alpha2.DeployCommandGroupKind,
|
||||
Id: "deploy-unspec-default-1",
|
||||
IsDefault: nil,
|
||||
})
|
||||
|
||||
type args struct {
|
||||
devfileObj func() parser.DevfileObj
|
||||
kind v1alpha2.CommandGroupKind
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want v1alpha2.Command
|
||||
wantErr error
|
||||
}{
|
||||
{
|
||||
name: "a single deploy command, default",
|
||||
args: args{
|
||||
devfileObj: func() parser.DevfileObj {
|
||||
data, _ := data.NewDevfileData(string(data.APISchemaVersion200))
|
||||
_ = data.AddCommands([]v1alpha2.Command{runDefault1, deployDefault1})
|
||||
return parser.DevfileObj{
|
||||
Data: data,
|
||||
}
|
||||
},
|
||||
kind: v1alpha2.DeployCommandGroupKind,
|
||||
},
|
||||
wantErr: nil,
|
||||
want: deployDefault1,
|
||||
},
|
||||
{
|
||||
name: "a single deploy command, not default",
|
||||
args: args{
|
||||
devfileObj: func() parser.DevfileObj {
|
||||
data, _ := data.NewDevfileData(string(data.APISchemaVersion200))
|
||||
_ = data.AddCommands([]v1alpha2.Command{runDefault1, deployNoDefault1})
|
||||
return parser.DevfileObj{
|
||||
Data: data,
|
||||
}
|
||||
},
|
||||
kind: v1alpha2.DeployCommandGroupKind,
|
||||
},
|
||||
wantErr: nil,
|
||||
want: deployNoDefault1,
|
||||
},
|
||||
{
|
||||
name: "a single deploy command, unspecified default",
|
||||
args: args{
|
||||
devfileObj: func() parser.DevfileObj {
|
||||
data, _ := data.NewDevfileData(string(data.APISchemaVersion200))
|
||||
_ = data.AddCommands([]v1alpha2.Command{runDefault1, deployUnspecDefault1})
|
||||
return parser.DevfileObj{
|
||||
Data: data,
|
||||
}
|
||||
},
|
||||
kind: v1alpha2.DeployCommandGroupKind,
|
||||
},
|
||||
wantErr: nil,
|
||||
want: deployUnspecDefault1,
|
||||
},
|
||||
{
|
||||
name: "several deploy commands, only one is default",
|
||||
args: args{
|
||||
devfileObj: func() parser.DevfileObj {
|
||||
data, _ := data.NewDevfileData(string(data.APISchemaVersion200))
|
||||
_ = data.AddCommands([]v1alpha2.Command{runDefault1, deployDefault1, deployNoDefault1, deployUnspecDefault1})
|
||||
return parser.DevfileObj{
|
||||
Data: data,
|
||||
}
|
||||
},
|
||||
kind: v1alpha2.DeployCommandGroupKind,
|
||||
},
|
||||
wantErr: nil,
|
||||
want: deployDefault1,
|
||||
},
|
||||
{
|
||||
name: "no deploy command",
|
||||
args: args{
|
||||
devfileObj: func() parser.DevfileObj {
|
||||
data, _ := data.NewDevfileData(string(data.APISchemaVersion200))
|
||||
_ = data.AddCommands([]v1alpha2.Command{runDefault1})
|
||||
return parser.DevfileObj{
|
||||
Data: data,
|
||||
}
|
||||
},
|
||||
kind: v1alpha2.DeployCommandGroupKind,
|
||||
},
|
||||
wantErr: NewNoCommandFoundError(v1alpha2.DeployCommandGroupKind),
|
||||
},
|
||||
{
|
||||
name: "two deploy default commands",
|
||||
args: args{
|
||||
devfileObj: func() parser.DevfileObj {
|
||||
data, _ := data.NewDevfileData(string(data.APISchemaVersion200))
|
||||
_ = data.AddCommands([]v1alpha2.Command{runDefault1, deployDefault1, deployDefault2})
|
||||
return parser.DevfileObj{
|
||||
Data: data,
|
||||
}
|
||||
},
|
||||
kind: v1alpha2.DeployCommandGroupKind,
|
||||
},
|
||||
wantErr: NewMoreThanOneDefaultCommandFoundError(v1alpha2.DeployCommandGroupKind),
|
||||
},
|
||||
{
|
||||
name: "two deploy commands, no one is default",
|
||||
args: args{
|
||||
devfileObj: func() parser.DevfileObj {
|
||||
data, _ := data.NewDevfileData(string(data.APISchemaVersion200))
|
||||
_ = data.AddCommands([]v1alpha2.Command{runDefault1, deployNoDefault1, deployUnspecDefault1})
|
||||
return parser.DevfileObj{
|
||||
Data: data,
|
||||
}
|
||||
},
|
||||
kind: v1alpha2.DeployCommandGroupKind,
|
||||
},
|
||||
wantErr: NewNoDefaultCommandFoundError(v1alpha2.DeployCommandGroupKind),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := getDefaultCommand(tt.args.devfileObj(), tt.args.kind)
|
||||
if err != tt.wantErr {
|
||||
t.Errorf("getDefaultCommand() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("getDefaultCommand() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeploy(t *testing.T) {
|
||||
deployDefault1 := generator.GetCompositeCommand(generator.CompositeCommandParams{
|
||||
Kind: v1alpha2.DeployCommandGroupKind,
|
||||
Id: "deploy-default-1",
|
||||
IsDefault: pointer.BoolPtr(true),
|
||||
Commands: []string{"image-command", "deployment-command", "service-command"},
|
||||
})
|
||||
applyImageCommand := generator.GetApplyCommand(generator.ApplyCommandParams{
|
||||
Kind: v1alpha2.DeployCommandGroupKind,
|
||||
Id: "image-command",
|
||||
IsDefault: pointer.BoolPtr(false),
|
||||
Component: "image-component",
|
||||
})
|
||||
applyDeploymentCommand := generator.GetApplyCommand(generator.ApplyCommandParams{
|
||||
Kind: v1alpha2.DeployCommandGroupKind,
|
||||
Id: "deployment-command",
|
||||
IsDefault: pointer.BoolPtr(false),
|
||||
Component: "deployment-component",
|
||||
})
|
||||
applyServiceCommand := generator.GetApplyCommand(generator.ApplyCommandParams{
|
||||
Kind: v1alpha2.DeployCommandGroupKind,
|
||||
Id: "service-command",
|
||||
IsDefault: pointer.BoolPtr(false),
|
||||
Component: "service-component",
|
||||
})
|
||||
|
||||
imageComponent := generator.GetImageComponent(generator.ImageComponentParams{
|
||||
Name: "image-component",
|
||||
Image: v1alpha2.Image{
|
||||
ImageName: "an-image-name",
|
||||
},
|
||||
})
|
||||
deploymentComponent := generator.GetKubernetesComponent(generator.KubernetesComponentParams{
|
||||
Name: "deployment-component",
|
||||
Kubernetes: &v1alpha2.KubernetesComponent{},
|
||||
})
|
||||
serviceComponent := generator.GetKubernetesComponent(generator.KubernetesComponentParams{
|
||||
Name: "service-component",
|
||||
Kubernetes: &v1alpha2.KubernetesComponent{},
|
||||
})
|
||||
|
||||
type args struct {
|
||||
devfileObj func() parser.DevfileObj
|
||||
handler func(ctrl *gomock.Controller) Handler
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "deploy an image and two kubernetes components",
|
||||
args: args{
|
||||
devfileObj: func() parser.DevfileObj {
|
||||
data, _ := data.NewDevfileData(string(data.APISchemaVersion200))
|
||||
_ = data.AddCommands([]v1alpha2.Command{deployDefault1, applyImageCommand, applyDeploymentCommand, applyServiceCommand})
|
||||
_ = data.AddComponents([]v1alpha2.Component{imageComponent, deploymentComponent, serviceComponent})
|
||||
return parser.DevfileObj{
|
||||
Data: data,
|
||||
}
|
||||
},
|
||||
handler: func(ctrl *gomock.Controller) Handler {
|
||||
h := NewMockHandler(ctrl)
|
||||
h.EXPECT().ApplyImage(imageComponent)
|
||||
h.EXPECT().ApplyKubernetes(deploymentComponent)
|
||||
h.EXPECT().ApplyKubernetes(serviceComponent)
|
||||
return h
|
||||
},
|
||||
},
|
||||
},
|
||||
// TODO: Add test cases.
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
if err := Deploy(tt.args.devfileObj(), tt.args.handler(ctrl)); (err != nil) != tt.wantErr {
|
||||
t.Errorf("Deploy() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
17
pkg/libdevfile/types.go
Normal file
@@ -0,0 +1,17 @@
|
||||
package libdevfile
|
||||
|
||||
type DevfileEventType string
|
||||
|
||||
const (
|
||||
// PreStart is a devfile event
|
||||
PreStart DevfileEventType = "preStart"
|
||||
|
||||
// PostStart is a devfile event
|
||||
PostStart DevfileEventType = "postStart"
|
||||
|
||||
// PreStop is a devfile event
|
||||
PreStop DevfileEventType = "preStop"
|
||||
|
||||
// PostStop is a devfile event
|
||||
PostStop DevfileEventType = "postStop"
|
||||
)
|
||||
@@ -2,6 +2,7 @@ package build_images
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/redhat-developer/odo/pkg/devfile/image"
|
||||
"github.com/redhat-developer/odo/pkg/odo/cmdline"
|
||||
@@ -58,7 +59,9 @@ func (o *BuildImagesOptions) Validate() (err error) {
|
||||
|
||||
// Run contains the logic for the odo command
|
||||
func (o *BuildImagesOptions) Run() (err error) {
|
||||
return image.BuildPushImages(o.Context, o.pushFlag)
|
||||
devfileObj := o.Context.EnvSpecificInfo.GetDevfileObj()
|
||||
path := filepath.Dir(o.Context.EnvSpecificInfo.GetDevfilePath())
|
||||
return image.BuildPushImages(devfileObj, path, o.pushFlag)
|
||||
}
|
||||
|
||||
// NewCmdLogin implements the odo command
|
||||
|
||||
@@ -10,8 +10,8 @@ import (
|
||||
|
||||
"github.com/redhat-developer/odo/pkg/devfile"
|
||||
"github.com/redhat-developer/odo/pkg/devfile/adapters/common"
|
||||
"github.com/redhat-developer/odo/pkg/devfile/adapters/kubernetes/component"
|
||||
"github.com/redhat-developer/odo/pkg/devfile/consts"
|
||||
"github.com/redhat-developer/odo/pkg/libdevfile"
|
||||
"github.com/redhat-developer/odo/pkg/log"
|
||||
projectCmd "github.com/redhat-developer/odo/pkg/odo/cli/project"
|
||||
"github.com/redhat-developer/odo/pkg/odo/cli/ui"
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
"github.com/redhat-developer/odo/pkg/odo/util/completion"
|
||||
"github.com/redhat-developer/odo/pkg/util"
|
||||
|
||||
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
dfutil "github.com/devfile/library/pkg/util"
|
||||
|
||||
"k8s.io/klog"
|
||||
@@ -98,7 +99,7 @@ func (do *DeleteOptions) Run() (err error) {
|
||||
err = do.DevfileUnDeploy()
|
||||
if err != nil {
|
||||
// if there is no component in the devfile to undeploy or if the devfile is non-existent, then skip the error log
|
||||
if errors.Is(err, &component.NoDefaultDeployCommandFoundError{}) || !devfileExists {
|
||||
if errors.Is(err, libdevfile.NewNoCommandFoundError(v1alpha2.DeployCommandGroupKind)) || !devfileExists {
|
||||
log.Printf("no kubernetes component to un-deploy")
|
||||
} else {
|
||||
log.Errorf("error occurred while un-deploying, cause: %v", err)
|
||||
|
||||
@@ -2,21 +2,28 @@ package component
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
dfutil "github.com/devfile/library/pkg/util"
|
||||
"github.com/redhat-developer/odo/pkg/devfile"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/redhat-developer/odo/pkg/envinfo"
|
||||
"github.com/redhat-developer/odo/pkg/machineoutput"
|
||||
"github.com/redhat-developer/odo/pkg/odo/genericclioptions"
|
||||
|
||||
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
|
||||
devfilefs "github.com/devfile/library/pkg/testingutil/filesystem"
|
||||
dfutil "github.com/devfile/library/pkg/util"
|
||||
|
||||
"github.com/redhat-developer/odo/pkg/component"
|
||||
componentlabels "github.com/redhat-developer/odo/pkg/component/labels"
|
||||
"github.com/redhat-developer/odo/pkg/devfile"
|
||||
"github.com/redhat-developer/odo/pkg/devfile/adapters"
|
||||
"github.com/redhat-developer/odo/pkg/devfile/adapters/common"
|
||||
"github.com/redhat-developer/odo/pkg/devfile/adapters/kubernetes"
|
||||
"github.com/redhat-developer/odo/pkg/envinfo"
|
||||
"github.com/redhat-developer/odo/pkg/kclient"
|
||||
"github.com/redhat-developer/odo/pkg/libdevfile"
|
||||
"github.com/redhat-developer/odo/pkg/log"
|
||||
"github.com/redhat-developer/odo/pkg/machineoutput"
|
||||
"github.com/redhat-developer/odo/pkg/odo/genericclioptions"
|
||||
"github.com/redhat-developer/odo/pkg/service"
|
||||
)
|
||||
|
||||
// DevfilePush has the logic to perform the required actions for a given devfile
@@ -114,23 +121,9 @@ func (po *PushOptions) devfilePushInner() (err error) {

// DevfileUnDeploy undeploys the devfile kubernetes components
func (do *DeleteOptions) DevfileUnDeploy() error {
    devObj, err := devfile.ParseAndValidateFromFile(do.GetDevfilePath())
    if err != nil {
        return err
    }

    componentName := do.EnvSpecificInfo.GetName()

    kc := kubernetes.KubernetesContext{
        Namespace: do.KClient.GetCurrentNamespace(),
    }

    devfileHandler, err := adapters.NewComponentAdapter(componentName, do.contextFlag, do.GetApplication(), devObj, kc)
    if err != nil {
        return err
    }

    return devfileHandler.UnDeploy()
    devfileObj := do.EnvSpecificInfo.GetDevfileObj()
    undeployHandler := newUndeployHandler(filepath.Dir(do.EnvSpecificInfo.GetDevfilePath()), do.KClient)
    return libdevfile.Deploy(devfileObj, undeployHandler)
}

// DevfileComponentDelete deletes the devfile component
@@ -139,18 +132,44 @@ func (do *DeleteOptions) DevfileComponentDelete() error {
    if err != nil {
        return err
    }

    componentName := do.EnvSpecificInfo.GetName()

    kc := kubernetes.KubernetesContext{
        Namespace: do.KClient.GetCurrentNamespace(),
    }

    labels := componentlabels.GetLabels(componentName, do.EnvSpecificInfo.GetApplication(), false)
    devfileHandler, err := adapters.NewComponentAdapter(componentName, do.contextFlag, do.GetApplication(), devObj, kc)
    return component.Delete(do.KClient, devObj, componentName, do.GetApplication(), labels, do.showLogFlag, do.waitFlag)
}

type undeployHandler struct {
    path       string
    kubeClient kclient.ClientInterface
}

func newUndeployHandler(path string, kubeClient kclient.ClientInterface) *undeployHandler {
    return &undeployHandler{
        path:       path,
        kubeClient: kubeClient,
    }
}

func (o *undeployHandler) ApplyImage(image v1alpha2.Component) error {
    return nil
}

func (o *undeployHandler) ApplyKubernetes(kubernetes v1alpha2.Component) error {
    // Parse the component's Kubernetes manifest
    u, err := service.GetK8sComponentAsUnstructured(kubernetes.Kubernetes, o.path, devfilefs.DefaultFs{})
    if err != nil {
        return err
    }

    return devfileHandler.Delete(labels, do.showLogFlag, do.waitFlag)
    // Get the REST mappings
    gvr, err := o.kubeClient.GetRestMappingFromUnstructured(u)
    if err != nil {
        return err
    }
    log.Printf("Un-deploying the Kubernetes %s: %s", u.GetKind(), u.GetName())
    // Un-deploy the K8s manifest
    return o.kubeClient.DeleteDynamicResource(u.GetName(), gvr.Resource.Group, gvr.Resource.Version, gvr.Resource.Resource)
}

func (o *undeployHandler) Execute(command v1alpha2.Command) error {
    return errors.New("Exec command is not implemented for Deploy")
}
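The undeployHandler above is a callback passed to libdevfile.Deploy: the library walks the devfile's default deploy command (including composites) and hands each image component, Kubernetes component, and exec command to the handler, so undeploy can reuse the same traversal while deleting instead of applying. Below is a minimal sketch of a handler with the same method set; it assumes (not confirmed by this excerpt) that these three methods are exactly what the libdevfile handler interface requires.

package example

import (
    "fmt"

    "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
)

// dryRunHandler only reports what the deploy traversal would do; it mirrors
// the method set of undeployHandler above.
type dryRunHandler struct{}

func (h dryRunHandler) ApplyImage(comp v1alpha2.Component) error {
    fmt.Printf("would build and push image component %q\n", comp.Name)
    return nil
}

func (h dryRunHandler) ApplyKubernetes(comp v1alpha2.Component) error {
    fmt.Printf("would apply Kubernetes component %q\n", comp.Name)
    return nil
}

func (h dryRunHandler) Execute(cmd v1alpha2.Command) error {
    return fmt.Errorf("exec command %q is not supported by this handler", cmd.Id)
}

// Usage would mirror DevfileUnDeploy above: libdevfile.Deploy(devfileObj, dryRunHandler{}).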
@@ -8,6 +8,7 @@ import (
    "github.com/pkg/errors"
    "github.com/spf13/cobra"

    "github.com/redhat-developer/odo/pkg/component"
    "github.com/redhat-developer/odo/pkg/devfile"
    "github.com/redhat-developer/odo/pkg/devfile/adapters"
    "github.com/redhat-developer/odo/pkg/devfile/adapters/common"
@@ -111,7 +112,7 @@ func (wo *WatchOptions) Validate() (err error) {
    if wo.debugCommandFlag != "" && wo.EnvSpecificInfo != nil && wo.EnvSpecificInfo.GetRunMode() != envinfo.Debug {
        return fmt.Errorf("please start the component in debug mode using `odo push --debug` to use the --debug-command flag")
    }
    exists, err := wo.initialDevfileHandler.DoesComponentExist(wo.EnvSpecificInfo.GetName(), wo.GetApplication())
    exists, err := component.ComponentExists(wo.KClient, wo.EnvSpecificInfo.GetName(), wo.GetApplication())
    if err != nil {
        return err
    }
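component.ComponentExists replaces the adapter's DoesComponentExist by checking for the component's Deployment directly through the Kubernetes client. A hypothetical helper (the function name and error message are illustrative, not from the source) showing the same guard in isolation:

package example

import (
    "fmt"

    "github.com/redhat-developer/odo/pkg/component"
    "github.com/redhat-developer/odo/pkg/kclient"
)

// ensureComponentPushed is a hypothetical guard: it fails fast when the
// component's Deployment is not present in the cluster yet.
func ensureComponentPushed(client kclient.ClientInterface, name, app string) error {
    exists, err := component.ComponentExists(client, name, app)
    if err != nil {
        return err
    }
    if !exists {
        return fmt.Errorf("component %q does not exist on the cluster; run `odo push` first", name)
    }
    return nil
}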
@@ -5,11 +5,12 @@ import (
    "os"
    "path/filepath"

    "github.com/pkg/errors"
    "github.com/spf13/cobra"

    "github.com/devfile/library/pkg/devfile"
    "github.com/devfile/library/pkg/devfile/parser"
    "github.com/pkg/errors"
    "github.com/redhat-developer/odo/pkg/devfile/adapters"
    "github.com/redhat-developer/odo/pkg/devfile/adapters/kubernetes"

    "github.com/redhat-developer/odo/pkg/devfile/location"
    "github.com/redhat-developer/odo/pkg/envinfo"
    "github.com/redhat-developer/odo/pkg/odo/cli/component"
@@ -18,7 +19,7 @@ import (
    "github.com/redhat-developer/odo/pkg/odo/genericclioptions/clientset"
    odoutil "github.com/redhat-developer/odo/pkg/odo/util"
    "github.com/redhat-developer/odo/pkg/testingutil/filesystem"
    "github.com/spf13/cobra"

    "k8s.io/kubectl/pkg/util/templates"
    "k8s.io/utils/pointer"
)
@@ -122,16 +123,10 @@ func (o *DeployOptions) Validate() error {

// Run contains the logic for the odo command
func (o *DeployOptions) Run() error {
    platformContext := kubernetes.KubernetesContext{
        Namespace: o.KClient.GetCurrentNamespace(),
    }

    devfileHandler, err := adapters.NewComponentAdapter(o.EnvSpecificInfo.GetName(), filepath.Dir(o.EnvSpecificInfo.GetDevfilePath()), o.GetApplication(), o.EnvSpecificInfo.GetDevfileObj(), platformContext)
    if err != nil {
        return err
    }

    return devfileHandler.Deploy()
    devfileObj := o.EnvSpecificInfo.GetDevfileObj()
    path := filepath.Dir(o.EnvSpecificInfo.GetDevfilePath())
    appName := o.GetApplication()
    return o.clientset.DeployClient.Deploy(devfileObj, path, appName)
}

// NewCmdDeploy implements the odo command
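Run() now delegates to a small deploy client injected through the clientset instead of constructing an adapter itself. The actual pkg/deploy interface is not part of this excerpt; the sketch below is inferred from the call sites seen here (Deploy(devfileObj, path, appName) and NewDeployClient(kubeClient)) and may differ from the real file.

package deploysketch

import (
    "github.com/devfile/library/pkg/devfile/parser"

    "github.com/redhat-developer/odo/pkg/kclient"
)

// Client is what DeployOptions.Run consumes: it runs the devfile's default
// deploy command against the current namespace.
type Client interface {
    Deploy(devfileObj parser.DevfileObj, path string, appName string) error
}

// NewDeployClient builds the client from the Kubernetes client, matching the
// DEPLOY wiring added to the clientset later in this diff.
func NewDeployClient(kubeClient kclient.ClientInterface) Client {
    return &kubernetesClient{kubeClient: kubeClient}
}

type kubernetesClient struct {
    kubeClient kclient.ClientInterface
}

func (c *kubernetesClient) Deploy(devfileObj parser.DevfileObj, path string, appName string) error {
    // The real implementation presumably delegates to libdevfile.Deploy with a
    // handler that builds images and applies Kubernetes components; omitted here.
    return nil
}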
@@ -147,7 +142,7 @@ func NewCmdDeploy(name, fullName string) *cobra.Command {
            genericclioptions.GenericRun(o, cmd, args)
        },
    }
    clientset.Add(deployCmd, clientset.INIT)
    clientset.Add(deployCmd, clientset.INIT, clientset.DEPLOY)

    // Add a defined annotation in order to appear in the help menu
    deployCmd.Annotations["command"] = "utility"
@@ -15,6 +15,7 @@ import (
    "github.com/spf13/cobra"

    "github.com/redhat-developer/odo/pkg/catalog"
    "github.com/redhat-developer/odo/pkg/deploy"
    _init "github.com/redhat-developer/odo/pkg/init"
    "github.com/redhat-developer/odo/pkg/init/registry"
    "github.com/redhat-developer/odo/pkg/kclient"
@@ -26,6 +27,8 @@ import (
const (
    // CATALOG instantiates client for pkg/catalog
    CATALOG = "DEP_CATALOG"
    // DEPLOY instantiates client for pkg/deploy
    DEPLOY = "DEP_DEPLOY"
    // FILESYSTEM instantiates client for pkg/testingutil/filesystem
    FILESYSTEM = "DEP_FILESYSTEM"
    // INIT instantiates client for pkg/init
@@ -48,6 +51,7 @@ const (
// Clients will be created only once and be reused for sub-dependencies
var subdeps map[string][]string = map[string][]string{
    CATALOG: {FILESYSTEM, PREFERENCE},
    DEPLOY:  {KUBERNETES},
    INIT:    {FILESYSTEM, PREFERENCE, REGISTRY, CATALOG},
    PROJECT: {KUBERNETES_NULLABLE},
    /* Add sub-dependencies here, if any */
@@ -55,6 +59,7 @@ var subdeps map[string][]string = map[string][]string{

type Clientset struct {
    CatalogClient    catalog.Client
    DeployClient     deploy.Client
    FS               filesystem.Filesystem
    InitClient       _init.Client
    KubernetesClient kclient.ClientInterface
@@ -111,6 +116,9 @@ func Fetch(command *cobra.Command) (*Clientset, error) {
    if isDefined(command, CATALOG) {
        dep.CatalogClient = catalog.NewCatalogClient(dep.FS, dep.PreferenceClient)
    }
    if isDefined(command, DEPLOY) {
        dep.DeployClient = deploy.NewDeployClient(dep.KubernetesClient)
    }
    if isDefined(command, INIT) {
        dep.InitClient = _init.NewInitClient(dep.FS, dep.PreferenceClient, dep.RegistryClient, dep.CatalogClient)
    }
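Declaring DEPLOY with KUBERNETES as a sub-dependency means Fetch materializes the Kubernetes client before constructing the deploy client, and each client is built only once per command. The illustrative resolver below shows that expansion order; it is not the code in clientset.go, just a sketch of the idea behind the subdeps map.

package example

// expand returns deps plus all transitive sub-dependencies, sub-dependencies
// first, mirroring how Fetch walks the subdeps map before instantiating clients.
func expand(subdeps map[string][]string, deps []string) []string {
    seen := map[string]bool{}
    var out []string
    var visit func(d string)
    visit = func(d string) {
        if seen[d] {
            return
        }
        seen[d] = true
        for _, sub := range subdeps[d] {
            visit(sub) // sub-dependencies are created before their dependents
        }
        out = append(out, d)
    }
    for _, d := range deps {
        visit(d)
    }
    return out
}

// expand(map[string][]string{"DEP_DEPLOY": {"DEP_KUBERNETES"}}, []string{"DEP_DEPLOY"})
// yields [DEP_KUBERNETES DEP_DEPLOY].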
@@ -35,7 +35,6 @@ mockgen -source=pkg/preference/preference.go \
    -package preference \
    -destination pkg/preference/mock.go


mockgen -source=pkg/auth/interface.go \
    -package auth \
    -destination pkg/auth/mock.go
@@ -59,3 +58,12 @@ mockgen -source=pkg/init/interface.go \
mockgen -source=pkg/init/registry/interface.go \
    -package registry \
    -destination pkg/init/registry/mock.go

mockgen -source=pkg/deploy/interface.go \
    -package deploy \
    -destination pkg/deploy/mock.go

mockgen -source=pkg/libdevfile/libdevfile.go \
    -package libdevfile \
    -destination pkg/libdevfile/handler_mock.go
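The two new mockgen targets generate gomock doubles for the deploy client and the libdevfile handler. A hedged unit-test sketch using the deploy mock: it assumes mockgen's usual naming for an interface called Client (NewMockClient), so check the generated pkg/deploy/mock.go for the exact identifiers.

package example

import (
    "testing"

    "github.com/golang/mock/gomock"

    "github.com/devfile/library/pkg/devfile/parser"
    "github.com/redhat-developer/odo/pkg/deploy"
)

func TestDeployIsCalledOnce(t *testing.T) {
    ctrl := gomock.NewController(t)
    defer ctrl.Finish()

    // NewMockClient is the conventional mockgen name for an interface named Client (assumption).
    deployClient := deploy.NewMockClient(ctrl)
    deployClient.EXPECT().
        Deploy(gomock.Any(), gomock.Any(), "app").
        Return(nil).
        Times(1)

    // Code under test would receive deployClient through the clientset and
    // call Deploy exactly once, as DeployOptions.Run does above.
    var devfileObj parser.DevfileObj
    if err := deployClient.Deploy(devfileObj, "/tmp/project", "app"); err != nil {
        t.Fatal(err)
    }
}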
@@ -0,0 +1,116 @@
commands:
- exec:
    commandLine: npm install
    component: runtime
    group:
      isDefault: true
      kind: build
    workingDir: /project
  id: install
- exec:
    commandLine: npm start
    component: runtime
    group:
      isDefault: true
      kind: run
    workingDir: /project
  id: run
- exec:
    commandLine: npm run debug
    component: runtime
    group:
      isDefault: true
      kind: debug
    workingDir: /project
  id: debug
- exec:
    commandLine: npm test
    component: runtime
    group:
      isDefault: true
      kind: test
    workingDir: /project
  id: test
- id: build-image
  apply:
    component: outerloop-build
- id: deployk8s
  apply:
    component: outerloop-deploy
- id: deploy1
  composite:
    commands:
    - build-image
    - deployk8s
    group:
      kind: deploy
      isDefault: true
- id: deploy2
  composite:
    commands:
    - deployk8s
    group:
      kind: deploy
      isDefault: false
components:
- container:
    endpoints:
    - name: http-3000
      targetPort: 3000
    image: registry.access.redhat.com/ubi8/nodejs-14:latest
    memoryLimit: 1024Mi
    mountSources: true
    sourceMapping: /project
  name: runtime
- name: outerloop-build
  image:
    imageName: "{{CONTAINER_IMAGE}}"
    dockerfile:
      uri: ./Dockerfile
      buildContext: ${PROJECTS_ROOT}
      rootRequired: false

- name: outerloop-deploy
  kubernetes:
    inlined: |
      kind: Deployment
      apiVersion: apps/v1
      metadata:
        name: my-component
      spec:
        replicas: 1
        selector:
          matchLabels:
            app: node-app
        template:
          metadata:
            labels:
              app: node-app
          spec:
            containers:
              - name: main
                image: {{CONTAINER_IMAGE}}
                resources:
                  limits:
                    memory: "128Mi"
                    cpu: "500m"
metadata:
  description: Stack with Node.js 14
  displayName: Node.js Runtime
  icon: https://nodejs.org/static/images/logos/nodejs-new-pantone-black.svg
  language: javascript
  name: nodejs-prj1-api-abhz
  projectType: nodejs
  tags:
  - NodeJS
  - Express
  - ubi8
  version: 1.0.1
schemaVersion: 2.2.0
starterProjects:
- git:
    remotes:
      origin: https://github.com/odo-devfiles/nodejs-ex.git
  name: nodejs-starter
variables:
  CONTAINER_IMAGE: quay.io/unknown-account/myimage
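This fixture exists to prove that only the default deploy command runs: deploy1 (isDefault: true) composes build-image and deployk8s, while deploy2 is present but not default. Below is a rough sketch of that selection rule; it handles only composite commands like the ones in this fixture, assumes the devfile/api v1alpha2 field layout (Composite pointer, Group with a *bool IsDefault), and is not odo's actual lookup code.

package example

import (
    "fmt"

    "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
)

// defaultDeployCommand picks the composite command whose group kind is
// "deploy" and which is flagged as default; with the fixture above it would
// return deploy1.
func defaultDeployCommand(cmds []v1alpha2.Command) (v1alpha2.Command, error) {
    for _, c := range cmds {
        if c.Composite == nil || c.Composite.Group == nil {
            continue
        }
        g := c.Composite.Group
        if g.Kind == v1alpha2.DeployCommandGroupKind && g.IsDefault != nil && *g.IsDefault {
            return c, nil
        }
    }
    return v1alpha2.Command{}, fmt.Errorf("no default deploy command found")
}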
@@ -44,4 +44,26 @@ var _ = Describe("odo devfile deploy command tests", func() {
            })
        })
    })

    When("using a devfile.yaml containing two deploy commands", func() {

        BeforeEach(func() {
            helper.CopyExample(filepath.Join("source", "nodejs"), commonVar.Context)
            helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile-with-two-deploy-commands.yaml"), path.Join(commonVar.Context, "devfile.yaml"))
        })
        AfterEach(func() {
            helper.Cmd("odo", "delete", "-a").ShouldPass()
        })
        It("should run odo deploy", func() {
            stdout := helper.Cmd("odo", "deploy").AddEnv("PODMAN_CMD=echo").ShouldPass().Out()
            By("building and pushing image to registry", func() {
                Expect(stdout).To(ContainSubstring("build -t quay.io/unknown-account/myimage -f " + filepath.Join(commonVar.Context, "Dockerfile ") + commonVar.Context))
                Expect(stdout).To(ContainSubstring("push quay.io/unknown-account/myimage"))
            })
            By("deploying a deployment with the built image", func() {
                out := commonVar.CliRunner.Run("get", "deployment", "my-component", "-n", commonVar.Project, "-o", `jsonpath="{.spec.template.spec.containers[0].image}"`).Wait().Out.Contents()
                Expect(out).To(ContainSubstring("quay.io/unknown-account/myimage"))
            })
        })
    })
})