mirror of
https://github.com/redhat-developer/odo.git
synced 2025-10-19 03:06:19 +03:00
* initial work on adding AppName * completed the implementation and tests * resolved failing tests * reverting an unnecessary change * DRYed the tests * added test for having both devfile and s2i together * uncommented some stuff * skip s2i test on kubernetes * resolved Charlie's comments * addressed Mrinal's comments * resolved the panic * use client and dc support for specific cases * resolved final tests and odo list now worked for unpushed components * resolved failing unit tests * update the component adapter in other places * resolved all comments * removed the created context at the end of the test * addressed Tomas's comment * resolved all comments * updated all devfiles with proper name * resolved rebase * addressed a missed error and more validation in the tests * use random cmpName for tests
631 lines
21 KiB
Go
631 lines
21 KiB
Go
package component
|
|
|
|
import (
|
|
"fmt"
|
|
"io"
|
|
"reflect"
|
|
"strings"
|
|
|
|
componentlabels "github.com/openshift/odo/pkg/component/labels"
|
|
"github.com/openshift/odo/pkg/exec"
|
|
|
|
corev1 "k8s.io/api/core/v1"
|
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
|
|
"github.com/fatih/color"
|
|
"github.com/pkg/errors"
|
|
"k8s.io/klog"
|
|
|
|
"github.com/openshift/odo/pkg/component"
|
|
"github.com/openshift/odo/pkg/config"
|
|
"github.com/openshift/odo/pkg/devfile/adapters/common"
|
|
"github.com/openshift/odo/pkg/devfile/adapters/kubernetes/storage"
|
|
"github.com/openshift/odo/pkg/devfile/adapters/kubernetes/utils"
|
|
versionsCommon "github.com/openshift/odo/pkg/devfile/parser/data/common"
|
|
"github.com/openshift/odo/pkg/kclient"
|
|
"github.com/openshift/odo/pkg/log"
|
|
"github.com/openshift/odo/pkg/machineoutput"
|
|
odoutil "github.com/openshift/odo/pkg/odo/util"
|
|
"github.com/openshift/odo/pkg/sync"
|
|
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
|
)
|
|
|
|
// New instantiantes a component adapter
|
|
func New(adapterContext common.AdapterContext, client kclient.Client) Adapter {
|
|
|
|
var loggingClient machineoutput.MachineEventLoggingClient
|
|
|
|
if log.IsJSON() {
|
|
loggingClient = machineoutput.NewConsoleMachineEventLoggingClient()
|
|
} else {
|
|
loggingClient = machineoutput.NewNoOpMachineEventLoggingClient()
|
|
}
|
|
|
|
return Adapter{
|
|
Client: client,
|
|
AdapterContext: adapterContext,
|
|
machineEventLogger: loggingClient,
|
|
}
|
|
}
|
|
|
|
// Adapter is a component adapter implementation for Kubernetes
type Adapter struct {
	// Client is the Kubernetes client used for all cluster operations.
	Client kclient.Client
	// AdapterContext (embedded) supplies ComponentName, AppName and the
	// parsed Devfile used throughout the adapter.
	common.AdapterContext
	// Devfile command overrides captured from PushParameters at the start of
	// Push; an empty string means "use the devfile's default for that group".
	devfileInitCmd  string
	devfileBuildCmd string
	devfileRunCmd   string
	devfileDebugCmd string
	// devfileDebugPort is the debug port passed through to the supervisord
	// container setup when pushing in debug mode.
	devfileDebugPort int
	// machineEventLogger emits machine-readable events in JSON mode, or is a
	// no-op sink otherwise (see New).
	machineEventLogger machineoutput.MachineEventLoggingClient
}
|
|
|
|
// Push updates the component if a matching component exists or creates one if it doesn't exist
|
|
// Once the component has started, it will sync the source code to it.
|
|
func (a Adapter) Push(parameters common.PushParameters) (err error) {
|
|
componentExists, err := utils.ComponentExists(a.Client, a.ComponentName)
|
|
if err != nil {
|
|
return errors.Wrapf(err, "unable to determine if component %s exists", a.ComponentName)
|
|
}
|
|
|
|
a.devfileInitCmd = parameters.DevfileInitCmd
|
|
a.devfileBuildCmd = parameters.DevfileBuildCmd
|
|
a.devfileRunCmd = parameters.DevfileRunCmd
|
|
a.devfileDebugCmd = parameters.DevfileDebugCmd
|
|
a.devfileDebugPort = parameters.DebugPort
|
|
|
|
podChanged := false
|
|
var podName string
|
|
|
|
// If the component already exists, retrieve the pod's name before it's potentially updated
|
|
if componentExists {
|
|
pod, err := a.waitAndGetComponentPod(true)
|
|
if err != nil {
|
|
return errors.Wrapf(err, "unable to get pod for component %s", a.ComponentName)
|
|
}
|
|
podName = pod.GetName()
|
|
}
|
|
|
|
// Validate the devfile build and run commands
|
|
log.Info("\nValidation")
|
|
s := log.Spinner("Validating the devfile")
|
|
pushDevfileCommands, err := common.ValidateAndGetPushDevfileCommands(a.Devfile.Data, a.devfileInitCmd, a.devfileBuildCmd, a.devfileRunCmd)
|
|
if err != nil {
|
|
s.End(false)
|
|
return errors.Wrap(err, "failed to validate devfile build and run commands")
|
|
}
|
|
s.End(true)
|
|
|
|
log.Infof("\nCreating Kubernetes resources for component %s", a.ComponentName)
|
|
|
|
if parameters.Debug {
|
|
pushDevfileDebugCommands, err := common.ValidateAndGetDebugDevfileCommands(a.Devfile.Data, a.devfileDebugCmd)
|
|
if err != nil {
|
|
return fmt.Errorf("debug command is not valid")
|
|
}
|
|
pushDevfileCommands[versionsCommon.DebugCommandGroupType] = pushDevfileDebugCommands
|
|
parameters.ForceBuild = true
|
|
}
|
|
|
|
err = a.createOrUpdateComponent(componentExists)
|
|
if err != nil {
|
|
return errors.Wrap(err, "unable to create or update component")
|
|
}
|
|
|
|
_, err = a.Client.WaitForDeploymentRollout(a.ComponentName)
|
|
if err != nil {
|
|
return errors.Wrap(err, "error while waiting for deployment rollout")
|
|
}
|
|
|
|
// Wait for Pod to be in running state otherwise we can't sync data or exec commands to it.
|
|
pod, err := a.waitAndGetComponentPod(true)
|
|
if err != nil {
|
|
return errors.Wrapf(err, "unable to get pod for component %s", a.ComponentName)
|
|
}
|
|
|
|
err = component.ApplyConfig(nil, &a.Client, config.LocalConfigInfo{}, parameters.EnvSpecificInfo, color.Output, componentExists)
|
|
if err != nil {
|
|
odoutil.LogErrorAndExit(err, "Failed to update config to component deployed.")
|
|
}
|
|
|
|
// Compare the name of the pod with the one before the rollout. If they differ, it means there's a new pod and a force push is required
|
|
if componentExists && podName != pod.GetName() {
|
|
podChanged = true
|
|
}
|
|
|
|
// Find at least one pod with the source volume mounted, error out if none can be found
|
|
containerName, sourceMount, err := getFirstContainerWithSourceVolume(pod.Spec.Containers)
|
|
if err != nil {
|
|
return errors.Wrapf(err, "error while retrieving container from pod %s with a mounted project volume", podName)
|
|
}
|
|
|
|
log.Infof("\nSyncing to component %s", a.ComponentName)
|
|
// Get a sync adapter. Check if project files have changed and sync accordingly
|
|
syncAdapter := sync.New(a.AdapterContext, &a.Client)
|
|
compInfo := common.ComponentInfo{
|
|
ContainerName: containerName,
|
|
PodName: pod.GetName(),
|
|
SourceMount: sourceMount,
|
|
}
|
|
syncParams := common.SyncParameters{
|
|
PushParams: parameters,
|
|
CompInfo: compInfo,
|
|
ComponentExists: componentExists,
|
|
PodChanged: podChanged,
|
|
}
|
|
execRequired, err := syncAdapter.SyncFiles(syncParams)
|
|
if err != nil {
|
|
return errors.Wrapf(err, "Failed to sync to component with name %s", a.ComponentName)
|
|
}
|
|
|
|
// PostStart events from the devfile will only be executed when the component
|
|
// didn't previously exist
|
|
postStartEvents := a.Devfile.Data.GetEvents().PostStart
|
|
if !componentExists && len(postStartEvents) > 0 {
|
|
// log only when there are post start events present in devfile
|
|
log.Infof("\nExecuting postStart event commands for component %s", a.ComponentName)
|
|
err = a.execDevfileEvent(postStartEvents, pod.GetName())
|
|
if err != nil {
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if execRequired {
|
|
log.Infof("\nExecuting devfile commands for component %s", a.ComponentName)
|
|
err = a.execDevfile(pushDevfileCommands, componentExists, parameters.Show, pod.GetName(), pod.Spec.Containers, parameters.Debug)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// Test runs the devfile test command
|
|
func (a Adapter) Test(testCmd string, show bool) (err error) {
|
|
pod, err := a.Client.GetPodUsingComponentName(a.ComponentName)
|
|
if err != nil {
|
|
return fmt.Errorf("error occurred while getting the pod: %w", err)
|
|
}
|
|
if pod.Status.Phase != corev1.PodRunning {
|
|
return fmt.Errorf("pod for component %s is not running", a.ComponentName)
|
|
}
|
|
|
|
log.Infof("\nExecuting devfile test command for component %s", a.ComponentName)
|
|
|
|
testCommand, err := common.ValidateAndGetTestDevfileCommands(a.Devfile.Data, testCmd)
|
|
if err != nil {
|
|
return errors.Wrap(err, "failed to validate devfile test command")
|
|
}
|
|
err = a.execTestCmd(testCommand, pod.GetName(), show)
|
|
if err != nil {
|
|
return errors.Wrapf(err, "failed to execute devfile commands for component %s", a.ComponentName)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// DoesComponentExist returns true if a component with the specified name exists, false otherwise
func (a Adapter) DoesComponentExist(cmpName string) (bool, error) {
	// Delegates to utils.ComponentExists so every caller shares a single
	// definition of component existence on the cluster.
	return utils.ComponentExists(a.Client, cmpName)
}
|
|
|
|
// createOrUpdateComponent generates the Kubernetes resources for the component
// (deployment, service and PVC-backed volumes from the devfile) and either
// creates them or updates the existing ones, depending on componentExists.
// The service is created/updated only when container ports are exposed, and
// deleted when an update leaves no ports.
func (a Adapter) createOrUpdateComponent(componentExists bool) (err error) {
	componentName := a.ComponentName

	// NOTE(review): trims a single trailing "-" from the devfile metadata
	// name; presumably the metadata name can carry a dash suffix — confirm.
	componentType := strings.TrimSuffix(a.AdapterContext.Devfile.Data.GetMetadata().Name, "-")

	// Standard odo labels plus the component name/type used as selectors below.
	labels := componentlabels.GetLabels(componentName, a.AppName, true)
	labels["component"] = componentName
	labels[componentlabels.ComponentTypeLabel] = componentType

	containers, err := utils.GetContainers(a.Devfile)
	if err != nil {
		return err
	}

	if len(containers) == 0 {
		return fmt.Errorf("No valid components found in the devfile")
	}

	// Rewrite the containers so supervisord manages the run/debug command.
	containers, err = utils.UpdateContainersWithSupervisord(a.Devfile, containers, a.devfileRunCmd, a.devfileDebugCmd, a.devfileDebugPort)
	if err != nil {
		return err
	}

	objectMeta := kclient.CreateObjectMeta(componentName, a.Client.Namespace, labels, nil)
	podTemplateSpec := kclient.GeneratePodTemplateSpec(objectMeta, containers)

	// Init container that copies the supervisord binary into the pod.
	kclient.AddBootstrapSupervisordInitContainer(podTemplateSpec)

	componentAliasToVolumes := common.GetVolumes(a.Devfile)

	var uniqueStorages []common.Storage
	volumeNameToPVCName := make(map[string]string)
	processedVolumes := make(map[string]bool)

	// Get a list of all the unique volume names and generate their PVC names
	for _, volumes := range componentAliasToVolumes {
		for _, vol := range volumes {
			if _, ok := processedVolumes[vol.Name]; !ok {
				processedVolumes[vol.Name] = true

				// Generate the PVC Names
				klog.V(4).Infof("Generating PVC name for %v", vol.Name)
				generatedPVCName, err := storage.GeneratePVCNameFromDevfileVol(vol.Name, componentName)
				if err != nil {
					return err
				}

				// Check if we have an existing PVC with the labels, overwrite the generated name with the existing name if present
				existingPVCName, err := storage.GetExistingPVC(&a.Client, vol.Name, componentName)
				if err != nil {
					return err
				}
				if len(existingPVCName) > 0 {
					klog.V(4).Infof("Found an existing PVC for %v, PVC %v will be re-used", vol.Name, existingPVCName)
					generatedPVCName = existingPVCName
				}

				pvc := common.Storage{
					Name:   generatedPVCName,
					Volume: vol,
				}
				uniqueStorages = append(uniqueStorages, pvc)
				volumeNameToPVCName[vol.Name] = generatedPVCName
			}
		}
	}

	// Add PVC and Volume Mounts to the podTemplateSpec
	err = kclient.AddPVCAndVolumeMount(podTemplateSpec, volumeNameToPVCName, componentAliasToVolumes)
	if err != nil {
		return err
	}

	deploymentSpec := kclient.GenerateDeploymentSpec(*podTemplateSpec, map[string]string{
		"component": componentName,
	})
	// Collect every container port so the service exposes all of them.
	var containerPorts []corev1.ContainerPort
	for _, c := range deploymentSpec.Template.Spec.Containers {
		if len(containerPorts) == 0 {
			containerPorts = c.Ports
		} else {
			containerPorts = append(containerPorts, c.Ports...)
		}
	}
	serviceSpec := kclient.GenerateServiceSpec(objectMeta.Name, containerPorts)
	klog.V(4).Infof("Creating deployment %v", deploymentSpec.Template.GetName())
	klog.V(4).Infof("The component name is %v", componentName)

	if componentExists {
		// If the component already exists, get the resource version of the deploy before updating
		klog.V(4).Info("The component already exists, attempting to update it")
		deployment, err := a.Client.UpdateDeployment(*deploymentSpec)
		if err != nil {
			return err
		}
		klog.V(4).Infof("Successfully updated component %v", componentName)
		// A Get error below is treated as "no existing service" rather than a
		// failure; the service is then (re)created owned by the deployment.
		oldSvc, err := a.Client.KubeClient.CoreV1().Services(a.Client.Namespace).Get(componentName, metav1.GetOptions{})
		objectMetaTemp := objectMeta
		ownerReference := kclient.GenerateOwnerReference(deployment)
		objectMetaTemp.OwnerReferences = append(objectMeta.OwnerReferences, ownerReference)
		if err != nil {
			// no old service was found, create a new one
			if len(serviceSpec.Ports) > 0 {
				_, err = a.Client.CreateService(objectMetaTemp, *serviceSpec)
				if err != nil {
					return err
				}
				klog.V(4).Infof("Successfully created Service for component %s", componentName)
			}
		} else {
			if len(serviceSpec.Ports) > 0 {
				// Keep the ClusterIP and resourceVersion so the update is
				// accepted by the API server (ClusterIP is immutable).
				serviceSpec.ClusterIP = oldSvc.Spec.ClusterIP
				objectMetaTemp.ResourceVersion = oldSvc.GetResourceVersion()
				_, err = a.Client.UpdateService(objectMetaTemp, *serviceSpec)
				if err != nil {
					return err
				}
				klog.V(4).Infof("Successfully update Service for component %s", componentName)
			} else {
				// No ports left after the update: remove the stale service.
				err = a.Client.KubeClient.CoreV1().Services(a.Client.Namespace).Delete(componentName, &metav1.DeleteOptions{})
				if err != nil {
					return err
				}
			}
		}
	} else {
		deployment, err := a.Client.CreateDeployment(*deploymentSpec)
		if err != nil {
			return err
		}
		klog.V(4).Infof("Successfully created component %v", componentName)
		// Own the service by the deployment so it is garbage-collected with it.
		ownerReference := kclient.GenerateOwnerReference(deployment)
		objectMetaTemp := objectMeta
		objectMetaTemp.OwnerReferences = append(objectMeta.OwnerReferences, ownerReference)
		if len(serviceSpec.Ports) > 0 {
			_, err = a.Client.CreateService(objectMetaTemp, *serviceSpec)
			if err != nil {
				return err
			}
			klog.V(4).Infof("Successfully created Service for component %s", componentName)
		}

	}

	// Get the storage adapter and create the volumes if it does not exist
	stoAdapter := storage.New(a.AdapterContext, a.Client)
	err = stoAdapter.Create(uniqueStorages)
	if err != nil {
		return err
	}

	return nil
}
|
|
|
|
func (a Adapter) waitAndGetComponentPod(hideSpinner bool) (*corev1.Pod, error) {
|
|
podSelector := fmt.Sprintf("component=%s", a.ComponentName)
|
|
watchOptions := metav1.ListOptions{
|
|
LabelSelector: podSelector,
|
|
}
|
|
// Wait for Pod to be in running state otherwise we can't sync data to it.
|
|
pod, err := a.Client.WaitAndGetPod(watchOptions, corev1.PodRunning, "Waiting for component to start", hideSpinner)
|
|
if err != nil {
|
|
return nil, errors.Wrapf(err, "error while waiting for pod %s", podSelector)
|
|
}
|
|
return pod, nil
|
|
}
|
|
|
|
// Executes all the commands from the devfile in order: init and build - which are both optional, and a compulsary run.
|
|
// Init only runs once when the component is created.
|
|
func (a Adapter) execDevfile(commandsMap common.PushCommandsMap, componentExists, show bool, podName string, containers []corev1.Container, isDebug bool) (err error) {
|
|
// If nothing has been passed, then the devfile is missing the required run command
|
|
if len(commandsMap) == 0 {
|
|
return errors.New(fmt.Sprint("error executing devfile commands - there should be at least 1 command"))
|
|
}
|
|
|
|
compInfo := common.ComponentInfo{
|
|
PodName: podName,
|
|
}
|
|
|
|
// only execute Init command, if it is first run of container.
|
|
if !componentExists {
|
|
|
|
// Get Init Command
|
|
command, ok := commandsMap[versionsCommon.InitCommandGroupType]
|
|
if ok {
|
|
compInfo.ContainerName = command.Exec.Component
|
|
err = exec.ExecuteDevfileCommandSynchronously(&a.Client, *command.Exec, command.Exec.Id, compInfo, show, a.machineEventLogger)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Get Build Command
|
|
command, ok := commandsMap[versionsCommon.BuildCommandGroupType]
|
|
if ok {
|
|
compInfo.ContainerName = command.Exec.Component
|
|
err = exec.ExecuteDevfileCommandSynchronously(&a.Client, *command.Exec, command.Exec.Id, compInfo, show, a.machineEventLogger)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
}
|
|
|
|
// Get Run or Debug Command
|
|
if isDebug {
|
|
command, ok = commandsMap[versionsCommon.DebugCommandGroupType]
|
|
} else {
|
|
command, ok = commandsMap[versionsCommon.RunCommandGroupType]
|
|
}
|
|
if ok {
|
|
klog.V(4).Infof("Executing devfile command %v", command.Exec.Id)
|
|
compInfo.ContainerName = command.Exec.Component
|
|
|
|
// Check if the devfile debug component containers have supervisord as the entrypoint.
|
|
// Start the supervisord if the odo component does not exist
|
|
if !componentExists {
|
|
err = a.InitRunContainerSupervisord(command.Exec.Component, podName, containers)
|
|
if err != nil {
|
|
a.machineEventLogger.ReportError(err, machineoutput.TimestampNow())
|
|
return
|
|
}
|
|
}
|
|
|
|
if componentExists && !common.IsRestartRequired(command) {
|
|
klog.V(4).Infof("restart:false, Not restarting %v Command", command.Exec.Id)
|
|
if isDebug {
|
|
err = exec.ExecuteDevfileDebugActionWithoutRestart(&a.Client, *command.Exec, command.Exec.Id, compInfo, show, a.machineEventLogger)
|
|
} else {
|
|
err = exec.ExecuteDevfileRunActionWithoutRestart(&a.Client, *command.Exec, command.Exec.Id, compInfo, show, a.machineEventLogger)
|
|
}
|
|
return
|
|
}
|
|
if isDebug {
|
|
err = exec.ExecuteDevfileDebugAction(&a.Client, *command.Exec, command.Exec.Id, compInfo, show, a.machineEventLogger)
|
|
} else {
|
|
err = exec.ExecuteDevfileRunAction(&a.Client, *command.Exec, command.Exec.Id, compInfo, show, a.machineEventLogger)
|
|
}
|
|
|
|
}
|
|
|
|
return
|
|
}
|
|
|
|
// TODO: Support Composite
|
|
// execDevfileEvent receives a Devfile Event (PostStart, PreStop etc.) and loops through them
|
|
// Each Devfile Command associated with the given event is retrieved, and executed in the container specified
|
|
// in the command
|
|
func (a Adapter) execDevfileEvent(events []string, podName string) error {
|
|
|
|
commandMap := common.GetCommandMap(a.Devfile.Data)
|
|
for _, commandName := range events {
|
|
// Convert commandName to lower because GetCommands converts Command.Exec.Id's to lower
|
|
command := commandMap[strings.ToLower(commandName)]
|
|
|
|
compInfo := common.ComponentInfo{
|
|
ContainerName: command.Exec.Component,
|
|
PodName: podName,
|
|
}
|
|
|
|
// If composite would go here & recursive loop
|
|
|
|
// Execute command in pod
|
|
err := exec.ExecuteDevfileCommandSynchronously(&a.Client, *command.Exec, command.Exec.Id, compInfo, false, a.machineEventLogger)
|
|
if err != nil {
|
|
return errors.Wrapf(err, "unable to execute devfile command "+commandName)
|
|
}
|
|
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// Executes the test command in the pod
|
|
func (a Adapter) execTestCmd(testCmd versionsCommon.DevfileCommand, podName string, show bool) (err error) {
|
|
compInfo := common.ComponentInfo{
|
|
PodName: podName,
|
|
}
|
|
compInfo.ContainerName = testCmd.Exec.Component
|
|
err = exec.ExecuteDevfileCommandSynchronously(&a.Client, *testCmd.Exec, testCmd.Exec.Id, compInfo, show, a.machineEventLogger)
|
|
return
|
|
}
|
|
|
|
// InitRunContainerSupervisord initializes the supervisord in the container if
|
|
// the container has entrypoint that is not supervisord
|
|
func (a Adapter) InitRunContainerSupervisord(containerName, podName string, containers []corev1.Container) (err error) {
|
|
for _, container := range containers {
|
|
if container.Name == containerName && !reflect.DeepEqual(container.Command, []string{common.SupervisordBinaryPath}) {
|
|
command := []string{common.SupervisordBinaryPath, "-c", common.SupervisordConfFile, "-d"}
|
|
compInfo := common.ComponentInfo{
|
|
ContainerName: containerName,
|
|
PodName: podName,
|
|
}
|
|
err = exec.ExecuteCommand(&a.Client, compInfo, command, true, nil, nil)
|
|
}
|
|
}
|
|
|
|
return
|
|
}
|
|
|
|
// getFirstContainerWithSourceVolume returns the first container that set mountSources: true as well
|
|
// as the path to the source volume inside the container.
|
|
// Because the source volume is shared across all components that need it, we only need to sync once,
|
|
// so we only need to find one container. If no container was found, that means there's no
|
|
// container to sync to, so return an error
|
|
func getFirstContainerWithSourceVolume(containers []corev1.Container) (string, string, error) {
|
|
for _, c := range containers {
|
|
for _, vol := range c.VolumeMounts {
|
|
if vol.Name == kclient.OdoSourceVolume {
|
|
return c.Name, vol.MountPath, nil
|
|
}
|
|
}
|
|
}
|
|
|
|
return "", "", fmt.Errorf("In order to sync files, odo requires at least one component in a devfile to set 'mountSources: true'")
|
|
}
|
|
|
|
// Delete deletes the component
|
|
func (a Adapter) Delete(labels map[string]string) error {
|
|
spinner := log.Spinnerf("Deleting devfile component %s", a.ComponentName)
|
|
defer spinner.End(false)
|
|
|
|
componentExists, err := utils.ComponentExists(a.Client, a.ComponentName)
|
|
if kerrors.IsForbidden(err) {
|
|
klog.V(4).Infof("Resource for %s forbidden", a.ComponentName)
|
|
// log the error if it failed to determine if the component exists due to insufficient RBACs
|
|
spinner.End(false)
|
|
log.Warningf("%v", err)
|
|
return nil
|
|
} else if err != nil {
|
|
return errors.Wrapf(err, "unable to determine if component %s exists", a.ComponentName)
|
|
}
|
|
|
|
if !componentExists {
|
|
spinner.End(false)
|
|
log.Warningf("Component %s does not exist", a.ComponentName)
|
|
return nil
|
|
}
|
|
|
|
err = a.Client.DeleteDeployment(labels)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
spinner.End(true)
|
|
log.Successf("Successfully deleted component")
|
|
return nil
|
|
}
|
|
|
|
// Log returns log from component
|
|
func (a Adapter) Log(follow, debug bool) (io.ReadCloser, error) {
|
|
|
|
pod, err := a.Client.GetPodUsingComponentName(a.ComponentName)
|
|
if err != nil {
|
|
return nil, errors.Errorf("the component %s doesn't exist on the cluster", a.ComponentName)
|
|
}
|
|
|
|
if pod.Status.Phase != corev1.PodRunning {
|
|
return nil, errors.Errorf("unable to show logs, component is not in running state. current status=%v", pod.Status.Phase)
|
|
}
|
|
|
|
var command versionsCommon.DevfileCommand
|
|
if debug {
|
|
command, err = common.GetDebugCommand(a.Devfile.Data, "")
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
if reflect.DeepEqual(versionsCommon.DevfileCommand{}, command) {
|
|
return nil, errors.Errorf("no debug command found in devfile, please run \"odo log\" for run command logs")
|
|
}
|
|
|
|
} else {
|
|
command, err = common.GetRunCommand(a.Devfile.Data, "")
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
}
|
|
|
|
containerName := command.Exec.Component
|
|
|
|
return a.Client.GetPodLogs(pod.Name, containerName, follow)
|
|
}
|
|
|
|
// Exec executes a command in the component
|
|
func (a Adapter) Exec(command []string) error {
|
|
exists, err := utils.ComponentExists(a.Client, a.ComponentName)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
if !exists {
|
|
return errors.Errorf("the component %s doesn't exist on the cluster", a.ComponentName)
|
|
}
|
|
|
|
runCommand, err := common.GetRunCommand(a.Devfile.Data, "")
|
|
if err != nil {
|
|
return err
|
|
}
|
|
containerName := runCommand.Exec.Component
|
|
|
|
// get the pod
|
|
pod, err := a.Client.GetPodUsingComponentName(a.ComponentName)
|
|
if err != nil {
|
|
return errors.Wrapf(err, "unable to get pod for component %s", a.ComponentName)
|
|
}
|
|
|
|
if pod.Status.Phase != corev1.PodRunning {
|
|
return fmt.Errorf("unable to exec as the component is not running. Current status=%v", pod.Status.Phase)
|
|
}
|
|
|
|
componentInfo := common.ComponentInfo{
|
|
PodName: pod.Name,
|
|
ContainerName: containerName,
|
|
}
|
|
|
|
return exec.ExecuteCommand(&a.Client, componentInfo, command, true, nil, nil)
|
|
}
|