mirror of
https://github.com/redhat-developer/odo.git
synced 2025-10-19 03:06:19 +03:00
Replace odo delete component integration with unit tests (#6904)
This commit is contained in:
@@ -70,7 +70,7 @@ func TestOdoAlizer(t *testing.T) {
|
||||
var output []api.DetectionResult
|
||||
err := json.Unmarshal(b, &output)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
t.Fatal(err)
|
||||
}
|
||||
checkEqual(t, output[0].Devfile, "framework-name")
|
||||
checkEqual(t, output[0].DevfileRegistry, "TheRegistryName")
|
||||
@@ -86,26 +86,26 @@ func TestOdoAlizer(t *testing.T) {
|
||||
if tt.clientset != nil {
|
||||
clientset = tt.clientset()
|
||||
}
|
||||
runCommand(t, tt.args, clientset, func(err error, stdout, stderr string) {
|
||||
runCommand(t, tt.args, runOptions{}, clientset, nil, func(err error, stdout, stderr string) {
|
||||
if (err != nil) != (tt.wantErr != "") {
|
||||
t.Fatalf("errWanted: %v\nGot: %v", tt.wantErr != "", err != nil)
|
||||
}
|
||||
|
||||
if tt.wantErr != "" {
|
||||
if !strings.Contains(err.Error(), tt.wantErr) {
|
||||
t.Fatalf("%q\nerror does not contain:\n%q", err.Error(), tt.wantErr)
|
||||
t.Errorf("%q\nerror does not contain:\n%q", err.Error(), tt.wantErr)
|
||||
}
|
||||
}
|
||||
|
||||
if tt.wantStdout != "" {
|
||||
if !strings.Contains(stdout, tt.wantStdout) {
|
||||
t.Fatalf("%q\nstdout does not contain:\n%q", stdout, tt.wantStdout)
|
||||
t.Errorf("%q\nstdout does not contain:\n%q", stdout, tt.wantStdout)
|
||||
}
|
||||
}
|
||||
|
||||
if tt.wantStderr != "" {
|
||||
if !strings.Contains(stderr, tt.wantStderr) {
|
||||
t.Fatalf("%q\nstderr does not contain:\n%q", stderr, tt.wantStderr)
|
||||
t.Errorf("%q\nstderr does not contain:\n%q", stderr, tt.wantStderr)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -11,6 +11,8 @@ import (
|
||||
envcontext "github.com/redhat-developer/odo/pkg/config/context"
|
||||
"github.com/redhat-developer/odo/pkg/odo/cli"
|
||||
"github.com/redhat-developer/odo/pkg/odo/genericclioptions/clientset"
|
||||
"github.com/redhat-developer/odo/pkg/testingutil/filesystem"
|
||||
"github.com/sethvargo/go-envconfig"
|
||||
"github.com/spf13/pflag"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
@@ -21,17 +23,24 @@ func resetGlobalFlags() {
|
||||
klog.InitFlags(nil)
|
||||
}
|
||||
|
||||
type runOptions struct {
|
||||
env map[string]string
|
||||
config map[string]string
|
||||
}
|
||||
|
||||
func runCommand(
|
||||
t *testing.T,
|
||||
args []string,
|
||||
options runOptions,
|
||||
clientset clientset.Clientset,
|
||||
populateFS func(fs filesystem.Filesystem),
|
||||
f func(err error, stdout, stderr string),
|
||||
) {
|
||||
|
||||
// We are running the test on a new and empty directory (on real filesystem)
|
||||
originWd, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
_ = os.Chdir(originWd)
|
||||
@@ -39,16 +48,25 @@ func runCommand(
|
||||
cwd := t.TempDir()
|
||||
err = os.Chdir(cwd)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if populateFS != nil {
|
||||
populateFS(clientset.FS)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
envConfig, err := config.GetConfiguration()
|
||||
envConfig, err := config.GetConfigurationWith(envconfig.MapLookuper(options.config))
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ctx = envcontext.WithEnvConfig(ctx, *envConfig)
|
||||
|
||||
for k, v := range options.env {
|
||||
t.Setenv(k, v)
|
||||
}
|
||||
|
||||
resetGlobalFlags()
|
||||
|
||||
var stdoutB, stderrB bytes.Buffer
|
||||
|
||||
547
cmd/odo/delete_test.go
Normal file
547
cmd/odo/delete_test.go
Normal file
@@ -0,0 +1,547 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/onsi/gomega"
|
||||
"github.com/redhat-developer/odo/pkg/kclient"
|
||||
"github.com/redhat-developer/odo/pkg/odo/genericclioptions/clientset"
|
||||
"github.com/redhat-developer/odo/pkg/podman"
|
||||
"github.com/redhat-developer/odo/pkg/testingutil/filesystem"
|
||||
"github.com/redhat-developer/odo/pkg/util"
|
||||
"github.com/redhat-developer/odo/tests/helper"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
// Context
|
||||
|
||||
// testContext records which matrix dimensions a given subtest was built from,
// so that the mock-call checkers can tailor their expectations.
type testContext struct {
	// Human-readable name of the platform combination (e.g. "podman only").
	platform string
	// Name of the filesystem fixture used.
	fscontent string
	// Key of the --running-in option ("no", "dev" or "deploy").
	runningInOption string
	// Key of the --files option ("no" or "yes").
	filesOption string
	// Key of the --name option ("no" or "yes").
	nameOption string
}
|
||||
|
||||
// Platform
|
||||
|
||||
type platformFunc func(t *testing.T, env map[string]string, config map[string]string, clientset *clientset.Clientset)
|
||||
|
||||
var noPlatformPlatform platformFunc = func(t *testing.T, env map[string]string, config map[string]string, clientset *clientset.Clientset) {
|
||||
env["KUBECONFIG"] = "/dev/null"
|
||||
config["PODMAN_CMD"] = "not-found"
|
||||
}
|
||||
|
||||
var podmanOnlyPlatform = func() platformFunc {
|
||||
return func(t *testing.T, env map[string]string, config map[string]string, clientset *clientset.Clientset) {
|
||||
env["KUBECONFIG"] = "/dev/null"
|
||||
ctrl := gomock.NewController(t)
|
||||
// Podman is accessible
|
||||
podmanClient := podman.NewMockClient(ctrl)
|
||||
clientset.PodmanClient = podmanClient
|
||||
}
|
||||
}
|
||||
|
||||
var kubernetesOnlyPlatform = func() platformFunc {
|
||||
return func(t *testing.T, env map[string]string, config map[string]string, clientset *clientset.Clientset) {
|
||||
config["PODMAN_CMD"] = "not-found"
|
||||
ctrl := gomock.NewController(t)
|
||||
// kubernetes is accessible
|
||||
kubeClient := kclient.NewMockClientInterface(ctrl)
|
||||
kubeClient.EXPECT().GetCurrentNamespace().Return("a-namespace").AnyTimes()
|
||||
clientset.KubernetesClient = kubeClient
|
||||
}
|
||||
}
|
||||
|
||||
var kubernetesAndPodmanPlatform = func() platformFunc {
|
||||
return func(t *testing.T, env map[string]string, config map[string]string, clientset *clientset.Clientset) {
|
||||
ctrl := gomock.NewController(t)
|
||||
// kubernetes is accessible
|
||||
kubeClient := kclient.NewMockClientInterface(ctrl)
|
||||
clientset.KubernetesClient = kubeClient
|
||||
kubeClient.EXPECT().GetCurrentNamespace().Return("a-namespace").AnyTimes()
|
||||
// Podman is accessible
|
||||
podmanClient := podman.NewMockClient(ctrl)
|
||||
clientset.PodmanClient = podmanClient
|
||||
}
|
||||
}
|
||||
|
||||
var allPlatforms = map[string]platformFunc{
|
||||
"no platform": noPlatformPlatform,
|
||||
"podman only": podmanOnlyPlatform(),
|
||||
"kubernetes only": kubernetesOnlyPlatform(),
|
||||
"kubernetes and podman": kubernetesAndPodmanPlatform(),
|
||||
}
|
||||
|
||||
// FS content
|
||||
|
||||
type fscontentFunc func(fs filesystem.Filesystem)
|
||||
|
||||
var noContentFscontent fscontentFunc = func(fs filesystem.Filesystem) {}
|
||||
|
||||
var nodeJsSourcesFsContent fscontentFunc = func(fs filesystem.Filesystem) {
|
||||
helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), ".")
|
||||
}
|
||||
|
||||
// fsOptions controls the optional .odo metadata created by the
// nodeJsSourcesAndDevfileFsContent fixture.
type fsOptions struct {
	// dotOdoExists forces creation of the .odo directory even when no
	// generated-files marker is written.
	dotOdoExists bool
	// generated, when non-nil, lists the file names recorded in
	// .odo/generated (marking them as generated by odo).
	generated []string
}
|
||||
|
||||
var nodeJsSourcesAndDevfileFsContent = func(devfilePath string, options fsOptions) fscontentFunc {
|
||||
return func(fs filesystem.Filesystem) {
|
||||
helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), ".")
|
||||
helper.CopyExampleDevFile(
|
||||
devfilePath,
|
||||
"devfile.yaml",
|
||||
"my-component")
|
||||
if options.dotOdoExists || options.generated != nil {
|
||||
helper.MakeDir(util.DotOdoDirectory)
|
||||
}
|
||||
if options.generated != nil {
|
||||
err := helper.CreateFileWithContent(filepath.Join(util.DotOdoDirectory, "generated"), strings.Join(options.generated, "\n"))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var allFscontents = map[string]fscontentFunc{
|
||||
"no content": noContentFscontent,
|
||||
"nodeJS sources": nodeJsSourcesFsContent,
|
||||
"nodeJS sources and Devfile": nodeJsSourcesAndDevfileFsContent(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), fsOptions{}),
|
||||
"nodeJS sources, Devfile and .odo": nodeJsSourcesAndDevfileFsContent(
|
||||
filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"),
|
||||
fsOptions{
|
||||
dotOdoExists: true,
|
||||
}),
|
||||
"nodeJS sources and generated Devfile": nodeJsSourcesAndDevfileFsContent(
|
||||
filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"),
|
||||
fsOptions{
|
||||
generated: []string{"devfile.yaml"},
|
||||
}),
|
||||
}
|
||||
|
||||
// runningIn option
|
||||
|
||||
// runningInOption holds the CLI flags selecting the mode ("dev"/"deploy")
// a component is running in.
type runningInOption []string

var (
	noRunningInOption    = []string{}
	devRunninInOption    = []string{"--running-in", "dev"}
	deployRunninInOption = []string{"--running-in", "deploy"}
)

// allRunningInOptions lists every --running-in variant exercised by the tests.
var allRunningInOptions = map[string]runningInOption{
	"no":     noRunningInOption,
	"dev":    devRunninInOption,
	"deploy": deployRunninInOption,
}
|
||||
|
||||
// files option
|
||||
|
||||
// filesOption holds the CLI flags requesting deletion of odo-generated files.
type filesOption []string

var (
	noFilesOptions  = []string{}
	yesFilesOptions = []string{"--files"}
)

// allFilesOptions lists every --files variant exercised by the tests.
var allFilesOptions = map[string]filesOption{
	"no":  noFilesOptions,
	"yes": yesFilesOptions,
}
|
||||
|
||||
// name option
|
||||
|
||||
// nameOption holds the CLI flags selecting a component by name.
type nameOption []string

var (
	noNameOptions  = []string{}
	yesNameOptions = []string{"--name", "my-component"}
)

// allNameOptions lists every --name variant exercised by the tests.
var allNameOptions = map[string]nameOption{
	"no":  noNameOptions,
	"yes": yesNameOptions,
}
|
||||
|
||||
// calls checks
|
||||
|
||||
var checkCallsNonDeployedComponent = func(t *testing.T, clientset clientset.Clientset, testContext testContext) {
|
||||
if strings.Contains(testContext.platform, "podman") &&
|
||||
testContext.runningInOption != "deploy" {
|
||||
podmanMock := clientset.PodmanClient.(*podman.MockClient)
|
||||
podmanMock.EXPECT().PodLs()
|
||||
}
|
||||
if strings.Contains(testContext.platform, "kubernetes") {
|
||||
kubeMock := clientset.KubernetesClient.(*kclient.MockClientInterface)
|
||||
dep := appsv1.Deployment{}
|
||||
if testContext.runningInOption != "deploy" {
|
||||
kubeMock.EXPECT().GetDeploymentByName("my-component-app").Return(&dep, nil)
|
||||
}
|
||||
selector := "app.kubernetes.io/instance=my-component,app.kubernetes.io/managed-by=odo,app.kubernetes.io/part-of=app"
|
||||
if testContext.runningInOption == "dev" {
|
||||
selector = selector + ",odo.dev/mode=Dev"
|
||||
} else if testContext.runningInOption == "deploy" {
|
||||
selector = selector + ",odo.dev/mode=Deploy"
|
||||
}
|
||||
kubeMock.EXPECT().GetAllResourcesFromSelector(selector, "a-namespace").Return(nil, nil).AnyTimes()
|
||||
}
|
||||
}
|
||||
|
||||
var checkCallsDeployedComponent = func(t *testing.T, clientset clientset.Clientset, testContext testContext) {
|
||||
if strings.Contains(testContext.platform, "podman") &&
|
||||
testContext.runningInOption != "deploy" {
|
||||
podmanMock := clientset.PodmanClient.(*podman.MockClient)
|
||||
podmanMock.EXPECT().PodLs().Return(map[string]bool{"other-pod": true, "my-component-app": true}, nil)
|
||||
pod := corev1.Pod{}
|
||||
pod.SetName("my-component-app")
|
||||
podmanMock.EXPECT().KubeGenerate("my-component-app").Return(&pod, nil)
|
||||
// The pod and its volumes should be deleted
|
||||
podmanMock.EXPECT().CleanupPodResources(&pod, true)
|
||||
}
|
||||
if strings.Contains(testContext.platform, "kubernetes") {
|
||||
kubeMock := clientset.KubernetesClient.(*kclient.MockClientInterface)
|
||||
dep := appsv1.Deployment{}
|
||||
dep.Kind = "Deployment"
|
||||
dep.SetName("my-component-app")
|
||||
if testContext.runningInOption != "deploy" {
|
||||
kubeMock.EXPECT().GetDeploymentByName("my-component-app").Return(&dep, nil)
|
||||
}
|
||||
selector := "app.kubernetes.io/instance=my-component,app.kubernetes.io/managed-by=odo,app.kubernetes.io/part-of=app"
|
||||
if testContext.runningInOption == "dev" {
|
||||
selector = selector + ",odo.dev/mode=Dev"
|
||||
} else if testContext.runningInOption == "deploy" {
|
||||
selector = selector + ",odo.dev/mode=Deploy"
|
||||
}
|
||||
kubeMock.EXPECT().GetAllResourcesFromSelector(selector, "a-namespace").Return(nil, nil).AnyTimes()
|
||||
kubeMock.EXPECT().GetRestMappingFromUnstructured(gomock.Any()).Return(&meta.RESTMapping{
|
||||
Resource: schema.GroupVersionResource{
|
||||
Group: "apps",
|
||||
Version: "v1",
|
||||
Resource: "deployments",
|
||||
},
|
||||
}, nil)
|
||||
kubeMock.EXPECT().DeleteDynamicResource("my-component-app", schema.GroupVersionResource{
|
||||
Group: "apps",
|
||||
Version: "v1",
|
||||
Resource: "deployments",
|
||||
}, false)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func TestOdoDeleteMatrix(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
args []string
|
||||
|
||||
platforms map[string]platformFunc
|
||||
fscontents map[string]fscontentFunc
|
||||
runningInOptions map[string]runningInOption
|
||||
filesOptions map[string]filesOption
|
||||
nameOptions map[string]nameOption
|
||||
|
||||
wantErr string
|
||||
checkOutput func(t *testing.T, s string)
|
||||
checkFS func(t *testing.T, fs filesystem.Filesystem)
|
||||
checkCalls func(t *testing.T, clientset clientset.Clientset, tetsContext testContext)
|
||||
}{
|
||||
{
|
||||
name: "delete component when Devfile is not present in the directory",
|
||||
args: []string{"delete", "component", "-f"},
|
||||
|
||||
platforms: allPlatforms,
|
||||
fscontents: map[string]fscontentFunc{
|
||||
"no content": noContentFscontent,
|
||||
"nodeJS sources": nodeJsSourcesFsContent,
|
||||
},
|
||||
runningInOptions: allRunningInOptions,
|
||||
filesOptions: allFilesOptions,
|
||||
nameOptions: map[string]nameOption{
|
||||
"no": noNameOptions,
|
||||
},
|
||||
|
||||
wantErr: "The current directory does not represent an odo component",
|
||||
},
|
||||
{
|
||||
name: "delete component using both --files and --name",
|
||||
args: []string{"delete", "component", "-f"},
|
||||
|
||||
platforms: allPlatforms,
|
||||
fscontents: allFscontents,
|
||||
runningInOptions: allRunningInOptions,
|
||||
filesOptions: map[string]filesOption{
|
||||
"yes": yesFilesOptions,
|
||||
},
|
||||
nameOptions: map[string]nameOption{
|
||||
"yes": yesNameOptions,
|
||||
},
|
||||
|
||||
wantErr: "'--files' cannot be used with '--name'; '--files' must be used from a directory containing a Devfile",
|
||||
},
|
||||
{
|
||||
name: "delete component passing an invalid running-in",
|
||||
args: []string{"delete", "component", "-f", "--running-in", "invalid-value"},
|
||||
|
||||
platforms: allPlatforms,
|
||||
fscontents: allFscontents,
|
||||
runningInOptions: map[string]runningInOption{
|
||||
"no": noRunningInOption,
|
||||
},
|
||||
filesOptions: allFilesOptions,
|
||||
nameOptions: allNameOptions,
|
||||
|
||||
wantErr: "invalid value for --running-in: \"invalid-value\". Acceptable values are: dev, deploy",
|
||||
},
|
||||
{
|
||||
name: "using --files in a directory where Devfile was not generated by odo",
|
||||
args: []string{"delete", "component", "-f"},
|
||||
|
||||
platforms: allPlatforms,
|
||||
fscontents: map[string]fscontentFunc{
|
||||
"nodeJS sources and Devfile": nodeJsSourcesAndDevfileFsContent(
|
||||
filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"),
|
||||
fsOptions{}),
|
||||
"nodeJS sources, Devfile and .odo": nodeJsSourcesAndDevfileFsContent(
|
||||
filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"),
|
||||
fsOptions{
|
||||
dotOdoExists: true,
|
||||
}),
|
||||
},
|
||||
runningInOptions: allRunningInOptions,
|
||||
filesOptions: map[string]filesOption{
|
||||
"yes": yesFilesOptions,
|
||||
},
|
||||
nameOptions: map[string]nameOption{
|
||||
"no": noNameOptions,
|
||||
},
|
||||
|
||||
checkOutput: func(t *testing.T, s string) {
|
||||
gomega.Expect(s).ToNot(gomega.ContainSubstring("devfile.yaml"), "should not list the devfile.yaml")
|
||||
},
|
||||
checkFS: func(t *testing.T, fs filesystem.Filesystem) {
|
||||
fileList := helper.ListFilesInDir(".")
|
||||
gomega.Expect(fileList).Should(gomega.ContainElement("devfile.yaml"), "should not delete the devfile.yaml")
|
||||
},
|
||||
checkCalls: checkCallsNonDeployedComponent,
|
||||
},
|
||||
{
|
||||
name: "using --files in a directory where Devfile was generated by odo",
|
||||
args: []string{"delete", "component", "-f"},
|
||||
|
||||
platforms: allPlatforms,
|
||||
fscontents: map[string]fscontentFunc{
|
||||
"nodeJS sources and generated Devfile": nodeJsSourcesAndDevfileFsContent(
|
||||
filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"),
|
||||
fsOptions{
|
||||
generated: []string{"devfile.yaml"},
|
||||
}),
|
||||
},
|
||||
runningInOptions: allRunningInOptions,
|
||||
filesOptions: map[string]filesOption{
|
||||
"yes": yesFilesOptions,
|
||||
},
|
||||
nameOptions: map[string]nameOption{
|
||||
"no": noNameOptions,
|
||||
},
|
||||
|
||||
checkOutput: func(t *testing.T, s string) {
|
||||
gomega.Expect(s).To(gomega.ContainSubstring("devfile.yaml"), "should list the devfile.yaml")
|
||||
},
|
||||
checkFS: func(t *testing.T, fs filesystem.Filesystem) {
|
||||
fileList := helper.ListFilesInDir(".")
|
||||
gomega.Expect(fileList).ShouldNot(gomega.ContainElement("devfile.yaml"), "should delete the devfile.yaml")
|
||||
},
|
||||
checkCalls: checkCallsNonDeployedComponent,
|
||||
},
|
||||
{
|
||||
name: "delete a non deployed component",
|
||||
args: []string{"delete", "component", "-f"},
|
||||
|
||||
platforms: allPlatforms,
|
||||
fscontents: map[string]fscontentFunc{
|
||||
"nodeJS sources and Devfile": nodeJsSourcesAndDevfileFsContent(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), fsOptions{}),
|
||||
"nodeJS sources, Devfile and .odo": nodeJsSourcesAndDevfileFsContent(
|
||||
filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"),
|
||||
fsOptions{
|
||||
dotOdoExists: true,
|
||||
}),
|
||||
"nodeJS sources and generated Devfile": nodeJsSourcesAndDevfileFsContent(
|
||||
filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"),
|
||||
fsOptions{
|
||||
generated: []string{"devfile.yaml"},
|
||||
}),
|
||||
},
|
||||
runningInOptions: allRunningInOptions,
|
||||
filesOptions: allFilesOptions,
|
||||
nameOptions: map[string]nameOption{
|
||||
"no": noNameOptions,
|
||||
},
|
||||
|
||||
checkOutput: func(t *testing.T, s string) {
|
||||
gomega.Expect(s).To(gomega.ContainSubstring("No resource found for component %q", "my-component"))
|
||||
},
|
||||
checkCalls: checkCallsNonDeployedComponent,
|
||||
},
|
||||
{
|
||||
name: "delete a component deployed on podman",
|
||||
args: []string{"delete", "component", "-f"},
|
||||
|
||||
platforms: map[string]platformFunc{
|
||||
"podman only": podmanOnlyPlatform(),
|
||||
"kubernetes and podman": kubernetesAndPodmanPlatform(),
|
||||
},
|
||||
fscontents: map[string]fscontentFunc{
|
||||
"nodeJS sources and Devfile": nodeJsSourcesAndDevfileFsContent(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), fsOptions{}),
|
||||
"nodeJS sources, Devfile and .odo": nodeJsSourcesAndDevfileFsContent(
|
||||
filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"),
|
||||
fsOptions{
|
||||
dotOdoExists: true,
|
||||
}),
|
||||
"nodeJS sources and generated Devfile": nodeJsSourcesAndDevfileFsContent(
|
||||
filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"),
|
||||
fsOptions{
|
||||
generated: []string{"devfile.yaml"},
|
||||
}),
|
||||
},
|
||||
runningInOptions: map[string]runningInOption{
|
||||
"no": noRunningInOption,
|
||||
"dev": devRunninInOption,
|
||||
},
|
||||
filesOptions: allFilesOptions,
|
||||
nameOptions: map[string]nameOption{
|
||||
"no": noNameOptions,
|
||||
},
|
||||
|
||||
checkOutput: func(t *testing.T, s string) {
|
||||
gomega.Expect(s).To(gomega.ContainSubstring("The following pods and associated volumes will get deleted from podman"))
|
||||
gomega.Expect(s).To(gomega.ContainSubstring("- my-component-app"))
|
||||
},
|
||||
checkCalls: checkCallsDeployedComponent,
|
||||
},
|
||||
{
|
||||
name: "delete a component deployed on kubernetes",
|
||||
args: []string{"delete", "component", "-f"},
|
||||
|
||||
platforms: map[string]platformFunc{
|
||||
"kubernetes only": kubernetesOnlyPlatform(),
|
||||
"kubernetes and podman": kubernetesAndPodmanPlatform(),
|
||||
},
|
||||
fscontents: map[string]fscontentFunc{
|
||||
"nodeJS sources and Devfile": nodeJsSourcesAndDevfileFsContent(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), fsOptions{}),
|
||||
"nodeJS sources, Devfile and .odo": nodeJsSourcesAndDevfileFsContent(
|
||||
filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"),
|
||||
fsOptions{
|
||||
dotOdoExists: true,
|
||||
}),
|
||||
"nodeJS sources and generated Devfile": nodeJsSourcesAndDevfileFsContent(
|
||||
filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"),
|
||||
fsOptions{
|
||||
generated: []string{"devfile.yaml"},
|
||||
}),
|
||||
},
|
||||
runningInOptions: map[string]runningInOption{
|
||||
"no": noRunningInOption,
|
||||
"dev": devRunninInOption,
|
||||
},
|
||||
filesOptions: allFilesOptions,
|
||||
nameOptions: map[string]nameOption{
|
||||
"no": noNameOptions,
|
||||
},
|
||||
|
||||
checkOutput: func(t *testing.T, s string) {
|
||||
gomega.Expect(s).To(gomega.ContainSubstring("The following resources will get deleted from cluster"))
|
||||
gomega.Expect(s).To(gomega.ContainSubstring("- Deployment: my-component-app"))
|
||||
},
|
||||
checkCalls: checkCallsDeployedComponent,
|
||||
},
|
||||
} {
|
||||
if tt.platforms == nil {
|
||||
t.Fatal("platforms cannot be nil")
|
||||
}
|
||||
for platform, platformFunc := range tt.platforms {
|
||||
platform := platform
|
||||
platformFunc := platformFunc
|
||||
if tt.fscontents == nil {
|
||||
t.Fatal("fscontents cannot be nil")
|
||||
}
|
||||
for fscontent, fscontentFunc := range tt.fscontents {
|
||||
fscontent := fscontent
|
||||
fscontentFunc := fscontentFunc
|
||||
if tt.runningInOptions == nil {
|
||||
t.Fatal("runningInOptions cannot be nil")
|
||||
}
|
||||
for runningInOption, runningInOptionValue := range tt.runningInOptions {
|
||||
runningInOption := runningInOption
|
||||
runningInOptionValue := runningInOptionValue
|
||||
if tt.filesOptions == nil {
|
||||
t.Fatal("filesOptions cannot be nil")
|
||||
}
|
||||
for filesOption, filesOptionValue := range tt.filesOptions {
|
||||
filesOption := filesOption
|
||||
filesOptionValue := filesOptionValue
|
||||
if tt.nameOptions == nil {
|
||||
t.Fatal("nameOptions cannot be nil")
|
||||
}
|
||||
for nameOption, nameOptionValue := range tt.nameOptions {
|
||||
nameOption := nameOption
|
||||
nameOptionValue := nameOptionValue
|
||||
|
||||
testCtx := testContext{
|
||||
platform: platform,
|
||||
fscontent: fscontent,
|
||||
runningInOption: runningInOption,
|
||||
filesOption: filesOption,
|
||||
nameOption: nameOption,
|
||||
}
|
||||
t.Run(
|
||||
tt.name+
|
||||
fmt.Sprintf(" [platform=%s]", platform)+
|
||||
fmt.Sprintf(" [fscontent=%s]", fscontent)+
|
||||
fmt.Sprintf(" [runningInOptions=%s]", runningInOption)+
|
||||
fmt.Sprintf(" [filesOption=%s]", filesOption)+
|
||||
fmt.Sprintf(" [nameOption=%s]", nameOption),
|
||||
func(t *testing.T) {
|
||||
gomega.RegisterFailHandler(func(message string, callerSkip ...int) {
|
||||
t.Fatalf(message)
|
||||
})
|
||||
clientset := clientset.Clientset{}
|
||||
env := map[string]string{}
|
||||
config := map[string]string{}
|
||||
platformFunc(t, env, config, &clientset)
|
||||
if tt.checkCalls != nil {
|
||||
tt.checkCalls(t, clientset, testCtx)
|
||||
}
|
||||
|
||||
args := append(tt.args, runningInOptionValue...)
|
||||
args = append(args, filesOptionValue...)
|
||||
args = append(args, nameOptionValue...)
|
||||
runCommand(t, args, runOptions{env: env, config: config}, clientset, fscontentFunc, func(err error, stdout, stderr string) {
|
||||
if (err != nil) != (tt.wantErr != "") {
|
||||
t.Fatalf("errWanted: %v\nGot: %v (%s)", tt.wantErr != "", err != nil, err)
|
||||
}
|
||||
|
||||
if tt.wantErr != "" {
|
||||
if !strings.Contains(err.Error(), tt.wantErr) {
|
||||
t.Errorf("%q\nerror does not contain:\n%q", err.Error(), tt.wantErr)
|
||||
}
|
||||
}
|
||||
if tt.checkOutput != nil {
|
||||
tt.checkOutput(t, stdout)
|
||||
}
|
||||
|
||||
if tt.checkFS != nil {
|
||||
tt.checkFS(t, clientset.FS)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
11
cmd/odo/doc.go
Normal file
11
cmd/odo/doc.go
Normal file
@@ -0,0 +1,11 @@
|
||||
// package main includes tests for odo covering (at least) the CLI packages.
|
||||
// You can run the tests on this package and get the coverage of these tests
|
||||
// across the entire sources with the commands:
|
||||
//
|
||||
// $ go test -v -coverpkg=./... -coverprofile=profile.cov ./cmd/odo
|
||||
// $ go tool cover -html profile.cov
|
||||
//
|
||||
// To get the coverage of all the tests across the entire sources:
|
||||
// $ go test -v -coverpkg=./... -coverprofile=profile.cov ./cmd/odo ./pkg/...
|
||||
// $ go tool cover -html profile.cov
|
||||
package main
|
||||
108
pkg/kclient/all_test.go
Normal file
108
pkg/kclient/all_test.go
Normal file
@@ -0,0 +1,108 @@
|
||||
package kclient
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/kubectl/pkg/scheme"
|
||||
)
|
||||
|
||||
func TestClient_GetAllResourcesFromSelector(t *testing.T) {
|
||||
type args struct {
|
||||
selector string
|
||||
ns string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
objects func() []runtime.Object
|
||||
checkResult func([]unstructured.Unstructured)
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "a deployment exists, matching labels",
|
||||
args: args{
|
||||
selector: "key1=value1",
|
||||
},
|
||||
objects: func() []runtime.Object {
|
||||
dep1 := appsv1.Deployment{}
|
||||
dep1.SetName("deploy1")
|
||||
dep1.SetLabels(map[string]string{
|
||||
"key1": "value1",
|
||||
"key2": "value2",
|
||||
})
|
||||
return []runtime.Object{&dep1}
|
||||
},
|
||||
checkResult: func(u []unstructured.Unstructured) {
|
||||
if len(u) != 1 {
|
||||
t.Fatalf("len of result should be %d but is %d", 1, len(u))
|
||||
}
|
||||
if u[0].GetName() != "deploy1" {
|
||||
t.Errorf("Name of 1st result should be %q but is %q", "deploy1", u[0].GetName())
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "a deployment exists, not matching labels",
|
||||
args: args{
|
||||
selector: "key1=value1",
|
||||
},
|
||||
objects: func() []runtime.Object {
|
||||
dep1 := appsv1.Deployment{}
|
||||
dep1.SetName("deploy1")
|
||||
dep1.SetLabels(map[string]string{
|
||||
"key1": "value2",
|
||||
"key2": "value1",
|
||||
})
|
||||
return []runtime.Object{&dep1}
|
||||
},
|
||||
checkResult: func(u []unstructured.Unstructured) {
|
||||
if len(u) != 0 {
|
||||
t.Fatalf("len of result should be %d but is %d", 0, len(u))
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
fkclient, fkclientset := FakeNew()
|
||||
fkclient.Namespace = "default"
|
||||
|
||||
objects := []runtime.Object{}
|
||||
if tt.objects != nil {
|
||||
objects = tt.objects()
|
||||
}
|
||||
fkclient.SetDynamicClient(scheme.Scheme, objects...)
|
||||
|
||||
fkclientset.Kubernetes.Fake.Resources = []*metav1.APIResourceList{
|
||||
{
|
||||
GroupVersion: "apps/v1",
|
||||
APIResources: []metav1.APIResource{
|
||||
{
|
||||
Group: "apps",
|
||||
Version: "v1",
|
||||
Kind: "Deployment",
|
||||
Name: "deployments",
|
||||
SingularName: "deployment",
|
||||
Namespaced: true,
|
||||
Verbs: []string{"list"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
got, err := fkclient.GetAllResourcesFromSelector(tt.args.selector, tt.args.ns)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("Client.GetAllResourcesFromSelector() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if tt.checkResult != nil {
|
||||
tt.checkResult(got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -8,6 +8,8 @@ import (
|
||||
odoFake "github.com/redhat-developer/odo/pkg/kclient/fake"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/discovery/cached/memory"
|
||||
fakediscovery "k8s.io/client-go/discovery/fake"
|
||||
fakeKubeClientset "k8s.io/client-go/kubernetes/fake"
|
||||
)
|
||||
|
||||
@@ -47,7 +49,8 @@ func FakeNewWithIngressSupports(networkingv1Supported, extensionV1Supported bool
|
||||
client.isExtensionV1Beta1IngressSupported = extensionV1Supported
|
||||
client.isNetworkingV1IngressSupported = networkingv1Supported
|
||||
client.checkIngressSupports = false
|
||||
client.SetDiscoveryInterface(NewKubernetesFakedDiscovery(true, true))
|
||||
client.discoveryClient = fkclientset.Kubernetes.Discovery().(*fakediscovery.FakeDiscovery)
|
||||
client.cachedDiscoveryClient = memory.NewMemCacheClient(client.discoveryClient)
|
||||
|
||||
fkclientset.ProjClientset = fakeProjClientset.NewSimpleClientset()
|
||||
client.projectClient = fkclientset.ProjClientset.ProjectV1()
|
||||
|
||||
@@ -1,10 +1,13 @@
|
||||
package kclient
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/redhat-developer/odo/pkg/log"
|
||||
"k8s.io/kubectl/pkg/util/term"
|
||||
"strings"
|
||||
|
||||
"k8s.io/cli-runtime/pkg/genericclioptions"
|
||||
|
||||
@@ -13,9 +16,11 @@ import (
|
||||
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/discovery"
|
||||
"k8s.io/client-go/discovery/cached/memory"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/dynamic/fake"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/restmapper"
|
||||
@@ -84,6 +89,10 @@ var _ platform.Client = (*Client)(nil)
|
||||
|
||||
// New creates a new client
|
||||
func New() (*Client, error) {
|
||||
// Inside a cluster (IBM Cloud CI for example), even if KUBECONFIG=/dev/null, the in-cluster connection would succeed
|
||||
if os.Getenv("KUBECONFIG") == "/dev/null" {
|
||||
return nil, errors.New("access to Kubernetes cluster is disabled by KUBECONFIG=/dev/null")
|
||||
}
|
||||
return NewForConfig(nil)
|
||||
}
|
||||
|
||||
@@ -220,6 +229,10 @@ func (c *Client) SetDiscoveryInterface(client discovery.DiscoveryInterface) {
|
||||
c.discoveryClient = client
|
||||
}
|
||||
|
||||
func (c *Client) SetDynamicClient(scheme *runtime.Scheme, objects ...runtime.Object) {
|
||||
c.DynamicClient = fake.NewSimpleDynamicClient(scheme, objects...)
|
||||
}
|
||||
|
||||
func (c *Client) IsResourceSupported(apiGroup, apiVersion, resourceName string) (bool, error) {
|
||||
klog.V(4).Infof("Checking if %q resource is supported", resourceName)
|
||||
|
||||
|
||||
@@ -238,6 +238,14 @@ func Printf(format string, a ...interface{}) {
|
||||
}
|
||||
}
|
||||
|
||||
// Fprintf will output in an appropriate "information" manner; for e.g.
|
||||
// • <message>
|
||||
func Fprintf(w io.Writer, format string, a ...interface{}) {
|
||||
if !IsJSON() {
|
||||
fmt.Fprintf(w, "%s%s%s%s\n", prefixSpacing, getSpacingString(), suffixSpacing, fmt.Sprintf(format, a...))
|
||||
}
|
||||
}
|
||||
|
||||
// Println will output a new line when applicable
|
||||
func Println() {
|
||||
if !IsJSON() {
|
||||
@@ -245,6 +253,13 @@ func Println() {
|
||||
}
|
||||
}
|
||||
|
||||
// Fprintln will output a new line when applicable
|
||||
func Fprintln(w io.Writer) {
|
||||
if !IsJSON() {
|
||||
fmt.Fprintln(w)
|
||||
}
|
||||
}
|
||||
|
||||
// Success will output in an appropriate "success" manner
|
||||
// ✓ <message>
|
||||
func Success(a ...interface{}) {
|
||||
@@ -291,6 +306,16 @@ func Warningf(format string, a ...interface{}) {
|
||||
}
|
||||
}
|
||||
|
||||
// Fwarningf will output in an appropriate "warning" manner
|
||||
//
|
||||
// ⚠ <message>
|
||||
func Fwarningf(w io.Writer, format string, a ...interface{}) {
|
||||
if !IsJSON() {
|
||||
yellow := color.New(color.FgYellow).SprintFunc()
|
||||
fmt.Fprintf(w, " %s%s%s\n", yellow(getWarningString()), suffixSpacing, fmt.Sprintf(format, a...))
|
||||
}
|
||||
}
|
||||
|
||||
// Fsuccess will output in an appropriate "progress" manner in out writer
|
||||
//
|
||||
// ✓ <message>
|
||||
@@ -384,6 +409,15 @@ func Errorf(format string, a ...interface{}) {
|
||||
}
|
||||
}
|
||||
|
||||
// Ferrorf will output in an appropriate "progress" manner
|
||||
// ✗ <message>
|
||||
func Ferrorf(w io.Writer, format string, a ...interface{}) {
|
||||
if !IsJSON() {
|
||||
red := color.New(color.FgRed).SprintFunc()
|
||||
fmt.Fprintf(w, " %s%s%s\n", red(getErrString()), suffixSpacing, fmt.Sprintf(format, a...))
|
||||
}
|
||||
}
|
||||
|
||||
// Error will output in an appropriate "progress" manner
|
||||
// ✗ <message>
|
||||
func Error(a ...interface{}) {
|
||||
@@ -393,6 +427,15 @@ func Error(a ...interface{}) {
|
||||
}
|
||||
}
|
||||
|
||||
// Frror will output in an appropriate "progress" manner
|
||||
// ✗ <message>
|
||||
func Ferror(w io.Writer, a ...interface{}) {
|
||||
if !IsJSON() {
|
||||
red := color.New(color.FgRed).SprintFunc()
|
||||
fmt.Fprintf(w, "%s%s%s%s", prefixSpacing, red(getErrString()), suffixSpacing, fmt.Sprintln(a...))
|
||||
}
|
||||
}
|
||||
|
||||
// Info will simply print out information on a new (bolded) line
|
||||
// this is intended as information *after* something has been deployed
|
||||
// **Line in bold**
|
||||
@@ -479,6 +522,16 @@ func Spinnerf(format string, a ...interface{}) *Status {
|
||||
return s
|
||||
}
|
||||
|
||||
// Fspinnerf creates a spinner, sets the prefix then returns it.
|
||||
// Remember to use .End(bool) to stop the spin / when you're done.
|
||||
// For example: defer s.End(false)
|
||||
// for situations where spinning isn't viable (debug)
|
||||
func Fspinnerf(w io.Writer, format string, a ...interface{}) *Status {
|
||||
s := NewStatus(w)
|
||||
s.Start(fmt.Sprintf(format, a...), IsDebug())
|
||||
return s
|
||||
}
|
||||
|
||||
// SpinnerNoSpin is the same as the "Spinner" function but forces no spinning
|
||||
func SpinnerNoSpin(status string) *Status {
|
||||
return ExplicitSpinner(status, true)
|
||||
|
||||
@@ -141,7 +141,7 @@ func (o *ComponentOptions) Run(ctx context.Context) error {
|
||||
}
|
||||
remainingResources, err := o.deleteDevfileComponent(ctx)
|
||||
if err == nil {
|
||||
printRemainingResources(ctx, remainingResources)
|
||||
o.printRemainingResources(ctx, remainingResources)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -155,7 +155,7 @@ func (o *ComponentOptions) deleteNamedComponent(ctx context.Context) error {
|
||||
podmanResources []*corev1.Pod
|
||||
err error
|
||||
)
|
||||
log.Info("Searching resources to delete, please wait...")
|
||||
log.Finfof(o.clientset.Stdout, "Searching resources to delete, please wait...")
|
||||
if o.clientset.KubernetesClient != nil {
|
||||
clusterResources, err = o.clientset.DeleteClient.ListClusterResourcesToDelete(ctx, o.name, o.namespace, o.runningIn)
|
||||
if err != nil {
|
||||
@@ -171,37 +171,37 @@ func (o *ComponentOptions) deleteNamedComponent(ctx context.Context) error {
|
||||
}
|
||||
|
||||
if len(clusterResources) == 0 && len(podmanResources) == 0 {
|
||||
log.Infof(messageWithPlatforms(
|
||||
log.Finfof(o.clientset.Stdout, messageWithPlatforms(
|
||||
o.clientset.KubernetesClient != nil,
|
||||
o.clientset.PodmanClient != nil,
|
||||
o.name, o.namespace,
|
||||
))
|
||||
return nil
|
||||
}
|
||||
printDevfileComponents(o.name, o.namespace, clusterResources, podmanResources)
|
||||
o.printDevfileComponents(o.name, o.namespace, clusterResources, podmanResources)
|
||||
|
||||
if o.forceFlag || ui.Proceed("Are you sure you want to delete these resources?") {
|
||||
|
||||
if len(clusterResources) > 0 {
|
||||
spinner := log.Spinnerf("Deleting resources from cluster")
|
||||
spinner := log.Fspinnerf(o.clientset.Stdout, "Deleting resources from cluster")
|
||||
failed := o.clientset.DeleteClient.DeleteResources(clusterResources, o.waitFlag)
|
||||
for _, fail := range failed {
|
||||
log.Warningf("Failed to delete the %q resource: %s\n", fail.GetKind(), fail.GetName())
|
||||
log.Fwarningf(o.clientset.Stderr, "Failed to delete the %q resource: %s\n", fail.GetKind(), fail.GetName())
|
||||
}
|
||||
spinner.End(true)
|
||||
successMsg := fmt.Sprintf("The component %q is successfully deleted from namespace %q", o.name, o.namespace)
|
||||
if o.runningIn != "" {
|
||||
successMsg = fmt.Sprintf("The component %q running in the %s mode is successfully deleted from namespace %q", o.name, o.runningIn, o.namespace)
|
||||
}
|
||||
log.Info(successMsg)
|
||||
log.Finfof(o.clientset.Stdout, successMsg)
|
||||
}
|
||||
|
||||
if len(podmanResources) > 0 {
|
||||
spinner := log.Spinnerf("Deleting resources from podman")
|
||||
spinner := log.Fspinnerf(o.clientset.Stdout, "Deleting resources from podman")
|
||||
for _, pod := range podmanResources {
|
||||
err = o.clientset.PodmanClient.CleanupPodResources(pod, true)
|
||||
if err != nil {
|
||||
log.Warningf("Failed to delete the pod %q from podman: %s\n", pod.GetName(), err)
|
||||
log.Fwarningf(o.clientset.Stderr, "Failed to delete the pod %q from podman: %s\n", pod.GetName(), err)
|
||||
}
|
||||
}
|
||||
spinner.End(true)
|
||||
@@ -209,13 +209,13 @@ func (o *ComponentOptions) deleteNamedComponent(ctx context.Context) error {
|
||||
if o.runningIn != "" {
|
||||
successMsg = fmt.Sprintf("The component %q running in the %s mode is successfully deleted podman", o.name, o.runningIn)
|
||||
}
|
||||
log.Info(successMsg)
|
||||
log.Finfof(o.clientset.Stdout, successMsg)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Error("Aborting deletion of component")
|
||||
log.Ferror(o.clientset.Stderr, "Aborting deletion of component")
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -231,17 +231,17 @@ func messageWithPlatforms(cluster, podman bool, name, namespace string) string {
|
||||
}
|
||||
|
||||
// printRemainingResources lists the remaining cluster resources that are not found in the devfile.
|
||||
func printRemainingResources(ctx context.Context, remainingResources []unstructured.Unstructured) {
|
||||
func (o *ComponentOptions) printRemainingResources(ctx context.Context, remainingResources []unstructured.Unstructured) {
|
||||
if len(remainingResources) == 0 {
|
||||
return
|
||||
}
|
||||
componentName := odocontext.GetComponentName(ctx)
|
||||
namespace := odocontext.GetNamespace(ctx)
|
||||
log.Printf("There are still resources left in the cluster that might be belonging to the deleted component.")
|
||||
log.Fprintf(o.clientset.Stdout, "There are still resources left in the cluster that might be belonging to the deleted component.")
|
||||
for _, resource := range remainingResources {
|
||||
fmt.Printf("\t- %s: %s\n", resource.GetKind(), resource.GetName())
|
||||
fmt.Fprintf(o.clientset.Stdout, "\t- %s: %s\n", resource.GetKind(), resource.GetName())
|
||||
}
|
||||
log.Infof("If you want to delete those, execute `odo delete component --name %s --namespace %s`\n", componentName, namespace)
|
||||
log.Finfof(o.clientset.Stdout, "If you want to delete those, execute `odo delete component --name %s --namespace %s`\n", componentName, namespace)
|
||||
}
|
||||
|
||||
// deleteDevfileComponent deletes all the components defined by the devfile in the current directory
|
||||
@@ -265,14 +265,14 @@ func (o *ComponentOptions) deleteDevfileComponent(ctx context.Context) ([]unstru
|
||||
err error
|
||||
)
|
||||
|
||||
log.Info("Searching resources to delete, please wait...")
|
||||
log.Finfof(o.clientset.Stdout, "Searching resources to delete, please wait...")
|
||||
|
||||
if o.clientset.KubernetesClient != nil {
|
||||
isClusterInnerLoopDeployed, clusterResources, err = o.clientset.DeleteClient.ListClusterResourcesToDeleteFromDevfile(
|
||||
*devfileObj, appName, componentName, o.runningIn)
|
||||
if err != nil {
|
||||
if clierrors.AsWarning(err) {
|
||||
log.Warning(err.Error())
|
||||
log.Fwarning(o.clientset.Stderr, err.Error())
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
@@ -291,7 +291,7 @@ func (o *ComponentOptions) deleteDevfileComponent(ctx context.Context) ([]unstru
|
||||
isPodmanInnerLoopDeployed, podmanPods, err = o.clientset.DeleteClient.ListPodmanResourcesToDelete(appName, componentName, o.runningIn)
|
||||
if err != nil {
|
||||
if clierrors.AsWarning(err) {
|
||||
log.Warning(err.Error())
|
||||
log.Fwarning(o.clientset.Stderr, err.Error())
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
@@ -300,14 +300,14 @@ func (o *ComponentOptions) deleteDevfileComponent(ctx context.Context) ([]unstru
|
||||
}
|
||||
|
||||
if !(hasClusterResources || hasPodmanResources) {
|
||||
log.Infof(messageWithPlatforms(o.clientset.KubernetesClient != nil, o.clientset.PodmanClient != nil, componentName, namespace))
|
||||
log.Finfof(o.clientset.Stdout, messageWithPlatforms(o.clientset.KubernetesClient != nil, o.clientset.PodmanClient != nil, componentName, namespace))
|
||||
if !o.withFilesFlag {
|
||||
// check for resources here
|
||||
return remainingResources, nil
|
||||
}
|
||||
}
|
||||
|
||||
printDevfileComponents(componentName, namespace, clusterResources, podmanPods)
|
||||
o.printDevfileComponents(componentName, namespace, clusterResources, podmanPods)
|
||||
|
||||
var filesToDelete []string
|
||||
if o.withFilesFlag {
|
||||
@@ -315,7 +315,7 @@ func (o *ComponentOptions) deleteDevfileComponent(ctx context.Context) ([]unstru
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
printFileCreatedByOdo(filesToDelete, hasClusterResources)
|
||||
o.printFileCreatedByOdo(filesToDelete, hasClusterResources)
|
||||
}
|
||||
hasFilesToDelete := len(filesToDelete) != 0
|
||||
|
||||
@@ -331,29 +331,29 @@ func (o *ComponentOptions) deleteDevfileComponent(ctx context.Context) ([]unstru
|
||||
if o.forceFlag || ui.Proceed(msg) {
|
||||
|
||||
if hasClusterResources {
|
||||
spinner := log.Spinnerf("Deleting resources from cluster")
|
||||
spinner := log.Fspinnerf(o.clientset.Stdout, "Deleting resources from cluster")
|
||||
|
||||
// if innerloop deployment resource is present, then execute preStop events
|
||||
if isClusterInnerLoopDeployed {
|
||||
err = o.clientset.DeleteClient.ExecutePreStopEvents(ctx, *devfileObj, appName, componentName)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to execute preStop events: %v", err)
|
||||
log.Ferrorf(o.clientset.Stderr, "Failed to execute preStop events: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// delete all the resources
|
||||
failed := o.clientset.DeleteClient.DeleteResources(clusterResources, o.waitFlag)
|
||||
for _, fail := range failed {
|
||||
log.Warningf("Failed to delete the %q resource: %s\n", fail.GetKind(), fail.GetName())
|
||||
log.Fwarningf(o.clientset.Stderr, "Failed to delete the %q resource: %s\n", fail.GetKind(), fail.GetName())
|
||||
}
|
||||
|
||||
spinner.End(true)
|
||||
log.Infof("The component %q is successfully deleted from namespace %q\n", componentName, namespace)
|
||||
log.Finfof(o.clientset.Stdout, "The component %q is successfully deleted from namespace %q\n", componentName, namespace)
|
||||
|
||||
}
|
||||
|
||||
if hasPodmanResources {
|
||||
spinner := log.Spinnerf("Deleting resources from podman")
|
||||
spinner := log.Fspinnerf(o.clientset.Stdout, "Deleting resources from podman")
|
||||
if isPodmanInnerLoopDeployed {
|
||||
// TODO(feloy) #6424
|
||||
_ = isPodmanInnerLoopDeployed
|
||||
@@ -361,11 +361,11 @@ func (o *ComponentOptions) deleteDevfileComponent(ctx context.Context) ([]unstru
|
||||
for _, pod := range podmanPods {
|
||||
err = o.clientset.PodmanClient.CleanupPodResources(pod, true)
|
||||
if err != nil {
|
||||
log.Warningf("Failed to delete the pod %q from podman: %s\n", pod.GetName(), err)
|
||||
log.Fwarningf(o.clientset.Stderr, "Failed to delete the pod %q from podman: %s\n", pod.GetName(), err)
|
||||
}
|
||||
}
|
||||
spinner.End(true)
|
||||
log.Infof("The component %q is successfully deleted from podman", componentName)
|
||||
log.Finfof(o.clientset.Stdout, "The component %q is successfully deleted from podman", componentName)
|
||||
}
|
||||
|
||||
if o.withFilesFlag {
|
||||
@@ -373,19 +373,19 @@ func (o *ComponentOptions) deleteDevfileComponent(ctx context.Context) ([]unstru
|
||||
remainingFiles := o.deleteFilesCreatedByOdo(o.clientset.FS, filesToDelete)
|
||||
var listOfFiles []string
|
||||
for f, e := range remainingFiles {
|
||||
log.Warningf("Failed to delete file or directory: %s: %v\n", f, e)
|
||||
log.Fwarningf(o.clientset.Stderr, "Failed to delete file or directory: %s: %v\n", f, e)
|
||||
listOfFiles = append(listOfFiles, "\t- "+f)
|
||||
}
|
||||
if len(remainingFiles) != 0 {
|
||||
log.Printf("There are still files or directories that could not be deleted.")
|
||||
fmt.Println(strings.Join(listOfFiles, "\n"))
|
||||
log.Info("You need to manually delete those.")
|
||||
log.Fprintf(o.clientset.Stdout, "There are still files or directories that could not be deleted.")
|
||||
fmt.Fprintln(o.clientset.Stdout, strings.Join(listOfFiles, "\n"))
|
||||
log.Finfof(o.clientset.Stdout, "You need to manually delete those.")
|
||||
}
|
||||
}
|
||||
return remainingResources, nil
|
||||
}
|
||||
|
||||
log.Error("Aborting deletion of component")
|
||||
log.Ferror(o.clientset.Stderr, "Aborting deletion of component")
|
||||
return remainingResources, nil
|
||||
}
|
||||
|
||||
@@ -410,12 +410,12 @@ func listResourcesMissingFromDevfilePresentOnCluster(componentName string, devfi
|
||||
}
|
||||
|
||||
// printDevfileResources prints the devfile components for ComponentOptions.deleteDevfileComponent
|
||||
func printDevfileComponents(
|
||||
func (o *ComponentOptions) printDevfileComponents(
|
||||
componentName, namespace string,
|
||||
k8sResources []unstructured.Unstructured,
|
||||
podmanResources []*corev1.Pod,
|
||||
) {
|
||||
log.Infof(infoMsg(
|
||||
log.Finfof(o.clientset.Stdout, infoMsg(
|
||||
len(k8sResources) != 0,
|
||||
len(podmanResources) != 0,
|
||||
componentName,
|
||||
@@ -423,19 +423,19 @@ func printDevfileComponents(
|
||||
))
|
||||
|
||||
if len(k8sResources) != 0 {
|
||||
log.Printf("The following resources will get deleted from cluster:")
|
||||
log.Fprintf(o.clientset.Stdout, "The following resources will get deleted from cluster:")
|
||||
for _, resource := range k8sResources {
|
||||
log.Printf("\t- %s: %s", resource.GetKind(), resource.GetName())
|
||||
log.Fprintf(o.clientset.Stdout, "\t- %s: %s", resource.GetKind(), resource.GetName())
|
||||
}
|
||||
log.Println()
|
||||
log.Fprintln(o.clientset.Stdout)
|
||||
}
|
||||
|
||||
if len(podmanResources) != 0 {
|
||||
log.Printf("The following pods and associated volumes will get deleted from podman:")
|
||||
log.Fprintf(o.clientset.Stdout, "The following pods and associated volumes will get deleted from podman:")
|
||||
for _, pod := range podmanResources {
|
||||
log.Printf("\t- %s", pod.GetName())
|
||||
log.Fprintf(o.clientset.Stdout, "\t- %s", pod.GetName())
|
||||
}
|
||||
log.Println()
|
||||
log.Fprintln(o.clientset.Stdout)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -477,7 +477,7 @@ func getFilesCreatedByOdo(filesys filesystem.Filesystem, ctx context.Context) ([
|
||||
return list, nil
|
||||
}
|
||||
|
||||
func printFileCreatedByOdo(files []string, hasClusterResources bool) {
|
||||
func (o *ComponentOptions) printFileCreatedByOdo(files []string, hasClusterResources bool) {
|
||||
if len(files) == 0 {
|
||||
return
|
||||
}
|
||||
@@ -486,9 +486,9 @@ func printFileCreatedByOdo(files []string, hasClusterResources bool) {
|
||||
if hasClusterResources {
|
||||
m += "also "
|
||||
}
|
||||
log.Info(m + "delete the following files and directories:")
|
||||
log.Finfof(o.clientset.Stdout, m+"delete the following files and directories:")
|
||||
for _, f := range files {
|
||||
fmt.Println("\t- " + f)
|
||||
fmt.Fprintln(o.clientset.Stdout, "\t- "+f)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -377,6 +377,8 @@ func TestComponentOptions_deleteNamedComponent(t *testing.T) {
|
||||
forceFlag: tt.fields.forceFlag,
|
||||
runningIn: tt.fields.runningIn,
|
||||
clientset: &clientset.Clientset{
|
||||
Stdout: os.Stdout,
|
||||
Stderr: os.Stderr,
|
||||
KubernetesClient: tt.fields.kubernetesClient(ctrl),
|
||||
DeleteClient: tt.fields.deleteComponentClient(ctrl),
|
||||
PodmanClient: tt.fields.podmanClient(ctrl),
|
||||
@@ -643,6 +645,8 @@ func TestComponentOptions_deleteDevfileComponent(t *testing.T) {
|
||||
forceFlag: tt.fields.forceFlag,
|
||||
runningIn: tt.fields.runningIn,
|
||||
clientset: &clientset.Clientset{
|
||||
Stdout: os.Stdout,
|
||||
Stderr: os.Stderr,
|
||||
KubernetesClient: kubeClient,
|
||||
DeleteClient: deleteClient,
|
||||
},
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"github.com/redhat-developer/odo/pkg/dev/kubedev"
|
||||
"github.com/redhat-developer/odo/pkg/dev/podmandev"
|
||||
"github.com/redhat-developer/odo/pkg/exec"
|
||||
"github.com/redhat-developer/odo/pkg/log"
|
||||
"github.com/redhat-developer/odo/pkg/logs"
|
||||
"github.com/redhat-developer/odo/pkg/odo/commonflags"
|
||||
"github.com/redhat-developer/odo/pkg/podman"
|
||||
@@ -179,9 +180,13 @@ func Fetch(command *cobra.Command, platform string, testClientset Clientset) (*C
|
||||
|
||||
if testClientset.Stdout != nil {
|
||||
dep.Stdout = testClientset.Stdout
|
||||
} else {
|
||||
dep.Stdout = log.GetStdout()
|
||||
}
|
||||
if testClientset.Stderr != nil {
|
||||
dep.Stderr = testClientset.Stderr
|
||||
} else {
|
||||
dep.Stderr = log.GetStderr()
|
||||
}
|
||||
|
||||
/* Without sub-dependencies */
|
||||
@@ -193,26 +198,33 @@ func Fetch(command *cobra.Command, platform string, testClientset Clientset) (*C
|
||||
}
|
||||
}
|
||||
if isDefined(command, KUBERNETES) || isDefined(command, KUBERNETES_NULLABLE) {
|
||||
dep.KubernetesClient, err = kclient.New()
|
||||
if err != nil {
|
||||
// only return error is KUBERNETES_NULLABLE is not defined in combination with KUBERNETES
|
||||
if isDefined(command, KUBERNETES) && !isDefined(command, KUBERNETES_NULLABLE) {
|
||||
return nil, err
|
||||
if testClientset.KubernetesClient != nil {
|
||||
dep.KubernetesClient = testClientset.KubernetesClient
|
||||
} else {
|
||||
dep.KubernetesClient, err = kclient.New()
|
||||
if err != nil {
|
||||
// only return error is KUBERNETES_NULLABLE is not defined in combination with KUBERNETES
|
||||
if isDefined(command, KUBERNETES) && !isDefined(command, KUBERNETES_NULLABLE) {
|
||||
return nil, err
|
||||
}
|
||||
klog.V(3).Infof("no Kubernetes client initialized: %v", err)
|
||||
dep.KubernetesClient = nil
|
||||
}
|
||||
klog.V(3).Infof("no Kubernetes client initialized: %v", err)
|
||||
dep.KubernetesClient = nil
|
||||
}
|
||||
|
||||
}
|
||||
if isDefined(command, PODMAN) || isDefined(command, PODMAN_NULLABLE) {
|
||||
dep.PodmanClient, err = podman.NewPodmanCli(ctx)
|
||||
if err != nil {
|
||||
// send error in case the command is to run on podman platform or if PODMAN clientset is required.
|
||||
if isDefined(command, PODMAN) || platform == commonflags.PlatformPodman {
|
||||
return nil, podman.NewPodmanNotFoundError(err)
|
||||
if testClientset.PodmanClient != nil {
|
||||
dep.PodmanClient = testClientset.PodmanClient
|
||||
} else {
|
||||
dep.PodmanClient, err = podman.NewPodmanCli(ctx)
|
||||
if err != nil {
|
||||
// send error in case the command is to run on podman platform or if PODMAN clientset is required.
|
||||
if isDefined(command, PODMAN) || platform == commonflags.PlatformPodman {
|
||||
return nil, podman.NewPodmanNotFoundError(err)
|
||||
}
|
||||
klog.V(3).Infof("no Podman client initialized: %v", err)
|
||||
dep.PodmanClient = nil
|
||||
}
|
||||
klog.V(3).Infof("no Podman client initialized: %v", err)
|
||||
dep.PodmanClient = nil
|
||||
}
|
||||
}
|
||||
if isDefined(command, PREFERENCE) {
|
||||
|
||||
416
pkg/podman/podman_test.go
Normal file
416
pkg/podman/podman_test.go
Normal file
@@ -0,0 +1,416 @@
|
||||
package podman
|
||||
|
||||
import (
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
func TestPodmanCli_PodLs(t *testing.T) {
|
||||
type fields struct {
|
||||
podmanCmd string
|
||||
podmanCmdInitTimeout time.Duration
|
||||
containerRunGlobalExtraArgs []string
|
||||
containerRunExtraArgs []string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
populateFS func()
|
||||
want map[string]bool
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "command fails",
|
||||
fields: fields{
|
||||
podmanCmd: "false",
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "command works, returns nothing",
|
||||
fields: fields{
|
||||
podmanCmd: "true",
|
||||
},
|
||||
wantErr: false,
|
||||
want: map[string]bool{},
|
||||
},
|
||||
{
|
||||
name: "command works, returns pods",
|
||||
fields: fields{
|
||||
podmanCmd: "./podman.fake.sh",
|
||||
},
|
||||
populateFS: func() {
|
||||
script := []byte(`#!/bin/sh
|
||||
case "$*" in
|
||||
"pod list --format {{.Name}} --noheading")
|
||||
echo name1
|
||||
echo name2
|
||||
echo name3
|
||||
;;
|
||||
esac`)
|
||||
err := os.WriteFile("podman.fake.sh", script, 0755)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
},
|
||||
wantErr: false,
|
||||
want: map[string]bool{
|
||||
"name1": true,
|
||||
"name2": true,
|
||||
"name3": true,
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
if tt.populateFS != nil {
|
||||
originWd, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
_ = os.Chdir(originWd)
|
||||
}()
|
||||
cwd := t.TempDir()
|
||||
err = os.Chdir(cwd)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tt.populateFS()
|
||||
}
|
||||
|
||||
o := &PodmanCli{
|
||||
podmanCmd: tt.fields.podmanCmd,
|
||||
podmanCmdInitTimeout: tt.fields.podmanCmdInitTimeout,
|
||||
containerRunGlobalExtraArgs: tt.fields.containerRunGlobalExtraArgs,
|
||||
containerRunExtraArgs: tt.fields.containerRunExtraArgs,
|
||||
}
|
||||
got, err := o.PodLs()
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("PodmanCli.PodLs() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("PodmanCli.PodLs() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPodmanCli_KubeGenerate(t *testing.T) {
|
||||
type fields struct {
|
||||
podmanCmd string
|
||||
podmanCmdInitTimeout time.Duration
|
||||
containerRunGlobalExtraArgs []string
|
||||
containerRunExtraArgs []string
|
||||
}
|
||||
type args struct {
|
||||
name string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
populateFS func()
|
||||
args args
|
||||
checkResult func(*corev1.Pod)
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "pod not found",
|
||||
args: args{
|
||||
name: "pod-not-found",
|
||||
},
|
||||
fields: fields{
|
||||
podmanCmd: "./podman.fake.sh",
|
||||
},
|
||||
populateFS: func() {
|
||||
script := []byte(`#!/bin/sh
|
||||
case "$*" in
|
||||
"generate kube pod-not-found")
|
||||
exit 125
|
||||
;;
|
||||
esac`)
|
||||
err := os.WriteFile("podman.fake.sh", script, 0755)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "command works, returns pod",
|
||||
args: args{
|
||||
name: "my-pod",
|
||||
},
|
||||
fields: fields{
|
||||
podmanCmd: "./podman.fake.sh",
|
||||
},
|
||||
populateFS: func() {
|
||||
script := []byte(`#!/bin/sh
|
||||
case "$*" in
|
||||
"generate kube my-pod")
|
||||
cat <<EOF
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: my-pod
|
||||
EOF
|
||||
;;
|
||||
esac`)
|
||||
err := os.WriteFile("podman.fake.sh", script, 0755)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
},
|
||||
wantErr: false,
|
||||
checkResult: func(pod *corev1.Pod) {
|
||||
podName := pod.GetName()
|
||||
if podName != "my-pod" {
|
||||
t.Errorf("pod name should be %q but is %q", "my-pod", podName)
|
||||
}
|
||||
},
|
||||
},
|
||||
// TODO: Add test cases.
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.populateFS != nil {
|
||||
originWd, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
_ = os.Chdir(originWd)
|
||||
}()
|
||||
cwd := t.TempDir()
|
||||
err = os.Chdir(cwd)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tt.populateFS()
|
||||
}
|
||||
|
||||
o := &PodmanCli{
|
||||
podmanCmd: tt.fields.podmanCmd,
|
||||
podmanCmdInitTimeout: tt.fields.podmanCmdInitTimeout,
|
||||
containerRunGlobalExtraArgs: tt.fields.containerRunGlobalExtraArgs,
|
||||
containerRunExtraArgs: tt.fields.containerRunExtraArgs,
|
||||
}
|
||||
got, err := o.KubeGenerate(tt.args.name)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("PodmanCli.KubeGenerate() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
if tt.checkResult != nil {
|
||||
tt.checkResult(got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPodmanCli_CleanupPodResources(t *testing.T) {
|
||||
type fields struct {
|
||||
podmanCmd string
|
||||
podmanCmdInitTimeout time.Duration
|
||||
containerRunGlobalExtraArgs []string
|
||||
containerRunExtraArgs []string
|
||||
}
|
||||
type args struct {
|
||||
pod func() *corev1.Pod
|
||||
cleanupVolumes bool
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
populateFS func()
|
||||
wantErr bool
|
||||
checkResult func()
|
||||
}{
|
||||
{
|
||||
name: "cleanup pod, not volumes",
|
||||
fields: fields{
|
||||
podmanCmd: "./podman.fake.sh",
|
||||
},
|
||||
args: args{
|
||||
pod: func() *corev1.Pod {
|
||||
pod := corev1.Pod{}
|
||||
pod.SetName("my-pod")
|
||||
pod.Spec.Volumes = []corev1.Volume{
|
||||
{
|
||||
Name: "vol1",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "volume1",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "vol2",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "volume2",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
return &pod
|
||||
},
|
||||
cleanupVolumes: false,
|
||||
},
|
||||
populateFS: func() {
|
||||
script := []byte(`#!/bin/sh
|
||||
case "$*" in
|
||||
"pod stop my-pod")
|
||||
touch stop
|
||||
echo my-pod
|
||||
;;
|
||||
"pod rm my-pod")
|
||||
touch rm
|
||||
echo my-pod
|
||||
;;
|
||||
"volume rm volume1")
|
||||
touch volume1
|
||||
;;
|
||||
"volume rm volume2")
|
||||
touch volume2
|
||||
;;
|
||||
esac`)
|
||||
err := os.WriteFile("podman.fake.sh", script, 0755)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
},
|
||||
checkResult: func() {
|
||||
_, err := os.Stat("stop")
|
||||
if err != nil {
|
||||
t.Errorf("podman stop has not been called")
|
||||
}
|
||||
_, err = os.Stat("rm")
|
||||
if err != nil {
|
||||
t.Errorf("podman rm has not been called")
|
||||
}
|
||||
_, err = os.Stat("volume1")
|
||||
if err == nil {
|
||||
t.Errorf("podman rm volume volume1 has been called, it should not")
|
||||
}
|
||||
_, err = os.Stat("volume2")
|
||||
if err == nil {
|
||||
t.Errorf("podman rm volume volume2 has been called, it should not")
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "cleanup pod and volumes",
|
||||
fields: fields{
|
||||
podmanCmd: "./podman.fake.sh",
|
||||
},
|
||||
args: args{
|
||||
pod: func() *corev1.Pod {
|
||||
pod := corev1.Pod{}
|
||||
pod.SetName("my-pod")
|
||||
pod.Spec.Volumes = []corev1.Volume{
|
||||
{
|
||||
Name: "vol1",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "volume1",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "vol2",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "volume2",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
return &pod
|
||||
},
|
||||
cleanupVolumes: true,
|
||||
},
|
||||
populateFS: func() {
|
||||
script := []byte(`#!/bin/sh
|
||||
case "$*" in
|
||||
"pod stop my-pod")
|
||||
touch stop
|
||||
echo my-pod
|
||||
;;
|
||||
"pod rm my-pod")
|
||||
touch rm
|
||||
echo my-pod
|
||||
;;
|
||||
"volume rm volume1")
|
||||
touch volume1
|
||||
;;
|
||||
"volume rm volume2")
|
||||
touch volume2
|
||||
;;
|
||||
esac`)
|
||||
err := os.WriteFile("podman.fake.sh", script, 0755)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
},
|
||||
checkResult: func() {
|
||||
_, err := os.Stat("stop")
|
||||
if err != nil {
|
||||
t.Errorf("podman stop has not been called")
|
||||
}
|
||||
_, err = os.Stat("rm")
|
||||
if err != nil {
|
||||
t.Errorf("podman rm has not been called")
|
||||
}
|
||||
_, err = os.Stat("volume1")
|
||||
if err != nil {
|
||||
t.Errorf("podman rm volume volume1 has not been called")
|
||||
}
|
||||
_, err = os.Stat("volume2")
|
||||
if err != nil {
|
||||
t.Errorf("podman rm volume volume2 has not been called")
|
||||
}
|
||||
},
|
||||
}, // TODO: Add test cases.
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.populateFS != nil {
|
||||
originWd, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
_ = os.Chdir(originWd)
|
||||
}()
|
||||
cwd := t.TempDir()
|
||||
err = os.Chdir(cwd)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tt.populateFS()
|
||||
}
|
||||
|
||||
o := &PodmanCli{
|
||||
podmanCmd: tt.fields.podmanCmd,
|
||||
podmanCmdInitTimeout: tt.fields.podmanCmdInitTimeout,
|
||||
containerRunGlobalExtraArgs: tt.fields.containerRunGlobalExtraArgs,
|
||||
containerRunExtraArgs: tt.fields.containerRunExtraArgs,
|
||||
}
|
||||
if err := o.CleanupPodResources(tt.args.pod(), tt.args.cleanupVolumes); (err != nil) != tt.wantErr {
|
||||
t.Errorf("PodmanCli.CleanupPodResources() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
|
||||
if tt.checkResult != nil {
|
||||
tt.checkResult()
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
@@ -35,99 +34,6 @@ var _ = Describe("odo delete command tests", func() {
|
||||
helper.CommonAfterEach(commonVar)
|
||||
})
|
||||
|
||||
When("running odo delete from a non-component directory", func() {
|
||||
var files []string
|
||||
BeforeEach(func() {
|
||||
files = helper.ListFilesInDir(commonVar.Context)
|
||||
Expect(files).ToNot(ContainElement(".odo"))
|
||||
})
|
||||
|
||||
When("the directory is empty", func() {
|
||||
BeforeEach(func() {
|
||||
Expect(len(files)).To(BeZero())
|
||||
})
|
||||
It("should fail", func() {
|
||||
for _, opts := range [][]string{nil, {"--files"}, {"--running-in", "dev"}, {"--running-in", "deploy"}} {
|
||||
title := "using " + strings.Join(opts, "=")
|
||||
if len(opts) == 0 {
|
||||
title = "default"
|
||||
}
|
||||
By(title, func() {
|
||||
args := []string{"delete", "component", "-f"}
|
||||
args = append(args, opts...)
|
||||
errOut := helper.Cmd("odo", args...).ShouldFail().Err()
|
||||
helper.MatchAllInOutput(errOut, []string{"The current directory does not represent an odo component"})
|
||||
})
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
When("the directory is not empty", func() {
|
||||
BeforeEach(func() {
|
||||
helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), commonVar.Context)
|
||||
})
|
||||
It("should fail", func() {
|
||||
for _, opts := range [][]string{nil, {"--files"}, {"--running-in", "dev"}, {"--running-in", "deploy"}} {
|
||||
title := "using " + strings.Join(opts, "=")
|
||||
if len(opts) == 0 {
|
||||
title = "default"
|
||||
}
|
||||
By(title, func() {
|
||||
args := []string{"delete", "component", "-f"}
|
||||
args = append(args, opts...)
|
||||
errOut := helper.Cmd("odo", args...).ShouldFail().Err()
|
||||
helper.MatchAllInOutput(errOut, []string{"The current directory does not represent an odo component"})
|
||||
})
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
It("should fail when using both --files and --name", func() {
|
||||
errOut := helper.Cmd("odo", "delete", "component", "--name", "my-comp", "--files", "-f").ShouldFail().Err()
|
||||
helper.MatchAllInOutput(errOut, []string{"'--files' cannot be used with '--name'; '--files' must be used from a directory containing a Devfile"})
|
||||
})
|
||||
|
||||
It("should fail when passing an invalid running-in and --name", func() {
|
||||
errOut := helper.Cmd("odo", "delete", "component", "--name", "my-comp", "--running-in", "unknown-value").ShouldFail().Err()
|
||||
Expect(errOut).Should(ContainSubstring("invalid value for --running-in: \"unknown-value\". Acceptable values are: dev, deploy"))
|
||||
})
|
||||
|
||||
for _, withDotOdoDir := range []bool{true, false} {
|
||||
withDotOdoDir := withDotOdoDir
|
||||
When(fmt.Sprintf("using --files in a directory where Devfile was not generated by odo: withDotOdoDir=%v", withDotOdoDir), func() {
|
||||
var out string
|
||||
var fileList []string
|
||||
|
||||
BeforeEach(func() {
|
||||
helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), commonVar.Context)
|
||||
helper.CopyExampleDevFile(
|
||||
filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"),
|
||||
path.Join(commonVar.Context, "devfile.yaml"),
|
||||
cmpName)
|
||||
if withDotOdoDir {
|
||||
helper.MakeDir(filepath.Join(commonVar.Context, util.DotOdoDirectory))
|
||||
}
|
||||
out = helper.Cmd("odo", "delete", "component", "--files", "-f").ShouldPass().Out()
|
||||
fileList = helper.ListFilesInDir(commonVar.Context)
|
||||
})
|
||||
|
||||
It("should delete the relevant files", func() {
|
||||
By("not listing and deleting the devfile.yaml", func() {
|
||||
Expect(out).ShouldNot(ContainSubstring(filepath.Join(commonVar.Context, "devfile.yaml")))
|
||||
Expect(fileList).Should(ContainElement("devfile.yaml"))
|
||||
})
|
||||
|
||||
if withDotOdoDir {
|
||||
By("listing and deleting the .odo directory", func() {
|
||||
Expect(out).Should(ContainSubstring(filepath.Join(commonVar.Context, util.DotOdoDirectory)))
|
||||
Expect(fileList).ShouldNot(ContainElement(util.DotOdoDirectory))
|
||||
})
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
for _, ctx := range []struct {
|
||||
title string
|
||||
devfileName string
|
||||
@@ -169,60 +75,6 @@ var _ = Describe("odo delete command tests", func() {
|
||||
helper.GetExamplePath("source", "devfiles", "nodejs", ctx.devfileName)).ShouldPass()
|
||||
})
|
||||
|
||||
It("should fail when passing an invalid running-in", func() {
|
||||
errOut := helper.Cmd("odo", "delete", "component", "--running-in", "unknown-value", "-f").ShouldFail().Err()
|
||||
Expect(errOut).Should(ContainSubstring("invalid value for --running-in: \"unknown-value\". Acceptable values are: dev, deploy"))
|
||||
})
|
||||
|
||||
It("should fail when passing an invalid running-in and --name from another directory", func() {
|
||||
otherDir := filepath.Join(commonVar.Context, "tmp")
|
||||
helper.MakeDir(otherDir)
|
||||
errOut := helper.Cmd("odo", "delete", "component", "--name", cmpName, "--running-in", "unknown-value", "-f").
|
||||
WithWorkingDir(otherDir).ShouldFail().Err()
|
||||
Expect(errOut).Should(ContainSubstring("invalid value for --running-in: \"unknown-value\". Acceptable values are: dev, deploy"))
|
||||
})
|
||||
|
||||
When("the components are not deployed", func() {
|
||||
for _, runningIn := range []string{"", "dev", "deploy"} {
|
||||
runningIn := runningIn
|
||||
It(fmt.Sprintf("should output that there are no resources to be deleted (running-in=%q)", runningIn), func() {
|
||||
args := []string{"delete", "component"}
|
||||
if runningIn != "" {
|
||||
args = append(args, "--running-in", runningIn)
|
||||
}
|
||||
stdOut := helper.Cmd("odo", append(args, "-f")...).ShouldPass().Out()
|
||||
Expect(stdOut).To(ContainSubstring("No resource found for component %q in namespace %q", cmpName, commonVar.Project))
|
||||
})
|
||||
}
|
||||
|
||||
for _, label := range []string{
|
||||
helper.LabelNoCluster, helper.LabelUnauth,
|
||||
} {
|
||||
label := label
|
||||
It("should work without cluster", Label(label), func() {
|
||||
helper.Cmd("odo", "delete", "component").ShouldPass()
|
||||
})
|
||||
}
|
||||
|
||||
It("should delete the respective files with --files", func() {
|
||||
stdOut := helper.Cmd("odo", "delete", "component", "--files", "-f").ShouldPass().Out()
|
||||
By("not finding resources in namepace", func() {
|
||||
Expect(stdOut).To(ContainSubstring("No resource found for component %q in namespace %q", cmpName, commonVar.Project))
|
||||
})
|
||||
By("listing files that were created by odo and that need to be deleted", func() {
|
||||
Expect(stdOut).To(ContainSubstring("This will delete the following files and directories"))
|
||||
//odo init above create the devfile.yaml, and so created the .odo/generated file as well
|
||||
Expect(stdOut).To(ContainSubstring(filepath.Join(commonVar.Context, "devfile.yaml")))
|
||||
Expect(stdOut).To(ContainSubstring(filepath.Join(commonVar.Context, util.DotOdoDirectory)))
|
||||
})
|
||||
By("deleting the expected files", func() {
|
||||
filesInDir := helper.ListFilesInDir(commonVar.Context)
|
||||
Expect(filesInDir).ShouldNot(ContainElement("devfile.yaml"))
|
||||
Expect(filesInDir).ShouldNot(ContainElement(util.DotOdoDirectory))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
for _, podman := range []bool{true, false} {
|
||||
podman := podman
|
||||
When("the component is deployed in DEV mode and dev mode stopped", helper.LabelPodmanIf(podman, func() {
|
||||
@@ -311,57 +163,6 @@ var _ = Describe("odo delete command tests", func() {
|
||||
})
|
||||
|
||||
Context("the component is deleted while having access to the devfile.yaml", func() {
|
||||
When("the component is deleted without --files", func() {
|
||||
var stdOut string
|
||||
BeforeEach(func() {
|
||||
args := []string{"delete", "component"}
|
||||
if runningIn != "" {
|
||||
args = append(args, "--running-in", runningIn)
|
||||
}
|
||||
stdOut = helper.Cmd("odo", append(args, "-f")...).ShouldPass().Out()
|
||||
})
|
||||
|
||||
if runningIn == "deploy" {
|
||||
It("should output that there are no resources to be deleted", func() {
|
||||
Expect(stdOut).To(ContainSubstring("No resource found for component %q", cmpName))
|
||||
})
|
||||
} else {
|
||||
It("should have deleted the component", func() {
|
||||
By("listing the resources to delete", func() {
|
||||
Expect(stdOut).To(ContainSubstring(cmpName))
|
||||
})
|
||||
By("deleting the deployment", func() {
|
||||
component := helper.NewComponent(cmpName, "app", labels.ComponentDevMode, commonVar.Project, commonVar.CliRunner)
|
||||
component.ExpectIsNotDeployed()
|
||||
})
|
||||
By("ensuring that devfile.yaml and .odo still exists", func() {
|
||||
files := helper.ListFilesInDir(commonVar.Context)
|
||||
Expect(files).To(ContainElement(util.DotOdoDirectory))
|
||||
Expect(files).To(ContainElement("devfile.yaml"))
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
if !podman {
|
||||
When("odo delete command is run again with nothing deployed on the cluster", func() {
|
||||
var stdOut string
|
||||
BeforeEach(func() {
|
||||
// wait until the resources are deleted from the first delete
|
||||
Eventually(string(commonVar.CliRunner.Run(getDeployArgs...).Out.Contents()), 60, 3).ShouldNot(ContainSubstring(deploymentName))
|
||||
Eventually(string(commonVar.CliRunner.Run(getSVCArgs...).Out.Contents()), 60, 3).ShouldNot(ContainSubstring(serviceName))
|
||||
args := []string{"delete", "component"}
|
||||
if runningIn != "" {
|
||||
args = append(args, "--running-in", runningIn)
|
||||
}
|
||||
stdOut = helper.Cmd("odo", append(args, "-f")...).ShouldPass().Out()
|
||||
})
|
||||
It("should output that there are no resources to be deleted", func() {
|
||||
Expect(stdOut).To(ContainSubstring("No resource found for component %q in namespace %q", cmpName, commonVar.Project))
|
||||
})
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
When("the component is deleted with --files", func() {
|
||||
var stdOut string
|
||||
BeforeEach(func() {
|
||||
|
||||
Reference in New Issue
Block a user