Enhance project delete + Add UTs (#740)

* Delete project enhancements + unit tests and e2e tests

This PR adds the following:
1. When a project is deleted, the delete project now displays also the
   active project post deletion if there are more projects left post deletion
2. Adds UTs and e2e tests for project delete
3. Handles the case of project list with no projects with a proper message
   indicating that there are no currently available projects and suggests the
   command to create a project instead of displaying an empty table.

fixes #726 #750
Signed-off-by: anmolbabu <anmolbudugutta@gmail.com>

* Address @codeclimate comments

Signed-off-by: anmolbabu <anmolbudugutta@gmail.com>

* Incorporate @cdrage and @snarwade comments

Signed-off-by: anmolbabu <anmolbudugutta@gmail.com>

* Incorporate @snarwade and @tkral comments

Signed-off-by: anmolbabu <anmolbudugutta@gmail.com>

* Fix gofmt errors

Signed-off-by: anmolbabu <anmolbudugutta@gmail.com>

* Incorporate @codeclimate comments

Signed-off-by: anmolbabu <anmolbudugutta@gmail.com>

* Incorporate @cdrage comments

Signed-off-by: anmolbabu <anmolbudugutta@gmail.com>

* Fix travis failures

Signed-off-by: anmolbabu <anmolbudugutta@gmail.com>

* Incorporate @tkral comments

Signed-off-by: anmolbabu <anmolbudugutta@gmail.com>

* Fix error msgs -- start them with small letters

Signed-off-by: anmolbabu <anmolbudugutta@gmail.com>
This commit is contained in:
anmolbabu
2018-10-19 22:18:00 +05:30
committed by Charlie Drage
parent 65b5bed832
commit c14773a25e
9 changed files with 394 additions and 85 deletions

View File

@@ -124,7 +124,7 @@ var projectDeleteCmd = &cobra.Command{
isValidProject, err := project.Exists(client, projectName)
checkError(err, "Failed to delete project %s", projectName)
if !isValidProject {
fmt.Printf("The project %s does not exist. Please check the list of projects using `odo project list`", projectName)
fmt.Printf("The project %s does not exist. Please check the list of projects using `odo project list`\n", projectName)
os.Exit(1)
}
@@ -140,11 +140,27 @@ var projectDeleteCmd = &cobra.Command{
fmt.Printf("Aborting deletion of project: %v\n", projectName)
}
fmt.Printf("Deleting project %s...\n(this operation may take some time)\n", projectName)
err = project.Delete(client, projectName)
if err != nil {
checkError(err, "")
}
fmt.Printf("Deleted project : %v\n", projectName)
// Get Current Project
currProject := project.GetCurrent(client)
// Check if List returns empty, if so, the currProject is showing old currentProject
// In openshift, when the project is deleted, it does not reset the current project in kube config file which is used by odo for current project
projects, err := project.List(client)
checkError(err, "")
if len(projects) != 0 {
fmt.Printf("%s has been set as the active project\n", currProject)
} else {
// oc errors out as "error: you do not have rights to view project "$deleted_project"."
fmt.Printf("You are not a member of any projects. You can request a project to be created using the `odo project create <project_name>` command\n")
}
},
}
@@ -160,6 +176,10 @@ var projectListCmd = &cobra.Command{
client := getOcClient()
projects, err := project.List(client)
checkError(err, "")
if len(projects) == 0 {
fmt.Println("You are not a member of any projects. You can request a project to be created using the `odo project create <project_name>` command")
return
}
fmt.Printf("ACTIVE NAME\n")
for _, app := range projects {
activeMark := " "

View File

@@ -7,7 +7,6 @@ import (
"fmt"
"io"
"io/ioutil"
"k8s.io/apimachinery/pkg/runtime"
"net"
"net/url"
"os"
@@ -17,6 +16,9 @@ import (
"strings"
"time"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"github.com/redhat-developer/odo/pkg/util"
"github.com/fatih/color"
@@ -84,8 +86,8 @@ type Client struct {
serviceCatalogClient servicecatalogclienset.ServicecatalogV1beta1Interface
routeClient routeclientset.RouteV1Interface
userClient userclientset.UserV1Interface
kubeConfig clientcmd.ClientConfig
namespace string
KubeConfig clientcmd.ClientConfig
Namespace string
}
func New(connectionCheck bool) (*Client, error) {
@@ -94,9 +96,9 @@ func New(connectionCheck bool) (*Client, error) {
// initialize client-go clients
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
configOverrides := &clientcmd.ConfigOverrides{}
client.kubeConfig = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
client.KubeConfig = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
config, err := client.kubeConfig.ClientConfig()
config, err := client.KubeConfig.ClientConfig()
if err != nil {
return nil, errors.New(err.Error() + errorMsg)
}
@@ -150,11 +152,11 @@ func New(connectionCheck bool) (*Client, error) {
client.userClient = userClient
namespace, _, err := client.kubeConfig.Namespace()
namespace, _, err := client.KubeConfig.Namespace()
if err != nil {
return nil, err
}
client.namespace = namespace
client.Namespace = namespace
// Skip this if connectionCheck is false
if !connectionCheck {
@@ -271,7 +273,7 @@ func isServerUp(server string) bool {
}
func (c *Client) GetCurrentProjectName() string {
return c.namespace
return c.Namespace
}
// GetProjectNames return list of existing projects that user has access to.
@@ -302,7 +304,7 @@ func (c *Client) CreateNewProject(name string) error {
}
func (c *Client) SetCurrentProject(project string) error {
rawConfig, err := c.kubeConfig.RawConfig()
rawConfig, err := c.KubeConfig.RawConfig()
if err != nil {
return errors.Wrapf(err, "unable to switch to %s project", project)
}
@@ -585,7 +587,7 @@ func (c *Client) NewAppS2I(commonObjectMeta metav1.ObjectMeta, builderImage stri
is := imagev1.ImageStream{
ObjectMeta: commonObjectMeta,
}
_, err = c.imageClient.ImageStreams(c.namespace).Create(&is)
_, err = c.imageClient.ImageStreams(c.Namespace).Create(&is)
if err != nil {
return errors.Wrapf(err, "unable to create ImageStream for %s", commonObjectMeta.Name)
}
@@ -604,7 +606,7 @@ func (c *Client) NewAppS2I(commonObjectMeta metav1.ObjectMeta, builderImage stri
// Generate and create the DeploymentConfig
dc := generateGitDeploymentConfig(commonObjectMeta, buildConfig.Spec.Output.To.Name, containerPorts, inputEnvVars)
_, err = c.appsClient.DeploymentConfigs(c.namespace).Create(&dc)
_, err = c.appsClient.DeploymentConfigs(c.Namespace).Create(&dc)
if err != nil {
return errors.Wrapf(err, "unable to create DeploymentConfig for %s", commonObjectMeta.Name)
}
@@ -669,7 +671,7 @@ func (c *Client) BootstrapSupervisoredS2I(commonObjectMeta metav1.ObjectMeta, bu
is := imagev1.ImageStream{
ObjectMeta: commonObjectMeta,
}
_, err = c.imageClient.ImageStreams(c.namespace).Create(&is)
_, err = c.imageClient.ImageStreams(c.Namespace).Create(&is)
if err != nil {
return errors.Wrapf(err, "unable to create ImageStream for %s", commonObjectMeta.Name)
}
@@ -697,7 +699,7 @@ func (c *Client) BootstrapSupervisoredS2I(commonObjectMeta metav1.ObjectMeta, bu
}
}
_, err = c.appsClient.DeploymentConfigs(c.namespace).Create(&dc)
_, err = c.appsClient.DeploymentConfigs(c.Namespace).Create(&dc)
if err != nil {
return errors.Wrapf(err, "unable to create DeploymentConfig for %s", commonObjectMeta.Name)
}
@@ -741,7 +743,7 @@ func (c *Client) CreateService(commonObjectMeta metav1.ObjectMeta, containerPort
},
},
}
_, err := c.kubeClient.CoreV1().Services(c.namespace).Create(&svc)
_, err := c.kubeClient.CoreV1().Services(c.Namespace).Create(&svc)
if err != nil {
return errors.Wrapf(err, "unable to create Service for %s", commonObjectMeta.Name)
}
@@ -788,7 +790,7 @@ func (c *Client) UpdateBuildConfig(buildConfigName string, projectName string, g
}
buildConfig.Spec.Source = buildSource
buildConfig.Annotations = annotations
_, err = c.buildClient.BuildConfigs(c.namespace).Update(buildConfig)
_, err = c.buildClient.BuildConfigs(c.Namespace).Update(buildConfig)
if err != nil {
return errors.Wrap(err, "unable to update the component")
}
@@ -803,7 +805,7 @@ func (c *Client) UpdateBuildConfig(buildConfigName string, projectName string, g
func (c *Client) PatchCurrentDC(name string, dc appsv1.DeploymentConfig) error {
// Retrieve the current DC
currentDC, err := c.GetDeploymentConfigFromName(name, c.namespace)
currentDC, err := c.GetDeploymentConfigFromName(name, c.Namespace)
if err != nil {
return errors.Wrapf(err, "unable to get DeploymentConfig %s", name)
}
@@ -857,7 +859,7 @@ func (c *Client) PatchCurrentDC(name string, dc appsv1.DeploymentConfig) error {
// Update the current one that's deployed with the new Spec.
// despite the "patch" function name, we use update since `.Patch` requires
// use to define each and every object we must change. Updating makes it easier.
_, err = c.appsClient.DeploymentConfigs(c.namespace).Update(currentDC)
_, err = c.appsClient.DeploymentConfigs(c.Namespace).Update(currentDC)
if err != nil {
return errors.Wrapf(err, "unable to update DeploymentConfig %s", name)
}
@@ -875,7 +877,7 @@ func (c *Client) UpdateDCToGit(commonObjectMeta metav1.ObjectMeta, imageName str
}
// Retrieve the current DC in order to obtain what the current inputPorts are..
currentDC, err := c.GetDeploymentConfigFromName(commonObjectMeta.Name, c.namespace)
currentDC, err := c.GetDeploymentConfigFromName(commonObjectMeta.Name, c.Namespace)
if err != nil {
return errors.Wrapf(err, "unable to get DeploymentConfig %s", commonObjectMeta.Name)
}
@@ -915,7 +917,7 @@ func (c *Client) UpdateDCToSupervisor(commonObjectMeta metav1.ObjectMeta, compon
imageNS = imageStream.ObjectMeta.Namespace
// Retrieve the current DC in order to obtain what the current inputPorts are..
currentDC, err := c.GetDeploymentConfigFromName(commonObjectMeta.Name, c.namespace)
currentDC, err := c.GetDeploymentConfigFromName(commonObjectMeta.Name, c.Namespace)
if err != nil {
return errors.Wrapf(err, "unable to get DeploymentConfig %s", commonObjectMeta.Name)
}
@@ -962,13 +964,13 @@ func (c *Client) UpdateDCToSupervisor(commonObjectMeta metav1.ObjectMeta, compon
// dcName is the name of the DeploymentConfig file to be updated
// annotations contains the annotations for the DeploymentConfig file
func (c *Client) UpdateDCAnnotations(dcName string, annotations map[string]string) error {
dc, err := c.GetDeploymentConfigFromName(dcName, c.namespace)
dc, err := c.GetDeploymentConfigFromName(dcName, c.Namespace)
if err != nil {
return errors.Wrapf(err, "unable to get DeploymentConfig %s", dcName)
}
dc.Annotations = annotations
_, err = c.appsClient.DeploymentConfigs(c.namespace).Update(dc)
_, err = c.appsClient.DeploymentConfigs(c.Namespace).Update(dc)
if err != nil {
return errors.Wrapf(err, "unable to uDeploymentConfig config %s", dcName)
}
@@ -994,7 +996,7 @@ func (c *Client) SetupForSupervisor(dcName string, projectName string, annotatio
addBootstrapVolumeMount(dc, dcName)
_, err = c.appsClient.DeploymentConfigs(c.namespace).Update(dc)
_, err = c.appsClient.DeploymentConfigs(c.Namespace).Update(dc)
if err != nil {
return errors.Wrapf(err, "unable to uDeploymentConfig config %s", dcName)
}
@@ -1033,7 +1035,7 @@ func (c *Client) CleanupAfterSupervisor(dcName string, projectName string, annot
}
}
_, err = c.appsClient.DeploymentConfigs(c.namespace).Update(dc)
_, err = c.appsClient.DeploymentConfigs(c.Namespace).Update(dc)
if err != nil {
return errors.Wrapf(err, "unable to update deployment config %s", dcName)
}
@@ -1049,7 +1051,7 @@ func (c *Client) CleanupAfterSupervisor(dcName string, projectName string, annot
// buildConfigName is the name of the buildConfig for which we are fetching the build name
// returns the name of the latest build or the error
func (c *Client) GetLatestBuildName(buildConfigName string) (string, error) {
buildConfig, err := c.buildClient.BuildConfigs(c.namespace).Get(buildConfigName, metav1.GetOptions{})
buildConfig, err := c.buildClient.BuildConfigs(c.Namespace).Get(buildConfigName, metav1.GetOptions{})
if err != nil {
return "", errors.Wrap(err, "unable to get the latest build name")
}
@@ -1064,7 +1066,7 @@ func (c *Client) StartBuild(name string) (string, error) {
Name: name,
},
}
result, err := c.buildClient.BuildConfigs(c.namespace).Instantiate(name, &buildRequest)
result, err := c.buildClient.BuildConfigs(c.Namespace).Instantiate(name, &buildRequest)
if err != nil {
return "", errors.Wrapf(err, "unable to instantiate BuildConfig for %s", name)
}
@@ -1077,7 +1079,7 @@ func (c *Client) StartBuild(name string) (string, error) {
func (c *Client) WaitForBuildToFinish(buildName string) error {
glog.V(4).Infof("Waiting for %s build to finish", buildName)
w, err := c.buildClient.Builds(c.namespace).Watch(metav1.ListOptions{
w, err := c.buildClient.Builds(c.Namespace).Watch(metav1.ListOptions{
FieldSelector: fields.Set{"metadata.name": buildName}.AsSelector().String(),
})
if err != nil {
@@ -1107,7 +1109,7 @@ func (c *Client) WaitForBuildToFinish(buildName string) error {
func (c *Client) WaitAndGetPod(selector string) (*corev1.Pod, error) {
glog.V(4).Infof("Waiting for %s pod", selector)
w, err := c.kubeClient.CoreV1().Pods(c.namespace).Watch(metav1.ListOptions{
w, err := c.kubeClient.CoreV1().Pods(c.Namespace).Watch(metav1.ListOptions{
LabelSelector: selector,
})
if err != nil {
@@ -1141,7 +1143,7 @@ func (c *Client) FollowBuildLog(buildName string, stdout io.Writer) error {
}
rd, err := c.buildClient.RESTClient().Get().
Namespace(c.namespace).
Namespace(c.Namespace).
Resource("builds").
Name(buildName).
SubResource("log").
@@ -1181,7 +1183,7 @@ func (c *Client) DisplayDeploymentConfigLog(deploymentConfigName string, followL
// RESTClient call to OpenShift
rd, err := c.appsClient.RESTClient().Get().
Namespace(c.namespace).
Namespace(c.Namespace).
Name(deploymentConfigName).
Resource("deploymentconfigs").
SubResource("log").
@@ -1235,43 +1237,43 @@ func (c *Client) Delete(labels map[string]string) error {
var errorList []string
// Delete DeploymentConfig
glog.V(4).Info("Deleting DeploymentConfigs")
err := c.appsClient.DeploymentConfigs(c.namespace).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector})
err := c.appsClient.DeploymentConfigs(c.Namespace).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector})
if err != nil {
errorList = append(errorList, "unable to delete deploymentconfig")
}
// Delete Route
glog.V(4).Info("Deleting Routes")
err = c.routeClient.Routes(c.namespace).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector})
err = c.routeClient.Routes(c.Namespace).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector})
if err != nil {
errorList = append(errorList, "unable to delete route")
}
// Delete BuildConfig
glog.V(4).Info("Deleting BuildConfigs")
err = c.buildClient.BuildConfigs(c.namespace).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector})
err = c.buildClient.BuildConfigs(c.Namespace).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector})
if err != nil {
errorList = append(errorList, "unable to delete buildconfig")
}
// Delete ImageStream
glog.V(4).Info("Deleting ImageStreams")
err = c.imageClient.ImageStreams(c.namespace).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector})
err = c.imageClient.ImageStreams(c.Namespace).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector})
if err != nil {
errorList = append(errorList, "unable to delete imagestream")
}
// Delete Services
glog.V(4).Info("Deleting Services")
svcList, err := c.kubeClient.CoreV1().Services(c.namespace).List(metav1.ListOptions{LabelSelector: selector})
svcList, err := c.kubeClient.CoreV1().Services(c.Namespace).List(metav1.ListOptions{LabelSelector: selector})
if err != nil {
errorList = append(errorList, "unable to list services")
}
for _, svc := range svcList.Items {
err = c.kubeClient.CoreV1().Services(c.namespace).Delete(svc.Name, &metav1.DeleteOptions{})
err = c.kubeClient.CoreV1().Services(c.Namespace).Delete(svc.Name, &metav1.DeleteOptions{})
if err != nil {
errorList = append(errorList, "unable to delete service")
}
}
// PersistentVolumeClaim
glog.V(4).Infof("Deleting PersistentVolumeClaims")
err = c.kubeClient.CoreV1().PersistentVolumeClaims(c.namespace).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector})
err = c.kubeClient.CoreV1().PersistentVolumeClaims(c.Namespace).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector})
if err != nil {
errorList = append(errorList, "unable to delete volume")
}
@@ -1294,7 +1296,7 @@ func (c *Client) DeleteServiceInstance(labels map[string]string) error {
glog.V(4).Infof("Selectors used for deletion: %s", selector)
// Listing out serviceInstance because `DeleteCollection` method don't work on serviceInstance
svcCatList, err := c.GetServiceInstanceList(c.namespace, selector)
svcCatList, err := c.GetServiceInstanceList(c.Namespace, selector)
if err != nil {
return errors.Wrap(err, "unable to list service instance")
}
@@ -1302,13 +1304,13 @@ func (c *Client) DeleteServiceInstance(labels map[string]string) error {
// Iterating over serviceInstance List and deleting one by one
for _, svc := range svcCatList {
// we need to delete the ServiceBinding before deleting the ServiceInstance
err = c.serviceCatalogClient.ServiceBindings(c.namespace).Delete(svc.Name, &metav1.DeleteOptions{})
err = c.serviceCatalogClient.ServiceBindings(c.Namespace).Delete(svc.Name, &metav1.DeleteOptions{})
if err != nil {
return errors.Wrap(err, "unable to delete serviceBinding")
}
// now we perform the actual deletion
err = c.serviceCatalogClient.ServiceInstances(c.namespace).Delete(svc.Name, &metav1.DeleteOptions{})
err = c.serviceCatalogClient.ServiceInstances(c.Namespace).Delete(svc.Name, &metav1.DeleteOptions{})
if err != nil {
return errors.Wrap(err, "unable to delete serviceInstance")
}
@@ -1323,7 +1325,62 @@ func (c *Client) DeleteProject(name string) error {
if err != nil {
return errors.Wrap(err, "unable to delete project")
}
return nil
// wait for delete to complete
w, err := c.projectClient.Projects().Watch(metav1.ListOptions{
FieldSelector: fields.Set{"metadata.name": name}.AsSelector().String(),
})
if err != nil {
return errors.Wrapf(err, "unable to watch project")
}
defer w.Stop()
for {
val, ok := <-w.ResultChan()
// When marked for deletion... val looks like:
/*
val: {
Type:MODIFIED
Object:&Project{
ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{...},
Spec:ProjectSpec{...},
Status:ProjectStatus{
Phase:Terminating,
},
}
}
*/
// Post deletion val will look like:
/*
val: {
Type:DELETED
Object:&Project{
ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{...},
Spec:ProjectSpec{...},
Status:ProjectStatus{
Phase:,
},
}
}
*/
if !ok {
return fmt.Errorf("received unexpected signal %+v on project watch channel", val)
}
// So we depend on val.Type as val.Object.Status.Phase is just empty string and not a mapped value constant
if prj, ok := val.Object.(*projectv1.Project); ok {
glog.V(4).Infof("Status of delete of project %s is %s", name, prj.Status.Phase)
switch prj.Status.Phase {
//prj.Status.Phase can only be "Terminating" or "Active" or ""
case "":
if val.Type == watch.Deleted {
return nil
}
if val.Type == watch.Error {
return fmt.Errorf("failed watching the deletion of project %s", name)
}
}
}
}
}
// GetLabelValues get label values of given label from objects in project that are matching selector
@@ -1383,7 +1440,7 @@ func (c *Client) CreateServiceInstance(serviceName string, serviceType string, s
return errors.Wrap(err, "unable to create the service instance parameters")
}
_, err = c.serviceCatalogClient.ServiceInstances(c.namespace).Create(
_, err = c.serviceCatalogClient.ServiceInstances(c.Namespace).Create(
&scv1beta1.ServiceInstance{
TypeMeta: metav1.TypeMeta{
Kind: "ServiceInstance",
@@ -1391,7 +1448,7 @@ func (c *Client) CreateServiceInstance(serviceName string, serviceType string, s
},
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
Namespace: c.namespace,
Namespace: c.Namespace,
Labels: labels,
},
Spec: scv1beta1.ServiceInstanceSpec{
@@ -1408,7 +1465,7 @@ func (c *Client) CreateServiceInstance(serviceName string, serviceType string, s
}
// Create the secret containing the parameters of the plan selected.
err = c.CreateServiceBinding(serviceName, c.namespace, parameters)
err = c.CreateServiceBinding(serviceName, c.Namespace, parameters)
if err != nil {
return errors.Wrapf(err, "unable to create the secret %s for the service instance", serviceName)
}
@@ -1597,7 +1654,7 @@ func (c *Client) CreateRoute(name string, serviceName string, portNumber intstr.
},
},
}
r, err := c.routeClient.Routes(c.namespace).Create(route)
r, err := c.routeClient.Routes(c.Namespace).Create(route)
if err != nil {
return nil, errors.Wrap(err, "error creating route")
}
@@ -1606,7 +1663,7 @@ func (c *Client) CreateRoute(name string, serviceName string, portNumber intstr.
// DeleteRoute deleted the given route
func (c *Client) DeleteRoute(name string) error {
err := c.routeClient.Routes(c.namespace).Delete(name, &metav1.DeleteOptions{})
err := c.routeClient.Routes(c.Namespace).Delete(name, &metav1.DeleteOptions{})
if err != nil {
return errors.Wrap(err, "unable to delete route")
}
@@ -1615,7 +1672,7 @@ func (c *Client) DeleteRoute(name string) error {
// ListRoutes lists all the routes based on the given label selector
func (c *Client) ListRoutes(labelSelector string) ([]routev1.Route, error) {
routeList, err := c.routeClient.Routes(c.namespace).List(metav1.ListOptions{
routeList, err := c.routeClient.Routes(c.Namespace).List(metav1.ListOptions{
LabelSelector: labelSelector,
})
if err != nil {
@@ -1666,7 +1723,7 @@ func (c *Client) CreatePVC(name string, size string, labels map[string]string) (
},
}
createdPvc, err := c.kubeClient.CoreV1().PersistentVolumeClaims(c.namespace).Create(pvc)
createdPvc, err := c.kubeClient.CoreV1().PersistentVolumeClaims(c.Namespace).Create(pvc)
if err != nil {
return nil, errors.Wrap(err, "unable to create PVC")
}
@@ -1675,7 +1732,7 @@ func (c *Client) CreatePVC(name string, size string, labels map[string]string) (
// DeletePVC deletes the given PVC by name
func (c *Client) DeletePVC(name string) error {
return c.kubeClient.CoreV1().PersistentVolumeClaims(c.namespace).Delete(name, nil)
return c.kubeClient.CoreV1().PersistentVolumeClaims(c.Namespace).Delete(name, nil)
}
// DeleteBuildConfig deletes the given BuildConfig by name using CommonObjectMeta..
@@ -1687,7 +1744,7 @@ func (c *Client) DeleteBuildConfig(commonObjectMeta metav1.ObjectMeta) error {
// Delete BuildConfig
glog.V(4).Info("Deleting BuildConfigs with DeleteBuildConfig")
return c.buildClient.BuildConfigs(c.namespace).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector})
return c.buildClient.BuildConfigs(c.Namespace).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector})
}
// generateVolumeNameFromPVC generates a random volume name based on the name
@@ -1725,7 +1782,7 @@ func (c *Client) AddPVCToDeploymentConfig(dc *appsv1.DeploymentConfig, pvc strin
)
glog.V(4).Infof("Updating DeploymentConfig: %v", dc)
_, err := c.appsClient.DeploymentConfigs(c.namespace).Update(dc)
_, err := c.appsClient.DeploymentConfigs(c.Namespace).Update(dc)
if err != nil {
return errors.Wrapf(err, "failed to update DeploymentConfig: %v", dc)
}
@@ -1767,7 +1824,7 @@ func removeVolumeMountFromDC(vm string, dc *appsv1.DeploymentConfig) bool {
func (c *Client) RemoveVolumeFromDeploymentConfig(pvc string, dcName string) error {
retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
dc, err := c.GetDeploymentConfigFromName(dcName, c.namespace)
dc, err := c.GetDeploymentConfigFromName(dcName, c.Namespace)
if err != nil {
return errors.Wrapf(err, "unable to get Deployment Config: %v", dcName)
}
@@ -1792,7 +1849,7 @@ func (c *Client) RemoveVolumeFromDeploymentConfig(pvc string, dcName string) err
return fmt.Errorf("could not find volumeMount: %v in Deployment Config: %v", volumeName, dc)
}
_, updateErr := c.appsClient.DeploymentConfigs(c.namespace).Update(dc)
_, updateErr := c.appsClient.DeploymentConfigs(c.Namespace).Update(dc)
return updateErr
})
if retryErr != nil {
@@ -1825,7 +1882,7 @@ func (c *Client) getVolumeNamesFromPVC(pvc string, dc *appsv1.DeploymentConfig)
// GetDeploymentConfigsFromSelector returns an array of Deployment Config
// resources which match the given selector
func (c *Client) GetDeploymentConfigsFromSelector(selector string) ([]appsv1.DeploymentConfig, error) {
dcList, err := c.appsClient.DeploymentConfigs(c.namespace).List(metav1.ListOptions{
dcList, err := c.appsClient.DeploymentConfigs(c.Namespace).List(metav1.ListOptions{
LabelSelector: selector,
})
if err != nil {
@@ -1837,7 +1894,7 @@ func (c *Client) GetDeploymentConfigsFromSelector(selector string) ([]appsv1.Dep
// GetServicesFromSelector returns an array of Service resources which match the
// given selector
func (c *Client) GetServicesFromSelector(selector string) ([]corev1.Service, error) {
serviceList, err := c.kubeClient.CoreV1().Services(c.namespace).List(metav1.ListOptions{
serviceList, err := c.kubeClient.CoreV1().Services(c.Namespace).List(metav1.ListOptions{
LabelSelector: selector,
})
if err != nil {
@@ -1860,7 +1917,7 @@ func (c *Client) GetDeploymentConfigFromName(name string, project string) (*apps
// GetPVCsFromSelector returns the PVCs based on the given selector
func (c *Client) GetPVCsFromSelector(selector string) ([]corev1.PersistentVolumeClaim, error) {
pvcList, err := c.kubeClient.CoreV1().PersistentVolumeClaims(c.namespace).List(metav1.ListOptions{
pvcList, err := c.kubeClient.CoreV1().PersistentVolumeClaims(c.Namespace).List(metav1.ListOptions{
LabelSelector: selector,
})
if err != nil {
@@ -1909,7 +1966,7 @@ func (c *Client) GetOneDeploymentConfigFromSelector(selector string) (*appsv1.De
// An error is thrown when exactly one Pod is not found.
func (c *Client) GetOnePodFromSelector(selector string) (*corev1.Pod, error) {
pods, err := c.kubeClient.CoreV1().Pods(c.namespace).List(metav1.ListOptions{
pods, err := c.kubeClient.CoreV1().Pods(c.Namespace).List(metav1.ListOptions{
LabelSelector: selector,
})
if err != nil {
@@ -2120,7 +2177,7 @@ func (c *Client) AddEnvironmentVariablesToDeploymentConfig(envs []corev1.EnvVar,
dc.Spec.Template.Spec.Containers[0].Env = append(dc.Spec.Template.Spec.Containers[0].Env, envs...)
_, err := c.appsClient.DeploymentConfigs(c.namespace).Update(dc)
_, err := c.appsClient.DeploymentConfigs(c.Namespace).Update(dc)
if err != nil {
return errors.Wrapf(err, "unable to update Deployment Config %v", dc.Name)
}
@@ -2141,7 +2198,7 @@ func (c *Client) GetServerVersion() (*serverInfo, error) {
var info serverInfo
// This will fetch the information about Server Address
config, err := c.kubeConfig.ClientConfig()
config, err := c.KubeConfig.ClientConfig()
if err != nil {
return nil, errors.Wrapf(err, "unable to get server's address")
}
@@ -2177,7 +2234,7 @@ func (c *Client) ExecCMDInContainer(podName string, cmd []string, stdout io.Writ
req := c.kubeClient.CoreV1().RESTClient().
Post().
Namespace(c.namespace).
Namespace(c.Namespace).
Resource("pods").
Name(podName).
SubResource("exec").
@@ -2189,7 +2246,7 @@ func (c *Client) ExecCMDInContainer(podName string, cmd []string, stdout io.Writ
TTY: tty,
}, scheme.ParameterCodec)
config, err := c.kubeConfig.ClientConfig()
config, err := c.KubeConfig.ClientConfig()
if err != nil {
return errors.Wrapf(err, "unable to get Kubernetes client config")
}
@@ -2249,13 +2306,13 @@ func (c *Client) GetPVCNameFromVolumeMountName(volumeMountName string, dc *appsv
// GetPVCFromName returns the PVC of the given name
func (c *Client) GetPVCFromName(pvcName string) (*corev1.PersistentVolumeClaim, error) {
return c.kubeClient.CoreV1().PersistentVolumeClaims(c.namespace).Get(pvcName, metav1.GetOptions{})
return c.kubeClient.CoreV1().PersistentVolumeClaims(c.Namespace).Get(pvcName, metav1.GetOptions{})
}
// UpdatePVCLabels updates the given PVC with the given labels
func (c *Client) UpdatePVCLabels(pvc *corev1.PersistentVolumeClaim, labels map[string]string) error {
pvc.Labels = labels
_, err := c.kubeClient.CoreV1().PersistentVolumeClaims(c.namespace).Update(pvc)
_, err := c.kubeClient.CoreV1().PersistentVolumeClaims(c.Namespace).Update(pvc)
if err != nil {
return errors.Wrap(err, "unable to remove storage label from PVC")
}
@@ -2325,7 +2382,7 @@ func (c *Client) CreateBuildConfig(commonObjectMeta metav1.ObjectMeta, builderIm
if len(envVars) > 0 {
bc.Spec.Strategy.SourceStrategy.Env = envVars
}
_, err = c.buildClient.BuildConfigs(c.namespace).Create(&bc)
_, err = c.buildClient.BuildConfigs(c.Namespace).Create(&bc)
if err != nil {
return buildv1.BuildConfig{}, errors.Wrapf(err, "unable to create BuildConfig for %s", commonObjectMeta.Name)
}

View File

@@ -2061,7 +2061,7 @@ func TestGetExposedPorts(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fkclient, fkclientset := FakeNew()
fkclient.namespace = "testing"
fkclient.Namespace = "testing"
fkclientset.ImageClientset.PrependReactor("get", "imagestreamimages", func(action ktesting.Action) (bool, runtime.Object, error) {
return true, fakeImageStreamImage("python", []string{"8080/tcp"}), nil
@@ -2313,7 +2313,7 @@ func TestGetImageStream(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fkclient, fkclientset := FakeNew()
fkclient.namespace = "testing"
fkclient.Namespace = "testing"
openshiftIS := fakeImageStream(tt.imageName, "openshift", []string{"latest", "3.5"})
currentNSIS := fakeImageStream(tt.imageName, "testing", []string{"latest"})

View File

@@ -67,15 +67,6 @@ func Delete(client *occlient.Client, projectName string) error {
}
}
// If there will be any projects post the current deletion,
// Choose the first project from remainder of the project list to set as current
if len(projects) > 0 {
currentProject = projects[0].Name
} else {
// Set the current project to empty string
currentProject = ""
}
// If current project is not same as the project to be deleted, set it as current
if currentProject != projectName {
// Set the project to be deleted as current inorder to be able to delete it
@@ -91,6 +82,15 @@ func Delete(client *occlient.Client, projectName string) error {
return errors.Wrap(err, "unable to delete project")
}
// If there will be any projects post the current deletion,
// Choose the first project from remainder of the project list to set as current
if len(projects) > 0 {
currentProject = projects[0].Name
} else {
// Set the current project to empty string
currentProject = ""
}
// If current project is not empty string, set currentProject as current project
if currentProject != "" {
glog.V(4).Infof("Setting the current project to %s\n", currentProject)

View File

@@ -1,18 +1,18 @@
package project
// ToDo(@anmolbabu) uncomment tests when we have a nicer and cleaner way to stub occlient.go#ModifyConfig
/*
import (
"reflect"
"os"
"testing"
"github.com/redhat-developer/odo/pkg/occlient"
"github.com/redhat-developer/odo/pkg/testingutil"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
ktesting "k8s.io/client-go/testing"
"k8s.io/client-go/tools/clientcmd"
)
func TestDelete(t *testing.T) {
tests := []struct {
name string
@@ -25,19 +25,45 @@ func TestDelete(t *testing.T) {
projectName: "prj2",
},
{
name: "Test only project delete",
name: "Test delete the only remaining project",
wantErr: false,
projectName: "prj1",
projectName: "testing",
},
}
odoConfigFile, kubeConfigFile, err := testingutil.SetUp(
testingutil.ConfigDetails{
FileName: "odo-test-config",
Config: testingutil.FakeOdoConfig("odo-test-config"),
ConfigPathEnv: "ODOCONFIG",
}, testingutil.ConfigDetails{
FileName: "kube-test-config",
Config: testingutil.FakeKubeClientConfig(),
ConfigPathEnv: "KUBECONFIG",
},
)
defer testingutil.CleanupEnv([]*os.File{odoConfigFile, kubeConfigFile}, t)
if err != nil {
t.Errorf("failed to create mock odo and kube config files. Error %v", err)
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Fake the client with the appropriate arguments
client, fakeClientSet := occlient.FakeNew()
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
configOverrides := &clientcmd.ConfigOverrides{}
client.KubeConfig = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
client.Namespace = "testing"
fkWatch := watch.NewFake()
fakeClientSet.ProjClientset.PrependReactor("list", "projects", func(action ktesting.Action) (bool, runtime.Object, error) {
if tt.name == "Test delete the only remaining project" {
return true, testingutil.FakeOnlyOneExistingProjects(), nil
}
return true, testingutil.FakeProjects(), nil
})
@@ -45,8 +71,15 @@ func TestDelete(t *testing.T) {
return true, nil, nil
})
go func() {
fkWatch.Delete(testingutil.FakeProjectStatus(corev1.NamespacePhase(""), tt.projectName))
}()
fakeClientSet.ProjClientset.PrependWatchReactor("projects", func(action ktesting.Action) (handled bool, ret watch.Interface, err error) {
return true, fkWatch, nil
})
// The function we are testing
err := Delete(client, tt.projectName)
err = Delete(client, tt.projectName)
// Checks for error in positive cases
if !tt.wantErr == (err != nil) {
@@ -55,4 +88,3 @@ func TestDelete(t *testing.T) {
})
}
}
*/

169
pkg/testingutil/configs.go Normal file
View File

@@ -0,0 +1,169 @@
package testingutil
import (
"fmt"
"io/ioutil"
"os"
"os/user"
"testing"
"github.com/ghodss/yaml"
"github.com/pkg/errors"
"github.com/redhat-developer/odo/pkg/config"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
// ConfigDetails struct holds configuration details(odo and/or kube config)
type ConfigDetails struct {
FileName string
Config interface{}
ConfigPathEnv string
}
// getConfFolder generates a mock config folder for the unit testing.
// It creates a ".kube"-prefixed temp directory under the current user's
// home directory and returns its path.
func getConfFolder() (string, error) {
	usr, err := user.Current()
	if err != nil {
		return "", err
	}
	// TempDir returns ("", err) on failure, which matches our contract directly.
	return ioutil.TempDir(usr.HomeDir, ".kube")
}
// setupTempConfigFile takes config file name - confFile and creates it for unit testing
// The invocation of setupTempConfigFile puts the onus of invoking the configCleanUp as well
func setupTempConfigFile(confFile string) (*os.File, error) {
	folder, err := getConfFolder()
	if err != nil {
		return nil, err
	}
	f, err := ioutil.TempFile(folder, confFile)
	if err != nil {
		return nil, errors.Wrapf(err, "unable to create test config file")
	}
	return f, nil
}
// setupEnv sets the env var envName to the given config file path.
// The config logic relies on this env var (if present) to read and/or write config.
// It is used for both the odo config env var (e.g. ODOCONFIG) and KUBECONFIG,
// so the error message reports the actual env var and file instead of a
// hard-coded "ODOCONFIG to odo-test-config".
func setupEnv(envName string, configFile string) error {
	if err := os.Setenv(envName, configFile); err != nil {
		return errors.Wrapf(err, "unable to set %s to %s", envName, configFile)
	}
	return nil
}
// SetUp sets up the odo and kube config files and returns respective conf file pointers and error
func SetUp(odoConfigDetails ConfigDetails, kubeConfigDetails ConfigDetails) (*os.File, *os.File, error) {
	// Create the odo config first; bail out before touching the kube config on failure.
	odoFile, err := setUpConfig(odoConfigDetails.FileName, odoConfigDetails.Config, odoConfigDetails.ConfigPathEnv)
	if err != nil {
		return odoFile, nil, err
	}
	kubeFile, err := setUpConfig(kubeConfigDetails.FileName, kubeConfigDetails.Config, kubeConfigDetails.ConfigPathEnv)
	return odoFile, kubeFile, err
}
// setUpConfig sets up mock config
// Parameters:
//	testFile: the name of the mock config file
//	conf: the config object to write to the mock config file
//	configEnvName: Name of env variable that corresponds to config file
// Returns:
//	file handler for the mock config file
//	error if any
func setUpConfig(testFile string, conf interface{}, configEnvName string) (*os.File, error) {
	var data []byte
	var err error
	// A type switch avoids the repeated shadowing `if conf, ok := conf.(T)`
	// checks and makes the unsupported-type case explicit.
	switch c := conf.(type) {
	case config.ConfigInfo:
		data, err = yaml.Marshal(c.Config)
	case clientcmdapi.Config:
		data, err = yaml.Marshal(c)
	case string:
		data = []byte(c)
	default:
		return nil, fmt.Errorf("config %+v not of recognisable type", conf)
	}
	if err != nil {
		return nil, errors.Wrap(err, "unable to create mock config file")
	}
	configFile, err := setupTempConfigFile(testFile)
	if err != nil {
		return nil, errors.Wrap(err, "unable to create mock config file")
	}
	if _, err := configFile.Write(data); err != nil {
		return nil, errors.Wrapf(err, "unable to write config %+v to mock config file %s", conf, configFile.Name())
	}
	return configFile, setupEnv(configEnvName, configFile.Name())
}
// CleanupEnv cleans up the mock config file and anything that SetupEnv generated
// Parameters:
//	confFiles: the mock config file handlers (nil entries are skipped)
//	t: testing pointer to log errors if any
func CleanupEnv(confFiles []*os.File, t *testing.T) {
	for _, confFile := range confFiles {
		if confFile == nil {
			continue
		}
		if err := confFile.Close(); err != nil {
			t.Errorf("failed to cleanup the test env. Error: %v", err)
		}
		// Report (but do not abort on) removal failures so leaked mock
		// config files are visible instead of being silently ignored.
		if err := os.Remove(confFile.Name()); err != nil {
			t.Errorf("failed to remove mock config file %s. Error: %v", confFile.Name(), err)
		}
	}
}
// FakeOdoConfig returns mock odo config
// It takes a confPath which is the path to the config
func FakeOdoConfig(confPath string) config.ConfigInfo {
	// Single active application used by the project-delete unit tests.
	app := config.ApplicationInfo{
		Name:            "app-india",
		Active:          true,
		Project:         "prj1",
		ActiveComponent: "comp1",
	}
	return config.ConfigInfo{
		Filename: confPath,
		Config: config.Config{
			ActiveApplications: []config.ApplicationInfo{app},
		},
	}
}
// FakeKubeClientConfig returns mock kube client config
func FakeKubeClientConfig() string {
	// Minimal kubeconfig pointing at a fake cluster with namespace "testing".
	kubeConfig := `apiVersion: v1
clusters:
- cluster:
    insecure-skip-tls-verify: true
    server: https://192.168.42.237:8443
  name: 192-168-42-237:8443
contexts:
- context:
    cluster: 192-168-42-237:8443
    namespace: testing
    user: developer/192-168-42-237:8443
  name: myproject/192-168-42-237:8443/developer
current-context: myproject/192-168-42-237:8443/developer
kind: Config
preferences: {}
users:
- name: developer/192-168-42-237:8443
  user:
    token: C0E6Gkmi3n_Se2QKx6Unw3Y3Zu4mJHgzdrMVK0DsDwc`
	return kubeConfig
}

View File

@@ -1,7 +1,9 @@
package testingutil
import (
projectv1 "github.com/openshift/api/project/v1"
v1 "github.com/openshift/api/project/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -23,3 +25,32 @@ func FakeProjects() *v1.ProjectList {
},
}
}
// FakeProjectStatus returns fake project status for use by mock watch on project
func FakeProjectStatus(prjStatus corev1.NamespacePhase, prjName string) *projectv1.Project {
	prj := projectv1.Project{}
	prj.Name = prjName
	prj.Status = projectv1.ProjectStatus{Phase: prjStatus}
	return &prj
}
// FakeOnlyOneExistingProjects returns fake projectlist with single project for use by API mock functions for Unit tests testing delete of the only available project
func FakeOnlyOneExistingProjects() *v1.ProjectList {
	list := v1.ProjectList{}
	list.Items = append(list.Items, getFakeProject("testing"))
	return &list
}
// FakeRemoveProject removes the delete requested project from the list of projects passed
func FakeRemoveProject(project string, projects *v1.ProjectList) *v1.ProjectList {
	// Filter into a fresh view of the same backing array instead of deleting
	// inside the range loop: append-deleting while ranging shifts elements
	// under the iterator, which can skip entries or re-examine stale data.
	remaining := projects.Items[:0]
	for _, proj := range projects.Items {
		if proj.Name != project {
			remaining = append(remaining, proj)
		}
	}
	projects.Items = remaining
	return projects
}

View File

@@ -332,7 +332,7 @@ var _ = Describe("odoCmpE2e", func() {
It("should delete the application", func() {
runCmd("odo app delete " + appTestName + " -f")
runCmd("odo project delete " + projName)
runCmd("odo project delete " + projName + " -f")
waitForDeleteCmd("odo project list", projName)
})
})

View File

@@ -548,7 +548,7 @@ var _ = Describe("odoe2e", func() {
cmpList := runCmd("odo list")
Expect(cmpList).NotTo(ContainSubstring("nodejs"))
runCmd("odo project delete " + projName)
runCmd("odo project delete " + projName + " -f")
waitForDeleteCmd("odo project list", projName)
})
})