Storage workflow (#1547)
* Modifies storage according to the new workflow. Adds tests. Signed-off-by: mik-dass <mrinald7@gmail.com>
* Added e2e tests. Signed-off-by: mik-dass <mrinald7@gmail.com>
* Added comments for CreateArgs and DeleteFromConfigurationList
* Rebased the PR. Signed-off-by: mik-dass <mrinald7@gmail.com>
* Merged storage e2e test with watch test
Committed by: OpenShift Merge Robot
Parent: af6d83186b
Commit: ffcde3baa0
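The diff below wires the new workflow together roughly as follows: storage is first recorded in the local odo config, converted into a storage list, pushed to the cluster as PVCs, and finally mounted while the DeploymentConfig is created or updated. A minimal, hedged sketch of that flow in Go (not compilable as-is: getStorageFromConfig is private to pkg/component, client setup and error handling are omitted, and the literal values are placeholders):

	// 1. Record the storage in the local config (pkg/config helpers added below).
	lci, _ := config.NewLocalConfigInfo("")
	_, _ = lci.StorageCreate("mystorage", "1Gi", "/data")

	// 2. Convert the config entries into a storage.StorageList (pkg/component helper below).
	storageList := getStorageFromConfig(lci)

	// 3. Create the PVCs that do not exist yet; the returned map is storage path -> generated PVC.
	storageToBeMounted, _, _ := storage.Push(client, storageList, lci.GetName(), lci.GetApplication(), false)

	// 4. Hand the map to occlient so the volumes are mounted while the DC is created.
	createArgs := occlient.CreateArgs{StorageToBeMounted: storageToBeMounted}
	_ = createArgs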
@@ -77,13 +77,14 @@ jobs:

  - <<: *base-test
    stage: test
    name: "Watch e2e tests"
    name: "Watch and Storage e2e tests"
    script:
      - ./scripts/oc-cluster.sh
      - make bin
      - sudo cp odo /usr/bin
      - oc login -u developer
      - make test-watch-e2e
      - travis_wait make test-watch-e2e
      - travis_wait make test-storage-e2e

  # test installation script on linux
  # - stage: test

Makefile (7 changed lines)

@@ -133,11 +133,16 @@ test-service-e2e:

test-link-e2e:
	go test -v github.com/openshift/odo/tests/integration --ginkgo.focus="odoLinkE2e" -ginkgo.slowSpecThreshold=$(SLOW_SPEC_THRESHOLD) -ginkgo.v -timeout $(TIMEOUT)

# Run link e2e tests
# Run watch e2e tests
.PHONY: test-watch-e2e
test-watch-e2e:
	go test -v github.com/openshift/odo/tests/integration --ginkgo.focus="odoWatchE2e" -ginkgo.slowSpecThreshold=$(SLOW_SPEC_THRESHOLD) -ginkgo.v -timeout $(TIMEOUT)

# Run storage e2e tests
.PHONY: test-storage-e2e
test-storage-e2e:
	go test -v github.com/openshift/odo/tests/integration --ginkgo.focus="odoStorageE2e" -ginkgo.slowSpecThreshold=$(SLOW_SPEC_THRESHOLD) -ginkgo.v -timeout $(TIMEOUT)

# Run login e2e tests
.PHONY: test-odo-login-e2e
test-odo-login-e2e:

@@ -356,12 +356,20 @@ func CreateComponent(client *occlient.Client, componentConfig config.LocalConfig
	appName := componentConfig.GetApplication()
	envVarsList := componentConfig.GetEnvVars()

	// create and get the storage to be created/mounted during the component creation
	storageList := getStorageFromConfig(&componentConfig)
	storageToBeMounted, _, err := storage.Push(client, storageList, componentConfig.GetName(), componentConfig.GetApplication(), false)
	if err != nil {
		return err
	}

	log.Successf("Initializing '%s' component", cmpName)
	createArgs := occlient.CreateArgs{
		Name:            cmpName,
		ImageName:       cmpType,
		ApplicationName: appName,
		EnvVars:         envVarsList.ToStringSlice(),
		Name:               cmpName,
		ImageName:          cmpType,
		ApplicationName:    appName,
		EnvVars:            envVarsList.ToStringSlice(),
		StorageToBeMounted: storageToBeMounted,
	}
	createArgs.SourceType = cmpSrcType
	createArgs.SourcePath = componentConfig.GetSourceLocation()

@@ -897,6 +905,13 @@ func Update(client *occlient.Client, componentSettings config.LocalConfigInfo, n
	cmpPorts := componentSettings.GetPorts()
	envVarsList := componentSettings.GetEnvVars()

	// retrieve the list of storages to create/mount and unmount
	storageList := getStorageFromConfig(&componentSettings)
	storageToMount, storageToUnMount, err := storage.Push(client, storageList, componentSettings.GetName(), componentSettings.GetApplication(), true)
	if err != nil {
		return errors.Wrapf(err, "unable to get storage to mount and unmount")
	}

	// Retrieve the old source type
	oldSourceType, _, err := GetComponentSource(client, componentName, applicationName)
	if err != nil {

@@ -976,12 +991,13 @@ func Update(client *occlient.Client, componentSettings config.LocalConfigInfo, n
		return err
	}
	updateComponentParams := occlient.UpdateComponentParams{
		CommonObjectMeta:  commonObjectMeta,
		ImageMeta:         commonImageMeta,
		ResourceLimits:    resourceLimits,
		EnvVars:           evl,
		DcRollOutWaitCond: occlient.IsDCRolledOut,
		ExistingDC:        currentDC,
		CommonObjectMeta:     commonObjectMeta,
		ImageMeta:            commonImageMeta,
		ResourceLimits:       resourceLimits,
		DcRollOutWaitCond:    occlient.IsDCRolledOut,
		ExistingDC:           currentDC,
		StorageToBeMounted:   storageToMount,
		StorageToBeUnMounted: storageToUnMount,
	}
	// STEP 2. Determine what the new source is going to be

@@ -1275,3 +1291,13 @@ func isEmpty(name string) (bool, error) {
	}
	return false, err // Either not empty or error, suits both cases
}

// getStorageFromConfig gets all the storage from the config
// returns a list of storage in storage struct format
func getStorageFromConfig(localConfig *config.LocalConfigInfo) storage.StorageList {
	storageList := storage.StorageList{}
	for _, storageVar := range localConfig.GetStorage() {
		storageList.Items = append(storageList.Items, storage.GetMachineReadableFormat(storageVar.Name, storageVar.Size, storageVar.Path))
	}
	return storageList
}

@@ -23,9 +23,14 @@ const (
	localConfigAPIVersion = "odo.openshift.io/v1alpha1"
)

type ComponentStorageSettings struct {
	Name string `yaml:"Name,omitempty"`
	Size string `yaml:"Size,omitempty"`
	Path string `yaml:"Path,omitempty"`
}

// ComponentSettings holds all component related information
type ComponentSettings struct {

	// The builder image to use
	Type *string `yaml:"Type,omitempty"`

@@ -53,6 +58,8 @@ type ComponentSettings struct {

	MaxMemory *string `yaml:"MaxMemory,omitempty"`

	Storage *[]ComponentStorageSettings `yaml:"Storage,omitempty"`

	// Ignore if set to true then odoignore file should be considered
	Ignore *bool `yaml:"Ignore,omitempty"`

@@ -216,6 +223,13 @@ func (lci *LocalConfigInfo) SetConfiguration(parameter string, value interface{}
			lci.componentSettings.MinCPU = &strValue
		case "maxcpu":
			lci.componentSettings.MaxCPU = &strValue
		case "storage":
			storageSetting, _ := value.(ComponentStorageSettings)
			if lci.componentSettings.Storage != nil {
				*lci.componentSettings.Storage = append(*lci.componentSettings.Storage, storageSetting)
			} else {
				lci.componentSettings.Storage = &[]ComponentStorageSettings{storageSetting}
			}
		case "cpu":
			lci.componentSettings.MinCPU = &strValue
			lci.componentSettings.MaxCPU = &strValue
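For illustration, the new "storage" case is what StorageCreate in pkg/config/storage.go (added later in this diff) drives. A hedged sketch of a direct call, assuming an existing *LocalConfigInfo named lci and placeholder values:

	// The value is type-asserted to ComponentStorageSettings and appended to
	// componentSettings.Storage, creating the slice on first use.
	err := lci.SetConfiguration("storage", ComponentStorageSettings{
		Name: "mystorage", // placeholder
		Size: "1Gi",
		Path: "/data",
	})
	if err != nil {
		// handle the error
	}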
@@ -292,7 +306,24 @@ func (lci *LocalConfigInfo) DeleteUrl(parameter string) error {
		}
	}
	return lci.writeToFile()
}

// DeleteFromConfigurationList is used to delete a value from a list from the local odo config
// parameter is the name of the config parameter
// value is the value to be deleted
func (lci *LocalConfigInfo) DeleteFromConfigurationList(parameter string, value string) error {
	if parameter, ok := asLocallySupportedParameter(parameter); ok {
		switch parameter {
		case "storage":
			for i, storage := range lci.GetStorage() {
				if storage.Name == value {
					*lci.componentSettings.Storage = append((*lci.componentSettings.Storage)[:i], (*lci.componentSettings.Storage)[i+1:]...)
				}
			}
			return lci.writeToFile()
		}
	}
	return errors.Errorf("unknown parameter :'%s' is not a parameter in local odo config", parameter)
}

// GetComponentSettings returns the componentSettings from local config

@@ -439,8 +470,15 @@ func (lc *LocalConfig) GetUrl() []ConfigUrl {
	return *lc.componentSettings.Url
}

const (
// GetStorage returns the Storage, returns empty if nil
func (lc *LocalConfig) GetStorage() []ComponentStorageSettings {
	if lc.componentSettings.Storage == nil {
		return []ComponentStorageSettings{}
	}
	return *lc.componentSettings.Storage
}

const (
	// Type is the name of the setting controlling the component type i.e. builder image
	Type = "Type"
	// TypeDescription is human-readable description of the componentType setting

@@ -499,6 +537,10 @@ const (
	RefDescription = "Git ref to use for creating component from git source"
	// SourceTypeDescription is the description of type setting
	SourceTypeDescription = "Type of component source - git/binary/local"
	// Storage is the name of the setting controlling storage
	Storage = "Storage"
	// StorageDescription is the description of the storage
	StorageDescription = "Storage of the component"
	// SourceLocationDescription is the human-readable description of path setting
	SourceLocationDescription = "The path indicates the location of binary file or git source"
	// Url

@@ -523,6 +565,7 @@ var (
		Ignore:  IgnoreDescription,
		MinCPU:  MinCPUDescription,
		MaxCPU:  MaxCPUDescription,
		Storage: StorageDescription,
		CPU:     CPUDescription,
		Url:     UrlDescription,
	}

@@ -279,7 +279,7 @@ func TestLocalUnsetConfiguration(t *testing.T) {
func TestLowerCaseParameterForLocalParameters(t *testing.T) {
	expected := map[string]bool{"name": true, "minmemory": true, "ignore": true, "project": true,
		"application": true, "type": true, "ref": true, "mincpu": true, "cpu": true, "ports": true, "maxmemory": true,
		"maxcpu": true, "sourcetype": true, "sourcelocation": true, "memory": true, "url": true}
		"maxcpu": true, "sourcetype": true, "sourcelocation": true, "memory": true, "storage": true, "url": true}
	actual := util.GetLowerCaseParameters(GetLocallySupportedParameters())
	if !reflect.DeepEqual(expected, actual) {
		t.Errorf("expected '%v', got '%v'", expected, actual)

pkg/config/storage.go (new file, 71 lines)

@@ -0,0 +1,71 @@
package config

import (
	"github.com/pkg/errors"
)

func (lci *LocalConfigInfo) StorageCreate(name, size, path string) (ComponentStorageSettings, error) {
	storage := ComponentStorageSettings{
		Name: name,
		Size: size,
		Path: path,
	}
	err := lci.SetConfiguration("storage", storage)
	if err != nil {
		return ComponentStorageSettings{}, err
	}
	return storage, err
}

func (lci *LocalConfigInfo) StorageExists(storageName string) bool {
	for _, storage := range lci.GetStorage() {
		if storageName == storage.Name {
			return true
		}
	}
	return false
}

func (lci *LocalConfigInfo) StorageList() ([]ComponentStorageSettings, error) {
	storageConfigList := lci.GetStorage()
	var storageList []ComponentStorageSettings
	for _, storage := range storageConfigList {
		storageList = append(storageList, ComponentStorageSettings{
			Name: storage.Name,
			Path: storage.Path,
			Size: storage.Size,
		})
	}
	return storageList, nil
}

func (lci *LocalConfigInfo) ValidateStorage(storageName, storagePath string) error {
	for _, storage := range lci.GetStorage() {
		if storage.Name == storageName {
			return errors.Errorf("there already is a storage with the name %s", storageName)
		}
		if storage.Path == storagePath {
			return errors.Errorf("there already is a storage mounted at %s", storagePath)
		}
	}
	return nil
}

func (lci *LocalConfigInfo) StorageDelete(name string) error {
	exists := lci.StorageExists(name)
	if !exists {
		return errors.Errorf("storage named %s doesn't exists", name)
	}
	return lci.DeleteFromConfigurationList("storage", name)
}

func (lci *LocalConfigInfo) GetMountPath(storageName string) string {
	var mPath string
	storageList, _ := lci.StorageList()
	for _, storage := range storageList {
		if storage.Name == storageName {
			mPath = storage.Path
		}
	}
	return mPath
}
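A hedged usage sketch of the helpers defined above, assuming a *LocalConfigInfo obtained elsewhere (for example via NewLocalConfigInfo) and placeholder names and sizes:

	if err := lci.ValidateStorage("mystorage", "/data"); err != nil {
		// a storage with that name or mount path already exists in the config
	}
	if _, err := lci.StorageCreate("mystorage", "1Gi", "/data"); err != nil {
		// handle the error
	}
	_ = lci.StorageExists("mystorage") // true after the create above
	_ = lci.GetMountPath("mystorage")  // "/data"
	if err := lci.StorageDelete("mystorage"); err != nil {
		// handle the error
	}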

pkg/config/storage_test.go (new file, 495 lines)

@@ -0,0 +1,495 @@
package config

import (
	"io/ioutil"
	"os"
	"reflect"
	"testing"
)

func TestLocalConfigInfo_StorageCreate(t *testing.T) {
	tempConfigFile, err := ioutil.TempFile("", "odoconfig")
	if err != nil {
		t.Fatal(err)
	}
	defer tempConfigFile.Close()
	os.Setenv(localConfigEnvName, tempConfigFile.Name())

	tests := []struct {
		name           string
		storageName    string
		storageSize    string
		storagePath    string
		existingConfig LocalConfig
	}{
		{
			name:        "case 1: no other storage present",
			storageName: "example-storage-0",
			storageSize: "100M",
			storagePath: "/data",
			existingConfig: LocalConfig{
				componentSettings: ComponentSettings{},
			},
		},
		{
			name:        "case 2: one other storage present",
			storageName: "example-storage-1",
			storageSize: "100M",
			storagePath: "/data-1",
			existingConfig: LocalConfig{
				componentSettings: ComponentSettings{
					Storage: &[]ComponentStorageSettings{
						{
							Name: "example-storage-0",
							Path: "/data",
							Size: "100M",
						},
					},
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cfg, err := NewLocalConfigInfo("")
			if err != nil {
				t.Error(err)
			}
			cfg.LocalConfig = tt.existingConfig

			_, err = cfg.StorageCreate(tt.storageName, tt.storageSize, tt.storagePath)
			if err != nil {
				t.Error(err)
			}

			found := false
			for _, storage := range *cfg.componentSettings.Storage {
				if storage.Name == tt.storageName && storage.Size == tt.storageSize && storage.Path == tt.storagePath {
					found = true
				}
			}
			if !found {
				t.Errorf("the storage '%v' is not set properly in the config", tt)
			}
		})
	}
}

func TestLocalConfigInfo_StorageExists(t *testing.T) {
	tempConfigFile, err := ioutil.TempFile("", "odoconfig")
	if err != nil {
		t.Fatal(err)
	}
	defer tempConfigFile.Close()
	os.Setenv(localConfigEnvName, tempConfigFile.Name())

	tests := []struct {
		name           string
		storageName    string
		existingConfig LocalConfig
		storageExists  bool
	}{
		{
			name:        "case 1: storage present",
			storageName: "example-storage-1",
			existingConfig: LocalConfig{
				componentSettings: ComponentSettings{
					Storage: &[]ComponentStorageSettings{
						{
							Name: "example-storage-1",
						},
					},
				},
			},
			storageExists: true,
		},
		{
			name:        "case 2: storage present",
			storageName: "example-storage-1",
			existingConfig: LocalConfig{
				componentSettings: ComponentSettings{
					Storage: &[]ComponentStorageSettings{
						{
							Name: "example-storage-0",
						},
					},
				},
			},
			storageExists: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cfg, err := NewLocalConfigInfo("")
			if err != nil {
				t.Error(err)
			}
			cfg.LocalConfig = tt.existingConfig

			exists := cfg.StorageExists(tt.storageName)
			if exists != tt.storageExists {
				t.Errorf("wrong value of exists, expected: %v, unexpected: %v", tt.storageExists, exists)
			}
		})
	}
}

func TestLocalConfigInfo_StorageList(t *testing.T) {
	tempConfigFile, err := ioutil.TempFile("", "odoconfig")
	if err != nil {
		t.Fatal(err)
	}
	defer tempConfigFile.Close()
	os.Setenv(localConfigEnvName, tempConfigFile.Name())

	tests := []struct {
		name           string
		existingConfig LocalConfig
	}{
		{
			name: "case 1: one storage exists",
			existingConfig: LocalConfig{
				componentSettings: ComponentSettings{
					Storage: &[]ComponentStorageSettings{
						{
							Name: "example-storage-0",
							Path: "/data-0",
							Size: "100M",
						},
					},
				},
			},
		},
		{
			name: "case 2: more than one storage exists",
			existingConfig: LocalConfig{
				componentSettings: ComponentSettings{
					Storage: &[]ComponentStorageSettings{
						{
							Name: "example-storage-0",
							Path: "/data-0",
							Size: "100M",
						},
						{
							Name: "example-storage-1",
							Path: "/data-1",
							Size: "100M",
						},
					},
				},
			},
		},
		{
			name: "case 3: no storage exists",
			existingConfig: LocalConfig{
				componentSettings: ComponentSettings{
					Storage: &[]ComponentStorageSettings{},
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cfg, err := NewLocalConfigInfo("")
			if err != nil {
				t.Error(err)
			}
			cfg.LocalConfig = tt.existingConfig

			storageList, err := cfg.StorageList()
			if err != nil {
				t.Error(err)
			}

			if len(*tt.existingConfig.componentSettings.Storage) != len(storageList) {
				t.Errorf("length mismatch, expected: %v, unexpected: %v", len(*tt.existingConfig.componentSettings.Storage), len(storageList))
			}

			for _, storageConfig := range *tt.existingConfig.componentSettings.Storage {
				found := false

				for _, storageResult := range storageList {
					if reflect.DeepEqual(storageResult, storageConfig) {
						found = true
					}
				}

				if !found {
					t.Errorf("storage %v not found while listing", storageConfig)
				}
			}
		})
	}
}

func TestLocalConfigInfo_ValidateStorage(t *testing.T) {
	tempConfigFile, err := ioutil.TempFile("", "odoconfig")
	if err != nil {
		t.Fatal(err)
	}
	defer tempConfigFile.Close()
	os.Setenv(localConfigEnvName, tempConfigFile.Name())

	tests := []struct {
		name           string
		storageName    string
		storagePath    string
		existingConfig LocalConfig
		wantError      bool
	}{
		{
			name:        "case 1: no storage present in config",
			storageName: "example-storage-0",
			storagePath: "/data",
			existingConfig: LocalConfig{
				componentSettings: ComponentSettings{
					Storage: &[]ComponentStorageSettings{},
				},
			},
			wantError: false,
		},
		{
			name:        "case 2: storage present in config with no conflict",
			storageName: "example-storage-0",
			storagePath: "/data",
			existingConfig: LocalConfig{
				componentSettings: ComponentSettings{
					Storage: &[]ComponentStorageSettings{
						{
							Name: "example-storage-1",
							Path: "/data-1",
							Size: "100M",
						},
					},
				},
			},
			wantError: false,
		},
		{
			name:        "case 3: storage present in config and with path conflict",
			storageName: "example-storage-0",
			storagePath: "/data",
			existingConfig: LocalConfig{
				componentSettings: ComponentSettings{
					Storage: &[]ComponentStorageSettings{
						{
							Name: "example-storage-1",
							Path: "/data",
							Size: "100M",
						},
					},
				},
			},
			wantError: true,
		},
		{
			name:        "case 4: storage present in config and with name conflict",
			storageName: "example-storage-0",
			storagePath: "/data",
			existingConfig: LocalConfig{
				componentSettings: ComponentSettings{
					Storage: &[]ComponentStorageSettings{
						{
							Name: "example-storage-0",
							Path: "/data-1",
							Size: "100M",
						},
					},
				},
			},
			wantError: true,
		},
		{
			name:        "case 5: storage present in config and with name and path conflicts",
			storageName: "example-storage-0",
			storagePath: "/data",
			existingConfig: LocalConfig{
				componentSettings: ComponentSettings{
					Storage: &[]ComponentStorageSettings{
						{
							Name: "example-storage-0",
							Path: "/data",
							Size: "100M",
						},
					},
				},
			},
			wantError: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cfg, err := NewLocalConfigInfo("")
			if err != nil {
				t.Error(err)
			}
			cfg.LocalConfig = tt.existingConfig

			err = cfg.ValidateStorage(tt.storageName, tt.storagePath)

			if !tt.wantError && err != nil {
				t.Errorf("no error expected,but got error: %v", err)
			}

			if tt.wantError && err == nil {
				t.Errorf("error expected,but got no error")
			}
		})
	}
}

func TestLocalConfigInfo_StorageDelete(t *testing.T) {
	tempConfigFile, err := ioutil.TempFile("", "odoconfig")
	if err != nil {
		t.Fatal(err)
	}
	defer tempConfigFile.Close()
	os.Setenv(localConfigEnvName, tempConfigFile.Name())

	tests := []struct {
		name           string
		storageName    string
		existingConfig LocalConfig
		wantError      bool
	}{
		{
			name:        "case 1: storage does exist",
			storageName: "example-storage-0",
			existingConfig: LocalConfig{
				componentSettings: ComponentSettings{
					Storage: &[]ComponentStorageSettings{
						{
							Name: "example-storage-0",
						},
					},
				},
			},
			wantError: false,
		},
		{
			name:        "case 2: storage doesn't exist",
			storageName: "example-storage-0",
			existingConfig: LocalConfig{
				componentSettings: ComponentSettings{
					Storage: &[]ComponentStorageSettings{
						{
							Name: "example-storage-1",
						},
					},
				},
			},
			wantError: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cfg, err := NewLocalConfigInfo("")
			if err != nil {
				t.Error(err)
			}
			cfg.LocalConfig = tt.existingConfig

			err = cfg.StorageDelete(tt.storageName)

			if !tt.wantError && err != nil {
				t.Errorf("no error expected,but got error: %v", err)
			}

			if tt.wantError && err == nil {
				t.Errorf("error expected,but got no error")
			}

			found := false
			for _, storage := range *cfg.componentSettings.Storage {
				if storage.Name == tt.storageName {
					found = true
				}
			}
			if found {
				t.Errorf("the storage '%v' is not deleted properly from the config", tt.storageName)
			}
		})
	}
}

func TestLocalConfigInfo_GetMountPath(t *testing.T) {
	tempConfigFile, err := ioutil.TempFile("", "odoconfig")
	if err != nil {
		t.Fatal(err)
	}
	defer tempConfigFile.Close()
	os.Setenv(localConfigEnvName, tempConfigFile.Name())

	tests := []struct {
		name           string
		storageName    string
		existingConfig LocalConfig
		wantPath       string
	}{
		{
			name:        "case 1: no storage exists",
			storageName: "example-storage-0",
			existingConfig: LocalConfig{
				componentSettings: ComponentSettings{
					Storage: &[]ComponentStorageSettings{},
				},
			},
			wantPath: "",
		},
		{
			name:        "case 2: storage exists and one storage exists in config",
			storageName: "example-storage-0",
			existingConfig: LocalConfig{
				componentSettings: ComponentSettings{
					Storage: &[]ComponentStorageSettings{
						{
							Name: "example-storage-0",
							Path: "/data",
						},
					},
				},
			},
			wantPath: "/data",
		},
		{
			name:        "case 3: storage exists and two storage exists in config",
			storageName: "example-storage-1",
			existingConfig: LocalConfig{
				componentSettings: ComponentSettings{
					Storage: &[]ComponentStorageSettings{
						{
							Name: "example-storage-0",
							Path: "/data",
						},
						{
							Name: "example-storage-1",
							Path: "/data-1",
						},
					},
				},
			},
			wantPath: "/data-1",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cfg, err := NewLocalConfigInfo("")
			if err != nil {
				t.Error(err)
			}
			cfg.LocalConfig = tt.existingConfig

			path := cfg.GetMountPath(tt.storageName)

			if path != tt.wantPath {
				t.Errorf("the value of returned path is different, expected: %v, got: %v", tt.wantPath, path)
			}
		})
	}
}
@@ -47,8 +47,6 @@ import (
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	// utilities
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"

@@ -80,6 +78,9 @@ type CreateArgs struct {
	Resources       *corev1.ResourceRequirements
	ApplicationName string
	Wait            bool
	// StorageToBeMounted describes the storage to be created
	// storagePath is the key of the map, the generatedPVC is the value of the map
	StorageToBeMounted map[string]*corev1.PersistentVolumeClaim
}

const (

@@ -166,7 +167,7 @@ type S2IPaths struct {
	BuilderImgName string
}

// UpdateComponentParams serves the purpose of holding the arguements to a component update request
// UpdateComponentParams serves the purpose of holding the arguments to a component update request
type UpdateComponentParams struct {
	// CommonObjectMeta is the object meta containing the labels and annotations expected for the new deployment
	CommonObjectMeta metav1.ObjectMeta

@@ -180,6 +181,12 @@ type UpdateComponentParams struct {
	DcRollOutWaitCond dcRollOutWait
	// ImageMeta describes the image to be used in dc(builder image for local/binary and built component image for git deployments)
	ImageMeta CommonImageMeta
	// StorageToBeMounted describes the storage to be mounted
	// storagePath is the key of the map, the generatedPVC is the value of the map
	StorageToBeMounted map[string]*corev1.PersistentVolumeClaim
	// StorageToBeUnMounted describes the storage to be unmounted
	// path is the key of the map,storageName is the value of the map
	StorageToBeUnMounted map[string]string
}

// S2IDeploymentsDir is a set of possible S2I labels that provides S2I deployments directory
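As a hedged illustration of how the two new fields are filled in, mirroring the pkg/component Update change earlier in this diff (all variables here are assumed placeholders; the two maps are the values returned by storage.Push):

	ucp := UpdateComponentParams{
		CommonObjectMeta:     commonObjectMeta,
		ImageMeta:            commonImageMeta,
		ExistingDC:           currentDC,
		DcRollOutWaitCond:    IsDCRolledOut,
		StorageToBeMounted:   storageToMount,   // storagePath -> generated PVC
		StorageToBeUnMounted: storageToUnMount, // path -> storage name
	}
	_ = ucp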
@@ -852,6 +859,10 @@ func (c *Client) NewAppS2I(params CreateArgs, commonObjectMeta metav1.ObjectMeta
|
||||
|
||||
// Generate and create the DeploymentConfig
|
||||
dc := generateGitDeploymentConfig(commonObjectMeta, buildConfig.Spec.Output.To.Name, containerPorts, inputEnvVars, params.Resources)
|
||||
err = addOrRemoveVolumeAndVolumeMount(c, &dc, params.StorageToBeMounted, nil)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to mount and unmount pvc to dc")
|
||||
}
|
||||
_, err = c.appsClient.DeploymentConfigs(c.Namespace).Create(&dc)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to create DeploymentConfig for %s", commonObjectMeta.Name)
|
||||
@@ -1173,6 +1184,10 @@ func (c *Client) BootstrapSupervisoredS2I(params CreateArgs, commonObjectMeta me
|
||||
addBootstrapSupervisordInitContainer(&dc, commonObjectMeta.Name)
|
||||
addBootstrapVolume(&dc, commonObjectMeta.Name)
|
||||
addBootstrapVolumeMount(&dc, commonObjectMeta.Name)
|
||||
err = addOrRemoveVolumeAndVolumeMount(c, &dc, params.StorageToBeMounted, nil)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to mount and unmount pvc to dc")
|
||||
}
|
||||
|
||||
if len(inputEnvs) != 0 {
|
||||
err = updateEnvVar(&dc, inputEnvs)
|
||||
@@ -1311,10 +1326,14 @@ type dcRollOutWait func(*appsv1.DeploymentConfig, int64) bool
|
||||
// if prePatchDCHandler is specified (meaning not nil), then it's applied
|
||||
// as the last action before the actual call to the Kubernetes API thus giving us the chance
|
||||
// to perform arbitrary updates to a DC before it's finalized for patching
|
||||
func (c *Client) PatchCurrentDC(name string, dc appsv1.DeploymentConfig, prePatchDCHandler dcStructUpdater, waitCond dcRollOutWait, currentDC *appsv1.DeploymentConfig, existingCmpContainer corev1.Container, waitForDc bool) error {
|
||||
func (c *Client) PatchCurrentDC(dc appsv1.DeploymentConfig, prePatchDCHandler dcStructUpdater, existingCmpContainer corev1.Container, ucp UpdateComponentParams, waitForDc bool) error {
|
||||
|
||||
name := ucp.CommonObjectMeta.Name
|
||||
currentDC := ucp.ExistingDC
|
||||
modifiedDC := *currentDC
|
||||
|
||||
waitCond := ucp.DcRollOutWaitCond
|
||||
|
||||
// copy the any remaining volumes and volume mounts
|
||||
copyVolumesAndVolumeMounts(dc, currentDC, existingCmpContainer)
|
||||
|
||||
@@ -1325,6 +1344,12 @@ func (c *Client) PatchCurrentDC(name string, dc appsv1.DeploymentConfig, prePatc
|
||||
}
|
||||
}
|
||||
|
||||
// now mount/unmount the newly created/deleted pvc
|
||||
err := addOrRemoveVolumeAndVolumeMount(c, &dc, ucp.StorageToBeMounted, ucp.StorageToBeUnMounted)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Replace the current spec with the new one
|
||||
modifiedDC.Spec = dc.Spec
|
||||
|
||||
@@ -1338,6 +1363,7 @@ func (c *Client) PatchCurrentDC(name string, dc appsv1.DeploymentConfig, prePatc
|
||||
// despite the "patch" function name, we use update since `.Patch` requires
|
||||
// use to define each and every object we must change. Updating makes it easier.
|
||||
updatedDc, err := c.appsClient.DeploymentConfigs(c.Namespace).Update(&modifiedDC)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to update DeploymentConfig %s", name)
|
||||
}
|
||||
@@ -1450,12 +1476,10 @@ func (c *Client) UpdateDCToGit(ucp UpdateComponentParams, isDeleteSupervisordVol
|
||||
if isDeleteSupervisordVolumes {
|
||||
// Patch the current DC
|
||||
err = c.PatchCurrentDC(
|
||||
ucp.CommonObjectMeta.Name,
|
||||
dc,
|
||||
removeTracesOfSupervisordFromDC,
|
||||
ucp.DcRollOutWaitCond,
|
||||
ucp.ExistingDC,
|
||||
existingCmpContainer,
|
||||
ucp,
|
||||
true,
|
||||
)
|
||||
|
||||
@@ -1470,12 +1494,10 @@ func (c *Client) UpdateDCToGit(ucp UpdateComponentParams, isDeleteSupervisordVol
|
||||
}
|
||||
} else {
|
||||
err = c.PatchCurrentDC(
|
||||
ucp.CommonObjectMeta.Name,
|
||||
dc,
|
||||
nil,
|
||||
ucp.DcRollOutWaitCond,
|
||||
ucp.ExistingDC,
|
||||
existingCmpContainer,
|
||||
ucp,
|
||||
false,
|
||||
)
|
||||
}
|
||||
@@ -1605,12 +1627,10 @@ func (c *Client) UpdateDCToSupervisor(ucp UpdateComponentParams, isToLocal bool,
|
||||
|
||||
// Patch the current DC with the new one
|
||||
err = c.PatchCurrentDC(
|
||||
ucp.CommonObjectMeta.Name,
|
||||
dc,
|
||||
nil,
|
||||
ucp.DcRollOutWaitCond,
|
||||
ucp.ExistingDC,
|
||||
existingCmpContainer,
|
||||
ucp,
|
||||
true,
|
||||
)
|
||||
if err != nil {
|
||||
@@ -2557,43 +2577,6 @@ func (c *Client) ListSecrets(labelSelector string) ([]corev1.Secret, error) {
|
||||
return secretList.Items, nil
|
||||
}
|
||||
|
||||
// CreatePVC creates a PVC resource in the cluster with the given name, size and
|
||||
// labels
|
||||
func (c *Client) CreatePVC(name string, size string, labels map[string]string) (*corev1.PersistentVolumeClaim, error) {
|
||||
quantity, err := resource.ParseQuantity(size)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to parse size: %v", size)
|
||||
}
|
||||
|
||||
pvc := &corev1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: corev1.PersistentVolumeClaimSpec{
|
||||
Resources: corev1.ResourceRequirements{
|
||||
Requests: corev1.ResourceList{
|
||||
corev1.ResourceStorage: quantity,
|
||||
},
|
||||
},
|
||||
AccessModes: []corev1.PersistentVolumeAccessMode{
|
||||
corev1.ReadWriteOnce,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
createdPvc, err := c.kubeClient.CoreV1().PersistentVolumeClaims(c.Namespace).Create(pvc)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to create PVC")
|
||||
}
|
||||
return createdPvc, nil
|
||||
}
|
||||
|
||||
// DeletePVC deletes the given PVC by name
|
||||
func (c *Client) DeletePVC(name string) error {
|
||||
return c.kubeClient.CoreV1().PersistentVolumeClaims(c.Namespace).Delete(name, nil)
|
||||
}
|
||||
|
||||
// DeleteBuildConfig deletes the given BuildConfig by name using CommonObjectMeta..
|
||||
func (c *Client) DeleteBuildConfig(commonObjectMeta metav1.ObjectMeta) error {
|
||||
|
||||
@@ -2606,77 +2589,6 @@ func (c *Client) DeleteBuildConfig(commonObjectMeta metav1.ObjectMeta) error {
|
||||
return c.buildClient.BuildConfigs(c.Namespace).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector})
|
||||
}
|
||||
|
||||
// generateVolumeNameFromPVC generates a random volume name based on the name
|
||||
// of the given PVC
|
||||
func generateVolumeNameFromPVC(pvc string) string {
|
||||
return fmt.Sprintf("%v-%v-volume", pvc, util.GenerateRandomString(nameLength))
|
||||
}
|
||||
|
||||
// AddPVCToDeploymentConfig adds the given PVC to the given Deployment Config
|
||||
// at the given path
|
||||
func (c *Client) AddPVCToDeploymentConfig(dc *appsv1.DeploymentConfig, pvc string, path string) error {
|
||||
volumeName := generateVolumeNameFromPVC(pvc)
|
||||
|
||||
// Validating dc.Spec.Template is present before dereferencing
|
||||
if dc.Spec.Template == nil {
|
||||
return fmt.Errorf("TemplatePodSpec in %s DeploymentConfig is empty", dc.Name)
|
||||
}
|
||||
dc.Spec.Template.Spec.Volumes = append(dc.Spec.Template.Spec.Volumes, corev1.Volume{
|
||||
Name: volumeName,
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: pvc,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// Validating dc.Spec.Template.Spec.Containers[] is present before dereferencing
|
||||
if len(dc.Spec.Template.Spec.Containers) == 0 {
|
||||
return fmt.Errorf("DeploymentConfig %s doesn't have any Containers defined", dc.Name)
|
||||
}
|
||||
dc.Spec.Template.Spec.Containers[0].VolumeMounts = append(dc.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{
|
||||
Name: volumeName,
|
||||
MountPath: path,
|
||||
},
|
||||
)
|
||||
|
||||
glog.V(4).Infof("Updating DeploymentConfig: %v", dc)
|
||||
_, err := c.appsClient.DeploymentConfigs(c.Namespace).Update(dc)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to update DeploymentConfig: %v", dc)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// removeVolumeFromDC removes the volume from the given Deployment Config and
|
||||
// returns true. If the given volume is not found, it returns false.
|
||||
func removeVolumeFromDC(vol string, dc *appsv1.DeploymentConfig) bool {
|
||||
found := false
|
||||
for i, volume := range dc.Spec.Template.Spec.Volumes {
|
||||
if volume.Name == vol {
|
||||
found = true
|
||||
dc.Spec.Template.Spec.Volumes = append(dc.Spec.Template.Spec.Volumes[:i], dc.Spec.Template.Spec.Volumes[i+1:]...)
|
||||
}
|
||||
}
|
||||
return found
|
||||
}
|
||||
|
||||
// removeVolumeMountFromDC removes the volumeMount from all the given containers
|
||||
// in the given Deployment Config and return true. If the given volumeMount is
|
||||
// not found, it returns false
|
||||
func removeVolumeMountFromDC(vm string, dc *appsv1.DeploymentConfig) bool {
|
||||
found := false
|
||||
for i, container := range dc.Spec.Template.Spec.Containers {
|
||||
for j, volumeMount := range container.VolumeMounts {
|
||||
if volumeMount.Name == vm {
|
||||
found = true
|
||||
dc.Spec.Template.Spec.Containers[i].VolumeMounts = append(dc.Spec.Template.Spec.Containers[i].VolumeMounts[:j], dc.Spec.Template.Spec.Containers[i].VolumeMounts[j+1:]...)
|
||||
}
|
||||
}
|
||||
}
|
||||
return found
|
||||
}
|
||||
|
||||
// RemoveVolumeFromDeploymentConfig removes the volume associated with the
|
||||
// given PVC from the Deployment Config. Both, the volume entry and the
|
||||
// volume mount entry in the containers, are deleted.
|
||||
@@ -2718,27 +2630,6 @@ func (c *Client) RemoveVolumeFromDeploymentConfig(pvc string, dcName string) err
|
||||
return nil
|
||||
}
|
||||
|
||||
// getVolumeNamesFromPVC returns the name of the volume associated with the given
|
||||
// PVC in the given Deployment Config
|
||||
func (c *Client) getVolumeNamesFromPVC(pvc string, dc *appsv1.DeploymentConfig) []string {
|
||||
var volumes []string
|
||||
for _, volume := range dc.Spec.Template.Spec.Volumes {
|
||||
|
||||
// If PVC does not exist, we skip (as this is either EmptyDir or "shared-data" from SupervisorD
|
||||
if volume.PersistentVolumeClaim == nil {
|
||||
glog.V(4).Infof("Volume has no PVC, skipping %s", volume.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
// If we find the PVC, add to volumes to be returned
|
||||
if volume.PersistentVolumeClaim.ClaimName == pvc {
|
||||
volumes = append(volumes, volume.Name)
|
||||
}
|
||||
|
||||
}
|
||||
return volumes
|
||||
}
|
||||
|
||||
// GetDeploymentConfigsFromSelector returns an array of Deployment Config
|
||||
// resources which match the given selector
|
||||
func (c *Client) GetDeploymentConfigsFromSelector(selector string) ([]appsv1.DeploymentConfig, error) {
|
||||
@@ -3243,16 +3134,6 @@ func (c *Client) GetPVCFromName(pvcName string) (*corev1.PersistentVolumeClaim,
|
||||
return c.kubeClient.CoreV1().PersistentVolumeClaims(c.Namespace).Get(pvcName, metav1.GetOptions{})
|
||||
}
|
||||
|
||||
// UpdatePVCLabels updates the given PVC with the given labels
|
||||
func (c *Client) UpdatePVCLabels(pvc *corev1.PersistentVolumeClaim, labels map[string]string) error {
|
||||
pvc.Labels = labels
|
||||
_, err := c.kubeClient.CoreV1().PersistentVolumeClaims(c.Namespace).Update(pvc)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to remove storage label from PVC")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateBuildConfig creates a buildConfig using the builderImage as well as gitURL.
|
||||
// envVars is the array containing the environment variables
|
||||
func (c *Client) CreateBuildConfig(commonObjectMeta metav1.ObjectMeta, builderImage string, gitURL string, gitRef string, envVars []corev1.EnvVar) (buildv1.BuildConfig, error) {
|
||||
|
||||
@@ -375,142 +375,6 @@ func TestGetPVCNameFromVolumeMountName(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddPVCToDeploymentConfig(t *testing.T) {
|
||||
type args struct {
|
||||
dc *appsv1.DeploymentConfig
|
||||
pvc string
|
||||
path string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "Test case 1: valid dc",
|
||||
args: args{
|
||||
dc: &appsv1.DeploymentConfig{
|
||||
Spec: appsv1.DeploymentConfigSpec{
|
||||
Selector: map[string]string{
|
||||
"deploymentconfig": "nodejs-app",
|
||||
},
|
||||
Template: &corev1.PodTemplateSpec{
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "test",
|
||||
VolumeMounts: []corev1.VolumeMount{
|
||||
{
|
||||
MountPath: "/tmp",
|
||||
Name: "test",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
pvc: "test volume",
|
||||
path: "/mnt",
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "Test case 2: dc without Containers defined",
|
||||
args: args{
|
||||
dc: &appsv1.DeploymentConfig{
|
||||
Spec: appsv1.DeploymentConfigSpec{
|
||||
Selector: map[string]string{
|
||||
"deploymentconfig": "nodejs-app",
|
||||
},
|
||||
Template: &corev1.PodTemplateSpec{
|
||||
Spec: corev1.PodSpec{},
|
||||
},
|
||||
},
|
||||
},
|
||||
pvc: "test-voulme",
|
||||
path: "/mnt",
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Test case 3: dc without Template defined",
|
||||
args: args{
|
||||
dc: &appsv1.DeploymentConfig{
|
||||
Spec: appsv1.DeploymentConfigSpec{
|
||||
Selector: map[string]string{
|
||||
"deploymentconfig": "nodejs-app",
|
||||
},
|
||||
},
|
||||
},
|
||||
pvc: "test-voulme",
|
||||
path: "/mnt",
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
fakeClient, fakeClientset := FakeNew()
|
||||
|
||||
fakeClientset.AppsClientset.PrependReactor("update", "deploymentconfigs", func(action ktesting.Action) (bool, runtime.Object, error) {
|
||||
dc := action.(ktesting.UpdateAction).GetObject().(*appsv1.DeploymentConfig)
|
||||
if dc.Name != tt.args.dc.Name {
|
||||
t.Errorf("dc Name mismatch got: %s, expected %s", dc.Name, tt.args.dc.Name)
|
||||
}
|
||||
return true, nil, nil
|
||||
})
|
||||
err := fakeClient.AddPVCToDeploymentConfig(tt.args.dc, tt.args.pvc, tt.args.path)
|
||||
|
||||
// Checks for error in positive cases
|
||||
if !tt.wantErr == (err != nil) {
|
||||
t.Errorf("Client.AddPVCToDeploymentConfig() unexpected error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
|
||||
// Checks for number of actions performed in positive cases
|
||||
if err == nil {
|
||||
// Check for validating actions performed
|
||||
if (len(fakeClientset.AppsClientset.Actions()) != 1) && (tt.wantErr != true) {
|
||||
t.Errorf("expected 1 action in GetPVCFromName got: %v", fakeClientset.AppsClientset.Actions())
|
||||
}
|
||||
|
||||
updatedDc := fakeClientset.AppsClientset.Actions()[0].(ktesting.UpdateAction).GetObject().(*appsv1.DeploymentConfig)
|
||||
found := false // creating a flag
|
||||
// iterating over the VolumeMounts for finding the one specified during func call
|
||||
for bb := range updatedDc.Spec.Template.Spec.Containers[0].VolumeMounts {
|
||||
if tt.args.path == updatedDc.Spec.Template.Spec.Containers[0].VolumeMounts[bb].MountPath {
|
||||
found = true
|
||||
if !strings.Contains(updatedDc.Spec.Template.Spec.Containers[0].VolumeMounts[bb].Name, tt.args.pvc) {
|
||||
t.Errorf("pvc name not matching with the specified value got: %v, expected %v", updatedDc.Spec.Template.Spec.Containers[0].VolumeMounts[bb].Name, tt.args.pvc)
|
||||
}
|
||||
}
|
||||
}
|
||||
if found == false {
|
||||
t.Errorf("expected Volume mount path %v not found in VolumeMounts", tt.args.path)
|
||||
}
|
||||
|
||||
found = false // resetting the flag
|
||||
// iterating over the volume claims to find the one specified during func call
|
||||
for bb := range updatedDc.Spec.Template.Spec.Volumes {
|
||||
if tt.args.pvc == updatedDc.Spec.Template.Spec.Volumes[bb].VolumeSource.PersistentVolumeClaim.ClaimName {
|
||||
found = true
|
||||
if !strings.Contains(updatedDc.Spec.Template.Spec.Volumes[bb].Name, tt.args.pvc) {
|
||||
t.Errorf("pvc name not matching in PersistentVolumeClaim, got: %v, expected %v", updatedDc.Spec.Template.Spec.Volumes[bb].Name, tt.args.pvc)
|
||||
}
|
||||
}
|
||||
}
|
||||
if found == false {
|
||||
t.Errorf("expected volume %s not found in DeploymentConfig.Spec.Template.Spec.Volumes", tt.args.pvc)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveVolumeFromDeploymentConfig(t *testing.T) {
|
||||
type args struct {
|
||||
pvc string
|
||||
@@ -634,122 +498,6 @@ func TestGetPVCFromName(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreatePVC(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
size string
|
||||
labels map[string]string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "storage 10Gi",
|
||||
size: "10Gi",
|
||||
labels: map[string]string{
|
||||
"name": "mongodb",
|
||||
"namespace": "blog",
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "storage 1024",
|
||||
size: "1024",
|
||||
labels: map[string]string{
|
||||
"name": "PostgreSQL",
|
||||
"namespace": "backend",
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "storage invalid size",
|
||||
size: "4#0",
|
||||
labels: map[string]string{
|
||||
"name": "MySQL",
|
||||
"namespace": "",
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
fkclient, fkclientset := FakeNew()
|
||||
|
||||
_, err := fkclient.CreatePVC(tt.name, tt.size, tt.labels)
|
||||
|
||||
// Checks for error in positive cases
|
||||
if !tt.wantErr == (err != nil) {
|
||||
t.Errorf(" client.CreatePVC(name, size, labels) unexpected error %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
|
||||
// Check for validating actions performed
|
||||
if (len(fkclientset.Kubernetes.Actions()) != 1) && (tt.wantErr != true) {
|
||||
t.Errorf("expected 1 action in CreatePVC got: %v", fkclientset.RouteClientset.Actions())
|
||||
}
|
||||
// Checks for return values in positive cases
|
||||
if err == nil {
|
||||
createdPVC := fkclientset.Kubernetes.Actions()[0].(ktesting.CreateAction).GetObject().(*corev1.PersistentVolumeClaim)
|
||||
quantity, err := resource.ParseQuantity(tt.size)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create quantity by calling resource.ParseQuantity(%v)", tt.size)
|
||||
}
|
||||
|
||||
// created PVC should be labeled with labels passed to CreatePVC
|
||||
if !reflect.DeepEqual(createdPVC.Labels, tt.labels) {
|
||||
t.Errorf("labels in created route is not matching expected labels, expected: %v, got: %v", tt.labels, createdPVC.Labels)
|
||||
}
|
||||
// name, size of createdPVC should be matching to size, name passed to CreatePVC
|
||||
if !reflect.DeepEqual(createdPVC.Spec.Resources.Requests["storage"], quantity) {
|
||||
t.Errorf("size of PVC is not matching to expected size, expected: %v, got %v", quantity, createdPVC.Spec.Resources.Requests["storage"])
|
||||
}
|
||||
if createdPVC.Name != tt.name {
|
||||
t.Errorf("PVC name is not matching to expected name, expected: %s, got %s", tt.name, createdPVC.Name)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeletePVC(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
pvcName string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "storage 10Gi",
|
||||
pvcName: "postgresql",
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
fakeClient, fakeClientSet := FakeNew()
|
||||
|
||||
fakeClientSet.Kubernetes.PrependReactor("delete", "persistentvolumeclaims", func(action ktesting.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, nil
|
||||
})
|
||||
|
||||
err := fakeClient.DeletePVC(tt.pvcName)
|
||||
|
||||
//Checks for error in positive cases
|
||||
if !tt.wantErr == (err != nil) {
|
||||
t.Errorf(" client.DeletePVC(name) unexpected error %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
|
||||
// Check for validating actions performed
|
||||
if (len(fakeClientSet.Kubernetes.Actions()) != 1) && (tt.wantErr != true) {
|
||||
t.Errorf("expected 1 action in DeletePVC got: %v", fakeClientSet.Kubernetes.Actions())
|
||||
}
|
||||
|
||||
// Check for value with which the function has called
|
||||
DeletedPVC := fakeClientSet.Kubernetes.Actions()[0].(ktesting.DeleteAction).GetName()
|
||||
if DeletedPVC != tt.pvcName {
|
||||
t.Errorf("Delete action is performed with wrong pvcName, expected: %s, got %s", tt.pvcName, DeletedPVC)
|
||||
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateRoute(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -3584,9 +3332,12 @@ func TestDeleteServiceInstance(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPatchCurrentDC(t *testing.T) {
|
||||
dcRollOutWait := func(*appsv1.DeploymentConfig, int64) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type args struct {
|
||||
name string
|
||||
dcBefore appsv1.DeploymentConfig
|
||||
ucp UpdateComponentParams
|
||||
dcPatch appsv1.DeploymentConfig
|
||||
prePatchDCHandler dcStructUpdater
|
||||
}
|
||||
@@ -3599,9 +3350,14 @@ func TestPatchCurrentDC(t *testing.T) {
|
||||
{
|
||||
name: "Case 1: Test patching with nil prePatchDCHandler",
|
||||
args: args{
|
||||
name: "foo",
|
||||
dcBefore: *fakeDeploymentConfig("foo", "foo", []corev1.EnvVar{{Name: "key1", Value: "value1"},
|
||||
{Name: "key2", Value: "value2"}}, []corev1.EnvFromSource{}, t),
|
||||
ucp: UpdateComponentParams{
|
||||
CommonObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo",
|
||||
},
|
||||
ExistingDC: fakeDeploymentConfig("foo", "foo", []corev1.EnvVar{{Name: "key1", Value: "value1"},
|
||||
{Name: "key2", Value: "value2"}}, []corev1.EnvFromSource{}, t),
|
||||
DcRollOutWaitCond: dcRollOutWait,
|
||||
},
|
||||
dcPatch: generateGitDeploymentConfig(metav1.ObjectMeta{Name: "foo", Annotations: map[string]string{"app.kubernetes.io/component-source-type": "git"}}, "bar",
|
||||
[]corev1.ContainerPort{{Name: "foo", HostPort: 80, ContainerPort: 80}},
|
||||
[]corev1.EnvVar{{Name: "key1", Value: "value1"}, {Name: "key2", Value: "value2"}},
|
||||
@@ -3613,9 +3369,14 @@ func TestPatchCurrentDC(t *testing.T) {
|
||||
{
|
||||
name: "Case 2: Test patching with non-nil prePatchDCHandler",
|
||||
args: args{
|
||||
name: "foo",
|
||||
dcBefore: *fakeDeploymentConfig("foo", "foo", []corev1.EnvVar{{Name: "key1", Value: "value1"},
|
||||
{Name: "key2", Value: "value2"}}, []corev1.EnvFromSource{}, t),
|
||||
ucp: UpdateComponentParams{
|
||||
CommonObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo",
|
||||
},
|
||||
ExistingDC: fakeDeploymentConfig("foo", "foo", []corev1.EnvVar{{Name: "key1", Value: "value1"},
|
||||
{Name: "key2", Value: "value2"}}, []corev1.EnvFromSource{}, t),
|
||||
DcRollOutWaitCond: dcRollOutWait,
|
||||
},
|
||||
dcPatch: generateGitDeploymentConfig(metav1.ObjectMeta{Name: "foo", Annotations: map[string]string{"app.kubernetes.io/component-source-type": "git"}}, "bar",
|
||||
[]corev1.ContainerPort{{Name: "foo", HostPort: 80, ContainerPort: 80}},
|
||||
[]corev1.EnvVar{{Name: "key1", Value: "value1"}, {Name: "key2", Value: "value2"}},
|
||||
@@ -3628,10 +3389,15 @@ func TestPatchCurrentDC(t *testing.T) {
|
||||
{
|
||||
name: "Case 3: Test patching with the wrong name",
|
||||
args: args{
|
||||
name: "foo",
|
||||
dcBefore: *fakeDeploymentConfig("foo", "foo",
|
||||
[]corev1.EnvVar{{Name: "key1", Value: "value1"}, {Name: "key2", Value: "value2"}},
|
||||
[]corev1.EnvFromSource{}, t),
|
||||
ucp: UpdateComponentParams{
|
||||
CommonObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo",
|
||||
},
|
||||
ExistingDC: fakeDeploymentConfig("foo", "foo",
|
||||
[]corev1.EnvVar{{Name: "key1", Value: "value1"}, {Name: "key2", Value: "value2"}},
|
||||
[]corev1.EnvFromSource{}, t),
|
||||
DcRollOutWaitCond: dcRollOutWait,
|
||||
},
|
||||
dcPatch: generateGitDeploymentConfig(metav1.ObjectMeta{Name: "foo2"}, "bar",
|
||||
[]corev1.ContainerPort{{Name: "foo", HostPort: 80, ContainerPort: 80}},
|
||||
[]corev1.EnvVar{{Name: "key1", Value: "value1"}, {Name: "key2", Value: "value2"}},
|
||||
@@ -3644,16 +3410,19 @@ func TestPatchCurrentDC(t *testing.T) {
|
||||
{
|
||||
name: "Case 4: Test patching with the dc with same requirements",
|
||||
args: args{
|
||||
name: "foo",
|
||||
dcBefore: generateGitDeploymentConfig(metav1.ObjectMeta{Name: "foo2"}, "bar",
|
||||
[]corev1.ContainerPort{{Name: "foo", HostPort: 80, ContainerPort: 80}},
|
||||
ucp: UpdateComponentParams{
|
||||
CommonObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo",
|
||||
},
|
||||
ExistingDC: fakeDeploymentConfig("foo", "foo",
|
||||
[]corev1.EnvVar{{Name: "key1", Value: "value1"}, {Name: "key2", Value: "value2"}},
|
||||
[]corev1.EnvFromSource{}, t,
|
||||
),
|
||||
DcRollOutWaitCond: dcRollOutWait,
|
||||
},
|
||||
dcPatch: *fakeDeploymentConfig("foo", "foo",
|
||||
[]corev1.EnvVar{{Name: "key1", Value: "value1"}, {Name: "key2", Value: "value2"}},
|
||||
fakeResourceRequirements(),
|
||||
),
|
||||
dcPatch: generateGitDeploymentConfig(metav1.ObjectMeta{Name: "foo2"}, "bar",
|
||||
[]corev1.ContainerPort{{Name: "foo", HostPort: 80, ContainerPort: 80}},
|
||||
[]corev1.EnvVar{{Name: "key1", Value: "value1"}, {Name: "key2", Value: "value2"}},
|
||||
fakeResourceRequirements(),
|
||||
[]corev1.EnvFromSource{}, t,
|
||||
),
|
||||
},
|
||||
wantErr: false,
|
||||
@@ -3675,7 +3444,7 @@ func TestPatchCurrentDC(t *testing.T) {
|
||||
|
||||
// Fake getting DC
|
||||
fakeClientSet.AppsClientset.PrependReactor("get", "deploymentconfigs", func(action ktesting.Action) (bool, runtime.Object, error) {
|
||||
return true, &tt.args.dcBefore, nil
|
||||
return true, tt.args.ucp.ExistingDC, nil
|
||||
})
|
||||
|
||||
// Fake the "update"
|
||||
@@ -3688,13 +3457,12 @@ func TestPatchCurrentDC(t *testing.T) {
|
||||
})
|
||||
|
||||
// Run function PatchCurrentDC
|
||||
existingContainer, err := FindContainer(tt.args.dcBefore.Spec.Template.Spec.Containers, tt.args.dcBefore.Name)
|
||||
existingContainer, err := FindContainer(tt.args.ucp.ExistingDC.Spec.Template.Spec.Containers, tt.args.ucp.ExistingDC.Name)
|
||||
if err != nil {
|
||||
t.Errorf("client.PatchCurrentDC() unexpected error attempting to fetch component container. error %v", err)
|
||||
}
|
||||
err = fakeClient.PatchCurrentDC(tt.args.name, tt.args.dcPatch, tt.args.prePatchDCHandler, func(*appsv1.DeploymentConfig, int64) bool {
|
||||
return true
|
||||
}, &(tt.args.dcBefore), existingContainer, true)
|
||||
|
||||
err = fakeClient.PatchCurrentDC(tt.args.dcPatch, tt.args.prePatchDCHandler, existingContainer, tt.args.ucp, true)
|
||||
|
||||
// Error checking PatchCurrentDC
|
||||
if !tt.wantErr == (err != nil) {
|
||||
|
||||
183
pkg/occlient/volumes.go
Normal file
183
pkg/occlient/volumes.go
Normal file
@@ -0,0 +1,183 @@
|
||||
package occlient
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/golang/glog"
|
||||
appsv1 "github.com/openshift/api/apps/v1"
|
||||
"github.com/openshift/odo/pkg/util"
|
||||
"github.com/pkg/errors"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// CreatePVC creates a PVC resource in the cluster with the given name, size and
|
||||
// labels
|
||||
func (c *Client) CreatePVC(name string, size string, labels map[string]string) (*corev1.PersistentVolumeClaim, error) {
|
||||
quantity, err := resource.ParseQuantity(size)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to parse size: %v", size)
|
||||
}
|
||||
|
||||
pvc := &corev1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: corev1.PersistentVolumeClaimSpec{
|
||||
Resources: corev1.ResourceRequirements{
|
||||
Requests: corev1.ResourceList{
|
||||
corev1.ResourceStorage: quantity,
|
||||
},
|
||||
},
|
||||
AccessModes: []corev1.PersistentVolumeAccessMode{
|
||||
corev1.ReadWriteOnce,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
createdPvc, err := c.kubeClient.CoreV1().PersistentVolumeClaims(c.Namespace).Create(pvc)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to create PVC")
|
||||
}
|
||||
return createdPvc, nil
|
||||
}
|
||||
|
||||
// AddPVCToDeploymentConfig adds the given PVC to the given Deployment Config
|
||||
// at the given path
|
||||
func (c *Client) AddPVCToDeploymentConfig(dc *appsv1.DeploymentConfig, pvc string, path string) error {
|
||||
volumeName := generateVolumeNameFromPVC(pvc)
|
||||
|
||||
// Validating dc.Spec.Template is present before dereferencing
|
||||
if dc.Spec.Template == nil {
|
||||
return fmt.Errorf("TemplatePodSpec in %s DeploymentConfig is empty", dc.Name)
|
||||
}
|
||||
dc.Spec.Template.Spec.Volumes = append(dc.Spec.Template.Spec.Volumes, corev1.Volume{
|
||||
Name: volumeName,
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: pvc,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// Validating dc.Spec.Template.Spec.Containers[] is present before dereferencing
|
||||
if len(dc.Spec.Template.Spec.Containers) == 0 {
|
||||
return fmt.Errorf("DeploymentConfig %s doesn't have any Containers defined", dc.Name)
|
||||
}
|
||||
dc.Spec.Template.Spec.Containers[0].VolumeMounts = append(dc.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{
|
||||
Name: volumeName,
|
||||
MountPath: path,
|
||||
},
|
||||
)
|
||||
return nil
|
||||
}
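
// Example (illustrative only, not part of this change): attaching a previously
// created PVC to a component's DeploymentConfig at /data. dc is assumed to come
// from GetOneDeploymentConfigFromSelector and pvcName from CreatePVC.
func exampleMountPVC(client *Client, dc *appsv1.DeploymentConfig, pvcName string) error {
	if err := client.AddPVCToDeploymentConfig(dc, pvcName, "/data"); err != nil {
		return errors.Wrap(err, "unable to attach storage to the component")
	}
	// AddPVCToDeploymentConfig only mutates the in-memory DC; the caller still has
	// to update or patch the DeploymentConfig on the cluster afterwards.
	return nil
}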
|
||||
|
||||
// UpdatePVCLabels updates the given PVC with the given labels
|
||||
func (c *Client) UpdatePVCLabels(pvc *corev1.PersistentVolumeClaim, labels map[string]string) error {
|
||||
pvc.Labels = labels
|
||||
_, err := c.kubeClient.CoreV1().PersistentVolumeClaims(c.Namespace).Update(pvc)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to remove storage label from PVC")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeletePVC deletes the given PVC by name
|
||||
func (c *Client) DeletePVC(name string) error {
|
||||
return c.kubeClient.CoreV1().PersistentVolumeClaims(c.Namespace).Delete(name, nil)
|
||||
}
|
||||
|
||||
// IsAppSupervisorDVolume checks if the volume is a supervisorD volume
|
||||
func (c *Client) IsAppSupervisorDVolume(volumeName, dcName string) bool {
|
||||
if volumeName == getAppRootVolumeName(dcName) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// getVolumeNamesFromPVC returns the names of the volumes associated with the given
|
||||
// PVC in the given Deployment Config
|
||||
func (c *Client) getVolumeNamesFromPVC(pvc string, dc *appsv1.DeploymentConfig) []string {
|
||||
var volumes []string
|
||||
for _, volume := range dc.Spec.Template.Spec.Volumes {
|
||||
|
||||
// If the PVC does not exist, we skip it (as this is either EmptyDir or "shared-data" from SupervisorD)
|
||||
if volume.PersistentVolumeClaim == nil {
|
||||
glog.V(4).Infof("Volume has no PVC, skipping %s", volume.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
// If we find the PVC, add to volumes to be returned
|
||||
if volume.PersistentVolumeClaim.ClaimName == pvc {
|
||||
volumes = append(volumes, volume.Name)
|
||||
}
|
||||
|
||||
}
|
||||
return volumes
|
||||
}
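
// Illustrative note (not part of this change): because volume names carry a random
// suffix (see generateVolumeNameFromPVC below), one claim may back several volumes;
// this helper collects all of their names so each can be removed, e.g.:
//
//	names := c.getVolumeNamesFromPVC("storage-0-app", dc)
//	for _, name := range names {
//		removeVolumeFromDC(name, dc)
//		removeVolumeMountFromDC(name, dc)
//	}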
|
||||
|
||||
// removeVolumeFromDC removes the volume from the given Deployment Config and
|
||||
// returns true. If the given volume is not found, it returns false.
|
||||
func removeVolumeFromDC(vol string, dc *appsv1.DeploymentConfig) bool {
|
||||
found := false
|
||||
for i, volume := range dc.Spec.Template.Spec.Volumes {
|
||||
if volume.Name == vol {
|
||||
found = true
|
||||
dc.Spec.Template.Spec.Volumes = append(dc.Spec.Template.Spec.Volumes[:i], dc.Spec.Template.Spec.Volumes[i+1:]...)
|
||||
}
|
||||
}
|
||||
return found
|
||||
}
|
||||
|
||||
// removeVolumeMountFromDC removes the volumeMount from all the given containers
|
||||
// in the given Deployment Config and returns true. If the given volumeMount is
|
||||
// not found, it returns false
|
||||
func removeVolumeMountFromDC(vm string, dc *appsv1.DeploymentConfig) bool {
|
||||
found := false
|
||||
for i, container := range dc.Spec.Template.Spec.Containers {
|
||||
for j, volumeMount := range container.VolumeMounts {
|
||||
if volumeMount.Name == vm {
|
||||
found = true
|
||||
dc.Spec.Template.Spec.Containers[i].VolumeMounts = append(dc.Spec.Template.Spec.Containers[i].VolumeMounts[:j], dc.Spec.Template.Spec.Containers[i].VolumeMounts[j+1:]...)
|
||||
}
|
||||
}
|
||||
}
|
||||
return found
|
||||
}
|
||||
|
||||
// generateVolumeNameFromPVC generates a random volume name based on the name
|
||||
// of the given PVC
|
||||
func generateVolumeNameFromPVC(pvc string) string {
|
||||
return fmt.Sprintf("%v-%v-volume", pvc, util.GenerateRandomString(nameLength))
|
||||
}
|
||||
|
||||
// addOrRemoveVolumeAndVolumeMount mounts or unmounts PVCs from the given deploymentConfig
|
||||
func addOrRemoveVolumeAndVolumeMount(client *Client, dc *appsv1.DeploymentConfig, storageToMount map[string]*corev1.PersistentVolumeClaim, storageUnMount map[string]string) error {
|
||||
|
||||
if len(dc.Spec.Template.Spec.Containers) == 0 || len(dc.Spec.Template.Spec.Containers) > 1 {
|
||||
return fmt.Errorf("more than one container found in dc")
|
||||
}
|
||||
|
||||
// find the volume mount to be unmounted from the dc
|
||||
for i, volumeMount := range dc.Spec.Template.Spec.Containers[0].VolumeMounts {
|
||||
if _, ok := storageUnMount[volumeMount.MountPath]; ok {
|
||||
dc.Spec.Template.Spec.Containers[0].VolumeMounts = append(dc.Spec.Template.Spec.Containers[0].VolumeMounts[:i], dc.Spec.Template.Spec.Containers[0].VolumeMounts[i+1:]...)
|
||||
|
||||
// now find the volume to be deleted from the dc
|
||||
for j, volume := range dc.Spec.Template.Spec.Volumes {
|
||||
if volume.Name == volumeMount.Name {
|
||||
dc.Spec.Template.Spec.Volumes = append(dc.Spec.Template.Spec.Volumes[:j], dc.Spec.Template.Spec.Volumes[j+1:]...)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for path, pvc := range storageToMount {
|
||||
err := client.AddPVCToDeploymentConfig(dc, pvc.Name, path)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to add pvc to deployment config")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
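
// Example (illustrative only, not part of this change): reconciling a DC with the
// mount/unmount maps computed by the storage push logic before the DC is redeployed.
// The parameter names here are hypothetical.
func exampleReconcileVolumes(client *Client, dc *appsv1.DeploymentConfig,
	toMount map[string]*corev1.PersistentVolumeClaim, toUnmount map[string]string) error {
	// toMount maps a mount path to the PVC to attach; toUnmount maps a mount path to
	// the storage name that should no longer be mounted.
	if err := addOrRemoveVolumeAndVolumeMount(client, dc, toMount, toUnmount); err != nil {
		return errors.Wrap(err, "unable to update volumes and volume mounts on the dc")
	}
	return nil
}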
|
||||
253
pkg/occlient/volumes_test.go
Normal file
@@ -0,0 +1,253 @@
|
||||
package occlient
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
appsv1 "github.com/openshift/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
ktesting "k8s.io/client-go/testing"
|
||||
)
|
||||
|
||||
func TestCreatePVC(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
size string
|
||||
labels map[string]string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "storage 10Gi",
|
||||
size: "10Gi",
|
||||
labels: map[string]string{
|
||||
"name": "mongodb",
|
||||
"namespace": "blog",
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "storage 1024",
|
||||
size: "1024",
|
||||
labels: map[string]string{
|
||||
"name": "PostgreSQL",
|
||||
"namespace": "backend",
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "storage invalid size",
|
||||
size: "4#0",
|
||||
labels: map[string]string{
|
||||
"name": "MySQL",
|
||||
"namespace": "",
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
fkclient, fkclientset := FakeNew()
|
||||
|
||||
_, err := fkclient.CreatePVC(tt.name, tt.size, tt.labels)
|
||||
|
||||
// Checks for error in positive cases
|
||||
if !tt.wantErr == (err != nil) {
|
||||
t.Errorf(" client.CreatePVC(name, size, labels) unexpected error %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
|
||||
// Check for validating actions performed
|
||||
if (len(fkclientset.Kubernetes.Actions()) != 1) && (tt.wantErr != true) {
|
||||
t.Errorf("expected 1 action in CreatePVC got: %v", fkclientset.RouteClientset.Actions())
|
||||
}
|
||||
// Checks for return values in positive cases
|
||||
if err == nil {
|
||||
createdPVC := fkclientset.Kubernetes.Actions()[0].(ktesting.CreateAction).GetObject().(*corev1.PersistentVolumeClaim)
|
||||
quantity, err := resource.ParseQuantity(tt.size)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create quantity by calling resource.ParseQuantity(%v)", tt.size)
|
||||
}
|
||||
|
||||
// created PVC should be labeled with labels passed to CreatePVC
|
||||
if !reflect.DeepEqual(createdPVC.Labels, tt.labels) {
|
||||
t.Errorf("labels in created route is not matching expected labels, expected: %v, got: %v", tt.labels, createdPVC.Labels)
|
||||
}
|
||||
// name, size of createdPVC should be matching to size, name passed to CreatePVC
|
||||
if !reflect.DeepEqual(createdPVC.Spec.Resources.Requests["storage"], quantity) {
|
||||
t.Errorf("size of PVC is not matching to expected size, expected: %v, got %v", quantity, createdPVC.Spec.Resources.Requests["storage"])
|
||||
}
|
||||
if createdPVC.Name != tt.name {
|
||||
t.Errorf("PVC name is not matching to expected name, expected: %s, got %s", tt.name, createdPVC.Name)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeletePVC(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
pvcName string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "storage 10Gi",
|
||||
pvcName: "postgresql",
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
fakeClient, fakeClientSet := FakeNew()
|
||||
|
||||
fakeClientSet.Kubernetes.PrependReactor("delete", "persistentvolumeclaims", func(action ktesting.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, nil
|
||||
})
|
||||
|
||||
err := fakeClient.DeletePVC(tt.pvcName)
|
||||
|
||||
//Checks for error in positive cases
|
||||
if !tt.wantErr == (err != nil) {
|
||||
t.Errorf(" client.DeletePVC(name) unexpected error %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
|
||||
// Check for validating actions performed
|
||||
if (len(fakeClientSet.Kubernetes.Actions()) != 1) && (tt.wantErr != true) {
|
||||
t.Errorf("expected 1 action in DeletePVC got: %v", fakeClientSet.Kubernetes.Actions())
|
||||
}
|
||||
|
||||
// Check for value with which the function has called
|
||||
DeletedPVC := fakeClientSet.Kubernetes.Actions()[0].(ktesting.DeleteAction).GetName()
|
||||
if DeletedPVC != tt.pvcName {
|
||||
t.Errorf("Delete action is performed with wrong pvcName, expected: %s, got %s", tt.pvcName, DeletedPVC)
|
||||
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddPVCToDeploymentConfig(t *testing.T) {
|
||||
type args struct {
|
||||
dc *appsv1.DeploymentConfig
|
||||
pvc string
|
||||
path string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "Test case 1: valid dc",
|
||||
args: args{
|
||||
dc: &appsv1.DeploymentConfig{
|
||||
Spec: appsv1.DeploymentConfigSpec{
|
||||
Selector: map[string]string{
|
||||
"deploymentconfig": "nodejs-app",
|
||||
},
|
||||
Template: &corev1.PodTemplateSpec{
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "test",
|
||||
VolumeMounts: []corev1.VolumeMount{
|
||||
{
|
||||
MountPath: "/tmp",
|
||||
Name: "test",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
pvc: "test volume",
|
||||
path: "/mnt",
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "Test case 2: dc without Containers defined",
|
||||
args: args{
|
||||
dc: &appsv1.DeploymentConfig{
|
||||
Spec: appsv1.DeploymentConfigSpec{
|
||||
Selector: map[string]string{
|
||||
"deploymentconfig": "nodejs-app",
|
||||
},
|
||||
Template: &corev1.PodTemplateSpec{
|
||||
Spec: corev1.PodSpec{},
|
||||
},
|
||||
},
|
||||
},
|
||||
pvc: "test-voulme",
|
||||
path: "/mnt",
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Test case 3: dc without Template defined",
|
||||
args: args{
|
||||
dc: &appsv1.DeploymentConfig{
|
||||
Spec: appsv1.DeploymentConfigSpec{
|
||||
Selector: map[string]string{
|
||||
"deploymentconfig": "nodejs-app",
|
||||
},
|
||||
},
|
||||
},
|
||||
pvc: "test-voulme",
|
||||
path: "/mnt",
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
fakeClient, _ := FakeNew()
|
||||
|
||||
err := fakeClient.AddPVCToDeploymentConfig(tt.args.dc, tt.args.pvc, tt.args.path)
|
||||
|
||||
// Checks for error in positive cases
|
||||
if !tt.wantErr == (err != nil) {
|
||||
t.Errorf("Client.AddPVCToDeploymentConfig() unexpected error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
|
||||
// Checks for number of actions performed in positive cases
|
||||
if err == nil {
|
||||
|
||||
found := false // creating a flag
|
||||
// iterating over the VolumeMounts for finding the one specified during func call
|
||||
for bb := range tt.args.dc.Spec.Template.Spec.Containers[0].VolumeMounts {
|
||||
if tt.args.path == tt.args.dc.Spec.Template.Spec.Containers[0].VolumeMounts[bb].MountPath {
|
||||
found = true
|
||||
if !strings.Contains(tt.args.dc.Spec.Template.Spec.Containers[0].VolumeMounts[bb].Name, tt.args.pvc) {
|
||||
t.Errorf("pvc name not matching with the specified value got: %v, expected %v", tt.args.dc.Spec.Template.Spec.Containers[0].VolumeMounts[bb].Name, tt.args.pvc)
|
||||
}
|
||||
}
|
||||
}
|
||||
if found == false {
|
||||
t.Errorf("expected Volume mount path %v not found in VolumeMounts", tt.args.path)
|
||||
}
|
||||
|
||||
found = false // resetting the flag
|
||||
// iterating over the volume claims to find the one specified during func call
|
||||
for bb := range tt.args.dc.Spec.Template.Spec.Volumes {
|
||||
if tt.args.pvc == tt.args.dc.Spec.Template.Spec.Volumes[bb].VolumeSource.PersistentVolumeClaim.ClaimName {
|
||||
found = true
|
||||
if !strings.Contains(tt.args.dc.Spec.Template.Spec.Volumes[bb].Name, tt.args.pvc) {
|
||||
t.Errorf("pvc name not matching in PersistentVolumeClaim, got: %v, expected %v", tt.args.dc.Spec.Template.Spec.Volumes[bb].Name, tt.args.pvc)
|
||||
}
|
||||
}
|
||||
}
|
||||
if found == false {
|
||||
t.Errorf("expected volume %s not found in DeploymentConfig.Spec.Template.Spec.Volumes", tt.args.pvc)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -2,6 +2,7 @@ package application
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/openshift/odo/pkg/storage"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
@@ -9,7 +10,6 @@ import (
|
||||
"github.com/openshift/odo/pkg/log"
|
||||
"github.com/openshift/odo/pkg/occlient"
|
||||
"github.com/openshift/odo/pkg/odo/genericclioptions"
|
||||
"github.com/openshift/odo/pkg/storage"
|
||||
"github.com/openshift/odo/pkg/url"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
|
||||
@@ -2,12 +2,12 @@ package component
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/openshift/odo/pkg/storage"
|
||||
|
||||
"github.com/openshift/odo/pkg/component"
|
||||
"github.com/openshift/odo/pkg/log"
|
||||
"github.com/openshift/odo/pkg/occlient"
|
||||
"github.com/openshift/odo/pkg/odo/util/completion"
|
||||
"github.com/openshift/odo/pkg/storage"
|
||||
"github.com/openshift/odo/pkg/url"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
|
||||
@@ -257,7 +257,6 @@ func (po *PushOptions) Run() (err error) {
|
||||
}
|
||||
|
||||
log.Successf("Changes successfully pushed to component: %v", cmpName)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ package project
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/openshift/odo/pkg/config"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/openshift/odo/pkg/application"
|
||||
@@ -12,7 +13,6 @@ import (
|
||||
odoutil "github.com/openshift/odo/pkg/odo/util"
|
||||
"github.com/openshift/odo/pkg/odo/util/completion"
|
||||
"github.com/openshift/odo/pkg/service"
|
||||
"github.com/openshift/odo/pkg/storage"
|
||||
"github.com/openshift/odo/pkg/url"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
@@ -78,6 +78,10 @@ func AddProjectFlag(cmd *cobra.Command) {
|
||||
|
||||
// printDeleteProjectInfo prints objects affected by project deletion
|
||||
func printDeleteProjectInfo(client *occlient.Client, projectName string) error {
|
||||
localConfig, err := config.New()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to get the local config")
|
||||
}
|
||||
// Fetch and List the applications
|
||||
applicationList, err := application.ListInProject(client)
|
||||
if err != nil {
|
||||
@@ -114,13 +118,12 @@ func printDeleteProjectInfo(client *occlient.Client, projectName string) error {
|
||||
}
|
||||
}
|
||||
|
||||
storages, err := storage.List(client, currentComponent.Name, app)
|
||||
storages, err := localConfig.StorageList()
|
||||
odoutil.LogErrorAndExit(err, "")
|
||||
if len(storages.Items) != 0 {
|
||||
if len(storages) != 0 {
|
||||
log.Info("This component has following storages which will be deleted with the component")
|
||||
for _, storageName := range componentDesc.Spec.Storage {
|
||||
store := storages.Get(storageName)
|
||||
log.Info("Storage named", store.GetName(), "of size", store.Spec.Size)
|
||||
for _, store := range storages {
|
||||
log.Info("Storage named", store.Name, "of size", store.Size)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,8 +2,10 @@ package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/openshift/odo/pkg/config"
|
||||
"github.com/openshift/odo/pkg/log"
|
||||
"github.com/openshift/odo/pkg/odo/genericclioptions"
|
||||
"github.com/openshift/odo/pkg/odo/util/completion"
|
||||
"github.com/openshift/odo/pkg/storage"
|
||||
"github.com/openshift/odo/pkg/util"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -25,9 +27,11 @@ var (
|
||||
)
|
||||
|
||||
type StorageCreateOptions struct {
|
||||
storageName string
|
||||
storageSize string
|
||||
storagePath string
|
||||
storageName string
|
||||
storageSize string
|
||||
storagePath string
|
||||
componentContext string
|
||||
localConfig *config.LocalConfigInfo
|
||||
*genericclioptions.Context
|
||||
}
|
||||
|
||||
@@ -38,6 +42,11 @@ func NewStorageCreateOptions() *StorageCreateOptions {
|
||||
|
||||
// Complete completes StorageCreateOptions after they've been created
|
||||
func (o *StorageCreateOptions) Complete(name string, cmd *cobra.Command, args []string) (err error) {
|
||||
o.Context = genericclioptions.NewContext(cmd)
|
||||
o.localConfig, err = config.NewLocalConfigInfo(o.componentContext)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.Context = genericclioptions.NewContext(cmd)
|
||||
if len(args) != 0 {
|
||||
o.storageName = args[0]
|
||||
@@ -54,23 +63,25 @@ func (o *StorageCreateOptions) Validate() (err error) {
|
||||
return fmt.Errorf("given output format %s is not supported", o.OutputFlag)
|
||||
}
|
||||
// validate storage path
|
||||
return validateStoragePath(o.Client, o.storageName, o.Component(), o.Application)
|
||||
return o.localConfig.ValidateStorage(o.storageName, o.storagePath)
|
||||
}
|
||||
|
||||
// Run contains the logic for the odo storage create command
|
||||
func (o *StorageCreateOptions) Run() (err error) {
|
||||
storageResult, err := storage.Create(o.Client, o.storageName, o.storageSize, o.storagePath, o.Component(), o.Application)
|
||||
storageResult, err := o.localConfig.StorageCreate(o.storageName, o.storageSize, o.storagePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
out, err := util.MachineOutput(o.OutputFlag, storageResult)
|
||||
|
||||
storageResultMachineReadable := storage.GetMachineReadableFormat(storageResult.Name, storageResult.Size, storageResult.Path)
|
||||
out, err := util.MachineOutput(o.OutputFlag, storageResultMachineReadable)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if out != "" {
|
||||
fmt.Println(out)
|
||||
} else {
|
||||
log.Successf("Added storage %v to %v", o.storageName, o.Component())
|
||||
log.Successf("Added storage %v to %v", o.storageName, o.localConfig.GetName())
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -99,6 +110,8 @@ func NewCmdStorageCreate(name, fullName string) *cobra.Command {
|
||||
componentCmd.AddComponentFlag(storageCreateCmd)
|
||||
|
||||
genericclioptions.AddOutputFlag(storageCreateCmd)
|
||||
genericclioptions.AddContextFlag(storageCreateCmd, &o.componentContext)
|
||||
completion.RegisterCommandFlagHandler(storageCreateCmd, "context", completion.FileCompletionHandler)
|
||||
|
||||
return storageCreateCmd
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/openshift/odo/pkg/config"
|
||||
"github.com/openshift/odo/pkg/log"
|
||||
appCmd "github.com/openshift/odo/pkg/odo/cli/application"
|
||||
componentCmd "github.com/openshift/odo/pkg/odo/cli/component"
|
||||
@@ -9,7 +10,6 @@ import (
|
||||
"github.com/openshift/odo/pkg/odo/cli/ui"
|
||||
"github.com/openshift/odo/pkg/odo/genericclioptions"
|
||||
"github.com/openshift/odo/pkg/odo/util/completion"
|
||||
"github.com/openshift/odo/pkg/storage"
|
||||
"github.com/spf13/cobra"
|
||||
ktemplates "k8s.io/kubernetes/pkg/kubectl/cmd/templates"
|
||||
)
|
||||
@@ -30,7 +30,8 @@ var (
|
||||
type StorageDeleteOptions struct {
|
||||
storageName string
|
||||
storageForceDeleteFlag bool
|
||||
componentName string
|
||||
componentContext string
|
||||
localConfig *config.LocalConfigInfo
|
||||
*genericclioptions.Context
|
||||
}
|
||||
|
||||
@@ -41,6 +42,11 @@ func NewStorageDeleteOptions() *StorageDeleteOptions {
|
||||
|
||||
// Complete completes StorageDeleteOptions after they've been created
|
||||
func (o *StorageDeleteOptions) Complete(name string, cmd *cobra.Command, args []string) (err error) {
|
||||
o.Context = genericclioptions.NewContext(cmd)
|
||||
o.localConfig, err = config.NewLocalConfigInfo(o.componentContext)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.Context = genericclioptions.NewContext(cmd)
|
||||
o.storageName = args[0]
|
||||
return
|
||||
@@ -48,42 +54,29 @@ func (o *StorageDeleteOptions) Complete(name string, cmd *cobra.Command, args []
|
||||
|
||||
// Validate validates the StorageDeleteOptions based on completed values
|
||||
func (o *StorageDeleteOptions) Validate() (err error) {
|
||||
exists, err := storage.Exists(o.Client, o.storageName, o.Application)
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
exists := o.localConfig.StorageExists(o.storageName)
|
||||
if !exists {
|
||||
return fmt.Errorf("the storage %v does not exists in the application %v, cause %v", o.storageName, o.Application, err)
|
||||
}
|
||||
|
||||
o.componentName, err = storage.GetComponentNameFromStorageName(o.Client, o.storageName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to get component associated with %s storage, cause %v", o.storageName, err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Run contains the logic for the odo storage delete command
|
||||
func (o *StorageDeleteOptions) Run() (err error) {
|
||||
var deleteMsg string
|
||||
if o.componentName != "" {
|
||||
mPath := storage.GetMountPath(o.Client, o.storageName, o.componentName, o.Application)
|
||||
deleteMsg = fmt.Sprintf("Are you sure you want to delete the storage %v mounted to %v in %v component", o.storageName, mPath, o.componentName)
|
||||
} else {
|
||||
deleteMsg = fmt.Sprintf("Are you sure you want to delete the storage %v that is not currently mounted to any component", o.storageName)
|
||||
}
|
||||
|
||||
mPath := o.localConfig.GetMountPath(o.storageName)
|
||||
|
||||
deleteMsg = fmt.Sprintf("Are you sure you want to delete the storage %v mounted to %v in %v component", o.storageName, mPath, o.localConfig.GetName())
|
||||
|
||||
if o.storageForceDeleteFlag || ui.Proceed(deleteMsg) {
|
||||
o.componentName, err = storage.Delete(o.Client, o.storageName, o.Application)
|
||||
err = o.localConfig.StorageDelete(o.storageName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete storage, cause %v", err)
|
||||
}
|
||||
if o.componentName != "" {
|
||||
log.Infof("Deleted storage %v from %v", o.storageName, o.componentName)
|
||||
} else {
|
||||
log.Infof("Deleted storage %v", o.storageName)
|
||||
}
|
||||
|
||||
log.Infof("Deleted storage %v from %v", o.storageName, o.localConfig.GetName())
|
||||
} else {
|
||||
return fmt.Errorf("aborting deletion of storage: %v", o.storageName)
|
||||
}
|
||||
@@ -112,5 +105,8 @@ func NewCmdStorageDelete(name, fullName string) *cobra.Command {
|
||||
appCmd.AddApplicationFlag(storageDeleteCmd)
|
||||
componentCmd.AddComponentFlag(storageDeleteCmd)
|
||||
|
||||
genericclioptions.AddContextFlag(storageDeleteCmd, &o.componentContext)
|
||||
completion.RegisterCommandFlagHandler(storageDeleteCmd, "context", completion.FileCompletionHandler)
|
||||
|
||||
return storageDeleteCmd
|
||||
}
|
||||
|
||||
@@ -2,22 +2,19 @@ package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/openshift/odo/pkg/config"
|
||||
"github.com/openshift/odo/pkg/odo/util/completion"
|
||||
"github.com/openshift/odo/pkg/storage"
|
||||
"github.com/openshift/odo/pkg/util"
|
||||
"os"
|
||||
|
||||
"encoding/json"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/openshift/odo/pkg/component"
|
||||
"github.com/openshift/odo/pkg/log"
|
||||
"github.com/openshift/odo/pkg/occlient"
|
||||
appCmd "github.com/openshift/odo/pkg/odo/cli/application"
|
||||
componentCmd "github.com/openshift/odo/pkg/odo/cli/component"
|
||||
projectCmd "github.com/openshift/odo/pkg/odo/cli/project"
|
||||
"github.com/openshift/odo/pkg/odo/genericclioptions"
|
||||
odoutil "github.com/openshift/odo/pkg/odo/util"
|
||||
"github.com/openshift/odo/pkg/storage"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
ktemplates "k8s.io/kubernetes/pkg/kubectl/cmd/templates"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
@@ -35,9 +32,8 @@ var (
|
||||
)
|
||||
|
||||
type StorageListOptions struct {
|
||||
storageListAllFlag bool
|
||||
componentName string
|
||||
outputFlag string
|
||||
componentContext string
|
||||
localConfig *config.LocalConfigInfo
|
||||
*genericclioptions.Context
|
||||
}
|
||||
|
||||
@@ -49,108 +45,65 @@ func NewStorageListOptions() *StorageListOptions {
|
||||
// Complete completes StorageListOptions after they've been created
|
||||
func (o *StorageListOptions) Complete(name string, cmd *cobra.Command, args []string) (err error) {
|
||||
o.Context = genericclioptions.NewContext(cmd)
|
||||
if o.storageListAllFlag {
|
||||
if len(genericclioptions.FlagValueIfSet(cmd, genericclioptions.ComponentFlagName)) > 0 {
|
||||
return fmt.Errorf("invalid arguments. Component name is not needed")
|
||||
}
|
||||
} else {
|
||||
o.componentName = o.Component()
|
||||
o.localConfig, err = config.NewLocalConfigInfo(o.componentContext)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Validate validates the StorageListOptions based on completed values
|
||||
func (o *StorageListOptions) Validate() (err error) {
|
||||
return odoutil.CheckOutputFlag(o.outputFlag)
|
||||
// check the machine readable format
|
||||
if !util.CheckOutputFlag(o.OutputFlag) {
|
||||
return fmt.Errorf("given output format %s is not supported", o.OutputFlag)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run contains the logic for the odo storage list command
|
||||
func (o *StorageListOptions) Run() (err error) {
|
||||
if o.outputFlag == "json" {
|
||||
var storeList []storage.Storage
|
||||
if o.storageListAllFlag {
|
||||
componentList, err := component.List(o.Client, o.Application)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, component := range componentList.Items {
|
||||
mountedStorages, err := storage.ListMounted(o.Client, component.Name, o.Application)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, storage := range mountedStorages.Items {
|
||||
mounted := getMachineReadableFormat(true, storage)
|
||||
storeList = append(storeList, mounted)
|
||||
}
|
||||
}
|
||||
storageListConfig, err := o.localConfig.StorageList()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
} else {
|
||||
componentName := o.Component()
|
||||
mountedStorages, err := storage.ListMounted(o.Client, componentName, o.Application)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, storage := range mountedStorages.Items {
|
||||
mounted := getMachineReadableFormat(true, storage)
|
||||
storeList = append(storeList, mounted)
|
||||
storageListMachineReadable := []storage.Storage{}
|
||||
|
||||
}
|
||||
}
|
||||
unmountedStorages, err := storage.ListUnmounted(o.Client, o.Application)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, storage := range unmountedStorages.Items {
|
||||
unmounted := getMachineReadableFormat(false, storage)
|
||||
storeList = append(storeList, unmounted)
|
||||
}
|
||||
storageList := storage.StorageList{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "List",
|
||||
APIVersion: "odo.openshift.io/v1aplha1",
|
||||
},
|
||||
ListMeta: metav1.ListMeta{},
|
||||
Items: storeList,
|
||||
}
|
||||
out, err := json.Marshal(storageList)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println(string(out))
|
||||
for _, storageConfig := range storageListConfig {
|
||||
storageListMachineReadable = append(storageListMachineReadable, storage.GetMachineReadableFormat(storageConfig.Name, storageConfig.Size, storageConfig.Path))
|
||||
}
|
||||
storageListResultMachineReadable := storage.GetMachineReadableFormatForList(storageListMachineReadable)
|
||||
out, err := util.MachineOutput(o.OutputFlag, storageListResultMachineReadable)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if out != "" {
|
||||
fmt.Println(out)
|
||||
} else {
|
||||
// defining the column structure of the table
|
||||
tabWriterMounted := tabwriter.NewWriter(os.Stdout, 5, 2, 3, ' ', tabwriter.TabIndent)
|
||||
|
||||
if o.storageListAllFlag {
|
||||
printMountedStorageInAllComponent(o.Client, o.Application)
|
||||
// create headers of mounted storage table
|
||||
fmt.Fprintln(tabWriterMounted, "NAME", "\t", "SIZE", "\t", "PATH")
|
||||
// iterating over all mounted storage and put in the mount storage table
|
||||
if len(storageListConfig) > 0 {
|
||||
for _, mStorage := range storageListConfig {
|
||||
fmt.Fprintln(tabWriterMounted, mStorage.Name, "\t", mStorage.Size, "\t", mStorage.Path)
|
||||
}
|
||||
|
||||
// print all mounted storage of the given component
|
||||
log.Infof("The component '%v' has the following storage attached:", o.localConfig.GetName())
|
||||
tabWriterMounted.Flush()
|
||||
} else {
|
||||
// storageComponent is the input component name
|
||||
componentName := o.Component()
|
||||
printMountedStorageInComponent(o.Client, componentName, o.Application)
|
||||
log.Infof("The component '%v' has no storage attached", o.localConfig.GetName())
|
||||
}
|
||||
printUnmountedStorage(o.Client, o.Application)
|
||||
fmt.Println("")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// getMachineReadableFormat returns resource information in machine readable format
|
||||
func getMachineReadableFormat(mounted bool, stor storage.Storage) storage.Storage {
|
||||
return storage.Storage{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Storage",
|
||||
APIVersion: "odo.openshift.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: stor.Name,
|
||||
},
|
||||
Spec: storage.StorageSpec{
|
||||
Size: stor.Spec.Size,
|
||||
},
|
||||
Status: storage.StorageStatus{
|
||||
Path: stor.Status.Path,
|
||||
},
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// NewCmdStorageList implements the odo storage list command.
|
||||
func NewCmdStorageList(name, fullName string) *cobra.Command {
|
||||
o := NewStorageListOptions()
|
||||
@@ -165,75 +118,13 @@ func NewCmdStorageList(name, fullName string) *cobra.Command {
|
||||
},
|
||||
}
|
||||
|
||||
storageListCmd.Flags().BoolVarP(&o.storageListAllFlag, "all", "a", false, "List all storage in all components")
|
||||
storageListCmd.Flags().StringVarP(&o.outputFlag, "output", "o", "", "output in json format")
|
||||
|
||||
projectCmd.AddProjectFlag(storageListCmd)
|
||||
appCmd.AddApplicationFlag(storageListCmd)
|
||||
componentCmd.AddComponentFlag(storageListCmd)
|
||||
|
||||
genericclioptions.AddOutputFlag(storageListCmd)
|
||||
genericclioptions.AddContextFlag(storageListCmd, &o.componentContext)
|
||||
completion.RegisterCommandFlagHandler(storageListCmd, "context", completion.FileCompletionHandler)
|
||||
|
||||
return storageListCmd
|
||||
}
|
||||
|
||||
// printMountedStorageInComponent prints all the mounted storage in a given component of the application
|
||||
func printMountedStorageInComponent(client *occlient.Client, componentName string, applicationName string) {
|
||||
|
||||
// defining the column structure of the table
|
||||
tabWriterMounted := tabwriter.NewWriter(os.Stdout, 5, 2, 3, ' ', tabwriter.TabIndent)
|
||||
|
||||
// create headers of mounted storage table
|
||||
fmt.Fprintln(tabWriterMounted, "NAME", "\t", "SIZE", "\t", "PATH")
|
||||
|
||||
storageListMounted, err := storage.ListMounted(client, componentName, applicationName)
|
||||
odoutil.LogErrorAndExit(err, "could not get mounted storage list")
|
||||
|
||||
// iterating over all mounted storage and put in the mount storage table
|
||||
if len(storageListMounted.Items) > 0 {
|
||||
for _, mStorage := range storageListMounted.Items {
|
||||
fmt.Fprintln(tabWriterMounted, mStorage.Name, "\t", mStorage.Spec.Size, "\t", mStorage.Status.Path)
|
||||
}
|
||||
|
||||
// print all mounted storage of the given component
|
||||
log.Infof("The component '%v' has the following storage attached:", componentName)
|
||||
tabWriterMounted.Flush()
|
||||
} else {
|
||||
log.Infof("The component '%v' has no storage attached", componentName)
|
||||
}
|
||||
fmt.Println("")
|
||||
}
|
||||
|
||||
// printMountedStorageInAllComponent prints all the mounted storage in all the components of the application and project
|
||||
func printMountedStorageInAllComponent(client *occlient.Client, applicationName string) {
|
||||
componentList, err := component.List(client, applicationName)
|
||||
odoutil.LogErrorAndExit(err, "could not get component list")
|
||||
|
||||
// iterating over all the components in the given aplication and project
|
||||
for _, component := range componentList.Items {
|
||||
printMountedStorageInComponent(client, component.Name, applicationName)
|
||||
}
|
||||
}
|
||||
|
||||
// printUnmountedStorage prints all the unmounted storage in the application
|
||||
func printUnmountedStorage(client *occlient.Client, applicationName string) {
|
||||
|
||||
// defining the column structure of the unmounted storage table
|
||||
tabWriterUnmounted := tabwriter.NewWriter(os.Stdout, 5, 2, 3, ' ', tabwriter.TabIndent)
|
||||
|
||||
// create header of unmounted storage in all the components of the given application and project
|
||||
fmt.Fprintln(tabWriterUnmounted, "NAME", "\t", "SIZE")
|
||||
|
||||
storageListUnmounted, err := storage.ListUnmounted(client, applicationName)
|
||||
odoutil.LogErrorAndExit(err, "could not get unmounted storage list")
|
||||
|
||||
// iterating over all unmounted storage and put in the unmount storage table
|
||||
if len(storageListUnmounted.Items) > 0 {
|
||||
for _, uStorage := range storageListUnmounted.Items {
|
||||
fmt.Fprintln(tabWriterUnmounted, uStorage.Name, "\t", uStorage.Spec.Size)
|
||||
}
|
||||
|
||||
// print unmounted storage of all the application
|
||||
log.Info("Storage that are not mounted to any component:")
|
||||
tabWriterUnmounted.Flush()
|
||||
}
|
||||
fmt.Println("")
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/openshift/odo/pkg/occlient"
|
||||
odoutil "github.com/openshift/odo/pkg/odo/util"
|
||||
"github.com/openshift/odo/pkg/storage"
|
||||
@@ -24,25 +23,25 @@ func NewCmdStorage(name, fullName string) *cobra.Command {
|
||||
storageCreateCmd := NewCmdStorageCreate(createRecommendedCommandName, odoutil.GetFullName(fullName, createRecommendedCommandName))
|
||||
storageDeleteCmd := NewCmdStorageDelete(deleteRecommendedCommandName, odoutil.GetFullName(fullName, deleteRecommendedCommandName))
|
||||
storageListCmd := NewCmdStorageList(listRecommendedCommandName, odoutil.GetFullName(fullName, listRecommendedCommandName))
|
||||
storageMountCmd := NewCmdStorageMount(mountRecommendedCommandName, odoutil.GetFullName(fullName, mountRecommendedCommandName))
|
||||
storageUnMountCmd := NewCmdStorageUnMount(unMountRecommendedCommandName, odoutil.GetFullName(fullName, unMountRecommendedCommandName))
|
||||
//storageMountCmd := NewCmdStorageMount(mountRecommendedCommandName, odoutil.GetFullName(fullName, mountRecommendedCommandName))
|
||||
//storageUnMountCmd := NewCmdStorageUnMount(unMountRecommendedCommandName, odoutil.GetFullName(fullName, unMountRecommendedCommandName))
|
||||
|
||||
var storageCmd = &cobra.Command{
|
||||
Use: name,
|
||||
Short: storageShortDesc,
|
||||
Long: storageLongDesc,
|
||||
Example: fmt.Sprintf("%s\n%s\n%s\n%s",
|
||||
Example: fmt.Sprintf("%s\n%s\n%s",
|
||||
storageCreateCmd.Example,
|
||||
storageDeleteCmd.Example,
|
||||
storageUnMountCmd.Example,
|
||||
//storageUnMountCmd.Example,
|
||||
storageListCmd.Example),
|
||||
}
|
||||
|
||||
storageCmd.AddCommand(storageCreateCmd)
|
||||
storageCmd.AddCommand(storageDeleteCmd)
|
||||
storageCmd.AddCommand(storageUnMountCmd)
|
||||
//storageCmd.AddCommand(storageUnMountCmd)
|
||||
storageCmd.AddCommand(storageListCmd)
|
||||
storageCmd.AddCommand(storageMountCmd)
|
||||
//storageCmd.AddCommand(storageMountCmd)
|
||||
|
||||
// Add a defined annotation in order to appear in the help menu
|
||||
storageCmd.Annotations = map[string]string{"command": "other"}
|
||||
|
||||
@@ -2,6 +2,7 @@ package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/openshift/odo/pkg/config"
|
||||
"os"
|
||||
"strings"
|
||||
"unicode"
|
||||
@@ -10,7 +11,6 @@ import (
|
||||
"github.com/openshift/odo/pkg/component"
|
||||
"github.com/openshift/odo/pkg/log"
|
||||
"github.com/openshift/odo/pkg/occlient"
|
||||
storagePkg "github.com/openshift/odo/pkg/storage"
|
||||
urlPkg "github.com/openshift/odo/pkg/url"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -46,6 +46,10 @@ func CheckOutputFlag(outputFlag string) error {
|
||||
|
||||
// PrintComponentInfo prints Component Information like path, URL & storage
|
||||
func PrintComponentInfo(client *occlient.Client, currentComponentName string, componentDesc component.Component, applicationName string) {
|
||||
localConfig, err := config.New()
|
||||
if err != nil {
|
||||
LogErrorAndExit(err, "")
|
||||
}
|
||||
fmt.Printf("Component Name: %v\nType: %v\n", currentComponentName, componentDesc.Spec.Type)
|
||||
// Source
|
||||
if componentDesc.Spec.Source != "" {
|
||||
@@ -62,11 +66,10 @@ func PrintComponentInfo(client *occlient.Client, currentComponentName string, co
|
||||
// Storage
|
||||
if len(componentDesc.Spec.Storage) > 0 {
|
||||
fmt.Println("\nStorage:")
|
||||
storages, err := storagePkg.List(client, currentComponentName, applicationName)
|
||||
storages, err := localConfig.StorageList()
|
||||
LogErrorAndExit(err, "")
|
||||
for _, storage := range componentDesc.Spec.Storage {
|
||||
store := storages.Get(storage)
|
||||
fmt.Printf(" - %v of size %v mounted to %v\n", store.Name, store.Spec.Size, store.Status.Path)
|
||||
for _, store := range storages {
|
||||
fmt.Printf(" - %v of size %v mounted to %v\n", store.Name, store.Size, store.Path)
|
||||
}
|
||||
}
|
||||
// URL
|
||||
|
||||
@@ -2,6 +2,7 @@ package completion
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/openshift/odo/pkg/config"
|
||||
"strings"
|
||||
|
||||
appsv1 "github.com/openshift/api/apps/v1"
|
||||
@@ -205,12 +206,18 @@ var URLCompletionHandler = func(cmd *cobra.Command, args parsedArgs, context *ge
|
||||
// StorageDeleteCompletionHandler provides storage name completion for storage delete
|
||||
var StorageDeleteCompletionHandler = func(cmd *cobra.Command, args parsedArgs, context *genericclioptions.Context) (completions []string) {
|
||||
completions = make([]string, 0)
|
||||
storageList, err := storage.List(context.Client, context.Component(), context.Application)
|
||||
|
||||
localConfig, err := config.New()
|
||||
if err != nil {
|
||||
return completions
|
||||
}
|
||||
|
||||
for _, storage := range storageList.Items {
|
||||
storageList, err := localConfig.StorageList()
|
||||
if err != nil {
|
||||
return completions
|
||||
}
|
||||
|
||||
for _, storage := range storageList {
|
||||
// we found the storage name in the list which means
|
||||
// that the storage name has already been selected by the user, so there is no need to suggest more
|
||||
if args.commands[storage.Name] {
|
||||
|
||||
@@ -2,18 +2,18 @@ package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/openshift/odo/pkg/log"
|
||||
|
||||
applabels "github.com/openshift/odo/pkg/application/labels"
|
||||
componentlabels "github.com/openshift/odo/pkg/component/labels"
|
||||
"github.com/openshift/odo/pkg/occlient"
|
||||
storagelabels "github.com/openshift/odo/pkg/storage/labels"
|
||||
"github.com/openshift/odo/pkg/util"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/pkg/errors"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// Get returns the Storage definition for the given Storage name
|
||||
@@ -28,14 +28,14 @@ func (storages StorageList) Get(storageName string) Storage {
|
||||
}
|
||||
|
||||
// Create adds storage to given component of given application
|
||||
func Create(client *occlient.Client, name string, size string, path string, componentName string, applicationName string) (Storage, error) {
|
||||
func Create(client *occlient.Client, name string, size string, componentName string, applicationName string) (*corev1.PersistentVolumeClaim, error) {
|
||||
|
||||
// Namespace the component
|
||||
// We will use name+applicationName instead of componentName+applicationName until:
|
||||
// https://github.com/openshift/odo/issues/504 is resolved.
|
||||
namespacedOpenShiftObject, err := util.NamespaceOpenShiftObject(name, applicationName)
|
||||
if err != nil {
|
||||
return Storage{}, errors.Wrapf(err, "unable to create namespaced name")
|
||||
return nil, errors.Wrapf(err, "unable to create namespaced name")
|
||||
}
|
||||
|
||||
labels := storagelabels.GetLabels(name, componentName, applicationName, true)
|
||||
@@ -45,25 +45,9 @@ func Create(client *occlient.Client, name string, size string, path string, comp
|
||||
// Create PVC
|
||||
pvc, err := client.CreatePVC(generatePVCNameFromStorageName(namespacedOpenShiftObject), size, labels)
|
||||
if err != nil {
|
||||
return Storage{}, errors.Wrap(err, "unable to create PVC")
|
||||
return nil, errors.Wrap(err, "unable to create PVC")
|
||||
}
|
||||
|
||||
// Get DeploymentConfig for the given component
|
||||
componentLabels := componentlabels.GetLabels(componentName, applicationName, false)
|
||||
componentSelector := util.ConvertLabelsToSelector(componentLabels)
|
||||
dc, err := client.GetOneDeploymentConfigFromSelector(componentSelector)
|
||||
if err != nil {
|
||||
return Storage{}, errors.Wrapf(err, "unable to get Deployment Config for component: %v in application: %v", componentName, applicationName)
|
||||
}
|
||||
glog.V(4).Infof("Deployment Config: %v is associated with the component: %v", dc.Name, componentName)
|
||||
|
||||
// Add PVC to DeploymentConfig
|
||||
if err := client.AddPVCToDeploymentConfig(dc, pvc.Name, path); err != nil {
|
||||
return Storage{}, errors.Wrap(err, "unable to add PVC to DeploymentConfig")
|
||||
}
|
||||
|
||||
// getting the machine readable output format and mark status as active
|
||||
return getMachineReadableFormat(*pvc, path), nil
|
||||
return pvc, nil
|
||||
}
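
// Example (illustrative only, not part of this change): with the new workflow Create
// only provisions the PVC; wiring it into the DeploymentConfig is deferred to the
// push flow (see Push below). The names and size are hypothetical.
func exampleCreateStorage(client *occlient.Client) (*corev1.PersistentVolumeClaim, error) {
	pvc, err := Create(client, "storage-0", "1Gi", "nodejs-ex", "app")
	if err != nil {
		return nil, errors.Wrap(err, "unable to create storage")
	}
	return pvc, nil
}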
|
||||
|
||||
// Unmount unmounts the given storage from the given component
|
||||
@@ -106,35 +90,23 @@ func Unmount(client *occlient.Client, storageName string, componentName string,
|
||||
|
||||
// Delete removes storage from the given application.
|
||||
// Delete returns the component name, if it is mounted to a component, or "" and the error, if any
|
||||
func Delete(client *occlient.Client, name string, applicationName string) (string, error) {
|
||||
// unmount the storage from the component if mounted
|
||||
componentName, err := GetComponentNameFromStorageName(client, name)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "unable to find component name and app name")
|
||||
}
|
||||
if componentName != "" {
|
||||
err := Unmount(client, name, componentName, applicationName, false)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "unable to unmount storage %v", name)
|
||||
}
|
||||
}
|
||||
|
||||
func Delete(client *occlient.Client, name string) error {
|
||||
pvcName, err := getPVCNameFromStorageName(client, name)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "unable to get PVC for storage %v", name)
|
||||
return errors.Wrapf(err, "unable to get PVC for storage %v", name)
|
||||
}
|
||||
|
||||
// delete the associated PVC with the component
|
||||
err = client.DeletePVC(pvcName)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "unable to delete PVC %v", pvcName)
|
||||
return errors.Wrapf(err, "unable to delete PVC %v", pvcName)
|
||||
}
|
||||
|
||||
return componentName, nil
|
||||
return nil
|
||||
}
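
// Example (illustrative only, not part of this change): Delete now only removes the
// backing PVC for a storage name; detaching the volume from the DC happens separately
// during push.
func exampleDeleteStorage(client *occlient.Client, storageName string) error {
	if err := Delete(client, storageName); err != nil {
		return errors.Wrapf(err, "unable to delete storage %s", storageName)
	}
	return nil
}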
|
||||
|
||||
// List lists all the mounted storage associated with the given component of the given
|
||||
// application and the unmounted storages in the given application
|
||||
// application and the unmounted storage in the given application
|
||||
func List(client *occlient.Client, componentName string, applicationName string) (StorageList, error) {
|
||||
componentLabels := componentlabels.GetLabels(componentName, applicationName, false)
|
||||
componentSelector := util.ConvertLabelsToSelector(componentLabels)
|
||||
@@ -144,7 +116,19 @@ func List(client *occlient.Client, componentName string, applicationName string)
|
||||
return StorageList{}, errors.Wrapf(err, "unable to get Deployment Config associated with component %v", componentName)
|
||||
}
|
||||
|
||||
// fetch all the storage PVCs; they are kept in a map for faster lookup by name instead of scanning a list
|
||||
pvcs, err := client.GetPVCsFromSelector(storagelabels.StorageLabel)
|
||||
if err != nil {
|
||||
return StorageList{}, errors.Wrapf(err, "unable to get PVC using selector %v", storagelabels.StorageLabel)
|
||||
}
|
||||
|
||||
pvcMap := make(map[string]*corev1.PersistentVolumeClaim)
|
||||
// store in map for faster searching
|
||||
for _, pvc := range pvcs {
|
||||
readPVC := pvc
|
||||
pvcMap[pvc.Name] = &readPVC
|
||||
}
|
||||
|
||||
// store the storage in a map for faster searching with the key instead of list
|
||||
mountedStorageMap := make(map[string]string)
|
||||
volumeMounts := client.GetVolumeMountsFromDC(dc)
|
||||
for _, volumeMount := range volumeMounts {
|
||||
@@ -158,19 +142,21 @@ func List(client *occlient.Client, componentName string, applicationName string)
|
||||
if pvcName == "" {
|
||||
return StorageList{}, fmt.Errorf("no PVC associated with Volume Mount %v", volumeMount.Name)
|
||||
}
|
||||
pvc, err := client.GetPVCFromName(pvcName)
|
||||
if err != nil {
|
||||
return StorageList{}, errors.Wrapf(err, "unable to get PVC %v", pvcName)
|
||||
|
||||
pvc, ok := pvcMap[pvcName]
|
||||
if !ok {
|
||||
// since the pvc doesn't exist, it might be a supervisorD volume
|
||||
// if true, continue
|
||||
if client.IsAppSupervisorDVolume(volumeMount.Name, dc.Name) {
|
||||
continue
|
||||
}
|
||||
return StorageList{}, fmt.Errorf("unable to get PVC %v", pvcName)
|
||||
}
|
||||
|
||||
storageName := getStorageFromPVC(pvc)
|
||||
mountedStorageMap[storageName] = volumeMount.MountPath
|
||||
}
|
||||
|
||||
pvcs, err := client.GetPVCsFromSelector(storagelabels.StorageLabel)
|
||||
if err != nil {
|
||||
return StorageList{}, errors.Wrapf(err, "unable to get PVC using selector %v", storagelabels.StorageLabel)
|
||||
}
|
||||
var storage []Storage
|
||||
for _, pvc := range pvcs {
|
||||
pvcComponentName, ok := pvc.Labels[componentlabels.ComponentLabel]
|
||||
@@ -183,11 +169,15 @@ func List(client *occlient.Client, componentName string, applicationName string)
|
||||
return StorageList{}, fmt.Errorf("no PVC associated")
|
||||
}
|
||||
storageName := getStorageFromPVC(&pvc)
|
||||
storageMachineReadable := getMachineReadableFormat(pvc, mountedStorageMap[storageName])
|
||||
storageSize := pvc.Spec.Resources.Requests[corev1.ResourceStorage]
|
||||
storageMachineReadable := GetMachineReadableFormat(getStorageFromPVC(&pvc),
|
||||
storageSize.String(),
|
||||
mountedStorageMap[storageName],
|
||||
)
|
||||
storage = append(storage, storageMachineReadable)
|
||||
}
|
||||
}
|
||||
storageList := getMachineReadableFormatForList(storage)
|
||||
storageList := GetMachineReadableFormatForList(storage)
|
||||
return storageList, nil
|
||||
}
|
||||
|
||||
@@ -203,7 +193,7 @@ func ListMounted(client *occlient.Client, componentName string, applicationName
|
||||
storageListMounted = append(storageListMounted, storage)
|
||||
}
|
||||
}
|
||||
return getMachineReadableFormatForList(storageListMounted), nil
|
||||
return GetMachineReadableFormatForList(storageListMounted), nil
|
||||
}
|
||||
|
||||
// ListUnmounted lists all the unmounted storage associated with the given application
|
||||
@@ -222,11 +212,15 @@ func ListUnmounted(client *occlient.Client, applicationName string) (StorageList
|
||||
if pvc.Name == "" {
|
||||
return StorageList{}, fmt.Errorf("no PVC associated")
|
||||
}
|
||||
storageMachineReadable := getMachineReadableFormat(pvc, "")
|
||||
storageSize := pvc.Spec.Resources.Requests[corev1.ResourceStorage]
|
||||
storageMachineReadable := GetMachineReadableFormat(getStorageFromPVC(&pvc),
|
||||
storageSize.String(),
|
||||
"",
|
||||
)
|
||||
storage = append(storage, storageMachineReadable)
|
||||
}
|
||||
}
|
||||
storageList := getMachineReadableFormatForList(storage)
|
||||
storageList := GetMachineReadableFormatForList(storage)
|
||||
return storageList, nil
|
||||
}
|
||||
|
||||
@@ -316,18 +310,6 @@ func IsMounted(client *occlient.Client, storageName string, componentName string
|
||||
return false, nil
|
||||
}
|
||||
|
||||
//GetMountPath returns mount path for given storage
|
||||
func GetMountPath(client *occlient.Client, storageName string, componentName string, applicationName string) string {
|
||||
var mPath string
|
||||
storageList, _ := List(client, componentName, applicationName)
|
||||
for _, storage := range storageList.Items {
|
||||
if storage.Name == storageName {
|
||||
mPath = storage.Status.Path
|
||||
}
|
||||
}
|
||||
return mPath
|
||||
}
|
||||
|
||||
// Mount mounts the given storage to the given component
|
||||
func Mount(client *occlient.Client, path string, storageName string, componentName string, applicationName string) error {
|
||||
storageComponent, err := GetComponentNameFromStorageName(client, storageName)
|
||||
@@ -383,8 +365,76 @@ func GetStorageNameFromMountPath(client *occlient.Client, path string, component
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// getMachineReadableFormatForList gives machine readable StorageList definition
|
||||
func getMachineReadableFormatForList(storage []Storage) StorageList {
|
||||
// Push creates/deletes the required storage during `odo push`
|
||||
// storageList is the list of storage entries defined in the local config
|
||||
// isComponentExists indicates whether the component already exists; if it does not, the cluster list operation is skipped
|
||||
// returns the storage for mounting and unMounting from the DC
|
||||
// StorageToBeMounted describes the storage to be mounted
|
||||
// StorageToBeMounted : storagePath is the key of the map, the generated PVC is the value of the map
|
||||
// StorageToBeUnMounted describes the storage to be unmounted
|
||||
// StorageToBeUnMounted : path is the key of the map, storageName is the value of the map
|
||||
func Push(client *occlient.Client, storageList StorageList, componentName, applicationName string, isComponentExists bool) (map[string]*corev1.PersistentVolumeClaim, map[string]string, error) {
|
||||
// list all the storage in the cluster
|
||||
storageClusterList := StorageList{}
|
||||
var err error
|
||||
if isComponentExists {
|
||||
storageClusterList, err = ListMounted(client, componentName, applicationName)
|
||||
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
storageClusterNames := make(map[string]Storage)
|
||||
for _, storage := range storageClusterList.Items {
|
||||
storageClusterNames[storage.Name] = storage
|
||||
}
|
||||
|
||||
// list all the storage in the config
|
||||
storageConfigNames := make(map[string]Storage)
|
||||
for _, storage := range storageList.Items {
|
||||
storageConfigNames[storage.Name] = storage
|
||||
}
|
||||
|
||||
storageToBeMounted := make(map[string]*corev1.PersistentVolumeClaim)
|
||||
storageToBeUnMounted := make(map[string]string)
|
||||
|
||||
// find storage to delete
|
||||
for _, storage := range storageClusterList.Items {
|
||||
val, ok := storageConfigNames[storage.Name]
|
||||
if !ok {
|
||||
// delete the pvc
|
||||
err = Delete(client, storage.Name)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
log.Successf("Deleted storage %v from %v", storage.Name, componentName)
|
||||
storageToBeUnMounted[storage.Status.Path] = storage.Name
|
||||
continue
|
||||
} else if storage.Name == val.Name {
|
||||
if val.Spec.Size != storage.Spec.Size || val.Status.Path != storage.Status.Path {
|
||||
return nil, nil, errors.Errorf("config mismatch for storage with the same name %s", storage.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// find storage to create
|
||||
for _, storage := range storageList.Items {
|
||||
_, ok := storageClusterNames[storage.Name]
|
||||
if !ok {
|
||||
createdPVC, err := Create(client, storage.Name, storage.Spec.Size, componentName, applicationName)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
log.Successf("Added storage %v to %v", storage.Name, componentName)
|
||||
storageToBeMounted[storage.Status.Path] = createdPVC
|
||||
}
|
||||
}
|
||||
|
||||
return storageToBeMounted, storageToBeUnMounted, err
|
||||
}
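
// Example (illustrative only, not part of this change): a push-time reconciliation of
// configured storage against the cluster. configStorage would typically be built from
// the storage entries in the local config; the variable names are hypothetical.
func examplePushStorage(client *occlient.Client, configStorage StorageList,
	componentName, applicationName string, componentExists bool) error {
	toMount, toUnmount, err := Push(client, configStorage, componentName, applicationName, componentExists)
	if err != nil {
		return errors.Wrap(err, "unable to reconcile storage during push")
	}
	// toMount (path -> PVC) and toUnmount (path -> storage name) are then applied to
	// the component's DeploymentConfig by the occlient layer.
	glog.V(4).Infof("storage to mount: %d, storage to unmount: %d", len(toMount), len(toUnmount))
	return nil
}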
|
||||
|
||||
// GetMachineReadableFormatForList gives machine readable StorageList definition
|
||||
func GetMachineReadableFormatForList(storage []Storage) StorageList {
|
||||
return StorageList{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "List",
|
||||
@@ -395,20 +445,17 @@ func getMachineReadableFormatForList(storage []Storage) StorageList {
|
||||
}
|
||||
}
|
||||
|
||||
// getMachineReadableFormat gives machine readable Storage definition
|
||||
// GetMachineReadableFormat gives machine readable Storage definition
|
||||
// storagePath indicates the path to which the storage is mounted to, "" if not mounted
|
||||
func getMachineReadableFormat(pvc corev1.PersistentVolumeClaim, storagePath string) Storage {
|
||||
storageName := getStorageFromPVC(&pvc)
|
||||
storageSize := pvc.Spec.Resources.Requests[corev1.ResourceStorage]
|
||||
func GetMachineReadableFormat(storageName, storageSize, storagePath string) Storage {
|
||||
return Storage{
|
||||
TypeMeta: metav1.TypeMeta{Kind: "storage", APIVersion: "odo.openshift.io/v1alpha1"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: storageName},
|
||||
Spec: StorageSpec{
|
||||
Size: storageSize.String(),
|
||||
Size: storageSize,
|
||||
},
|
||||
Status: StorageStatus{
|
||||
Path: storagePath,
|
||||
},
|
||||
}
|
||||
|
||||
}
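
// Example (illustrative only, not part of this change): converting storage entries
// from the local config into the machine readable list used for `-o json` output.
// The anonymous struct stands in for the config storage type, which lives in pkg/config.
func exampleMachineReadableList(entries []struct{ Name, Size, Path string }) StorageList {
	items := []Storage{}
	for _, e := range entries {
		items = append(items, GetMachineReadableFormat(e.Name, e.Size, e.Path))
	}
	return GetMachineReadableFormatForList(items)
}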
|
||||
|
||||
@@ -1,17 +1,29 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"github.com/openshift/odo/pkg/occlient"
|
||||
"github.com/openshift/odo/pkg/testingutil"
|
||||
"github.com/openshift/odo/pkg/util"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
v1 "github.com/openshift/api/apps/v1"
|
||||
appLabels "github.com/openshift/odo/pkg/application/labels"
|
||||
componentLabels "github.com/openshift/odo/pkg/component/labels"
|
||||
"github.com/openshift/odo/pkg/storage/labels"
|
||||
storagelabels "github.com/openshift/odo/pkg/storage/labels"
|
||||
storageLabels "github.com/openshift/odo/pkg/storage/labels"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
ktesting "k8s.io/client-go/testing"
|
||||
)
|
||||
|
||||
func Test_getStorageFromPVC(t *testing.T) {
|
||||
func getStorageLabels(storageName, componentName, applicationName string) map[string]string {
|
||||
return storageLabels.GetLabels(storageName, componentName, applicationName, true)
|
||||
}
|
||||
|
||||
func Test_GetStorageFromPVC(t *testing.T) {
|
||||
type args struct {
|
||||
pvc *corev1.PersistentVolumeClaim
|
||||
}
|
||||
@@ -55,35 +67,19 @@ func Test_getStorageFromPVC(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func Test_getMachineReadableFormat(t *testing.T) {
	quantity, err := resource.ParseQuantity("100Mi")
	if err != nil {
		t.Errorf("unable to parse size")
	}
func TestGetMachineReadableFormat(t *testing.T) {
	tests := []struct {
		name string
		inputPVC *corev1.PersistentVolumeClaim
		storageName string
		storageSize string
		mountedPath string
		activeStatus bool
		want Storage
	}{
		{
			name: "test case 1: with a pvc, valid path and mounted status",
			inputPVC: &corev1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{
					Name: "pvc-example",
					Labels: map[string]string{
						storagelabels.StorageLabel: "pvc-example",
					},
				},
				Spec: corev1.PersistentVolumeClaimSpec{
					Resources: corev1.ResourceRequirements{
						Requests: corev1.ResourceList{
							corev1.ResourceStorage: quantity,
						},
					},
				},
			},
			name: "test case 1: with a pvc, valid path and mounted status",
			storageName: "pvc-example",
			storageSize: "100Mi",
			mountedPath: "data",
			activeStatus: true,
			want: Storage{
@@ -100,22 +96,9 @@ func Test_getMachineReadableFormat(t *testing.T) {
			},
		},
		{
			name: "test case 2: with a pvc, empty path and unmounted status",
			inputPVC: &corev1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{
					Name: "pvc-example",
					Labels: map[string]string{
						storagelabels.StorageLabel: "pvc-example",
					},
				},
				Spec: corev1.PersistentVolumeClaimSpec{
					Resources: corev1.ResourceRequirements{
						Requests: corev1.ResourceList{
							corev1.ResourceStorage: quantity,
						},
					},
				},
			},
			name: "test case 2: with a pvc, empty path and unmounted status",
			storageName: "pvc-example",
			storageSize: "500Mi",
			mountedPath: "",
			activeStatus: false,
			want: Storage{
@@ -124,7 +107,7 @@ func Test_getMachineReadableFormat(t *testing.T) {
				},
				TypeMeta: metav1.TypeMeta{Kind: "storage", APIVersion: "odo.openshift.io/v1alpha1"},
				Spec: StorageSpec{
					Size: "100Mi",
					Size: "500Mi",
				},
				Status: StorageStatus{
					Path: "",
@@ -134,7 +117,7 @@ func Test_getMachineReadableFormat(t *testing.T) {
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			gotStorage := getMachineReadableFormat(*tt.inputPVC, tt.mountedPath)
			gotStorage := GetMachineReadableFormat(tt.storageName, tt.storageSize, tt.mountedPath)
			if !reflect.DeepEqual(tt.want, gotStorage) {
				t.Errorf("the returned storage is different, expected: %v, got: %v", tt.want, gotStorage)
			}
@@ -142,7 +125,7 @@ func Test_getMachineReadableFormat(t *testing.T) {
	}
}

func Test_getMachineReadableFormatForList(t *testing.T) {
func TestGetMachineReadableFormatForList(t *testing.T) {

	tests := []struct {
		name string
@@ -270,10 +253,599 @@ func Test_getMachineReadableFormatForList(t *testing.T) {
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			gotStorage := getMachineReadableFormatForList(tt.inputStorage)
			gotStorage := GetMachineReadableFormatForList(tt.inputStorage)
			if !reflect.DeepEqual(tt.want, gotStorage) {
				t.Errorf("the returned storage is different, expected: %v, got: %v", tt.want, gotStorage)
			}
		})
	}
}

func TestCreate(t *testing.T) {
	type args struct {
		name string
		size string
		componentName string
		applicationName string
	}
	tests := []struct {
		name string
		args args
		wantLabels map[string]string
		wantErr bool
	}{
		{
			name: "case 1: with valid values",
			args: args{
				name: "storage-0",
				size: "100Mi",
				componentName: "nodejs-ex",
				applicationName: "app-ex",
			},
			wantLabels: map[string]string{
				"app": "app-ex",
				labels.StorageLabel: "storage-0",
				appLabels.ApplicationLabel: "app-ex",
				componentLabels.ComponentLabel: "nodejs-ex",
			},
			wantErr: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			fkclient, fkclientset := occlient.FakeNew()

			createdPVC, err := Create(fkclient, tt.args.name, tt.args.size, tt.args.componentName, tt.args.applicationName)

			// Check for validating actions performed
			if (len(fkclientset.Kubernetes.Actions()) != 1) && (tt.wantErr != true) {
t.Errorf("expected 1 action in CreatePVC got: %v", fkclientset.RouteClientset.Actions())
|
||||
			}

			// Checks for return values in positive cases
			if err == nil {
				quantity, err := resource.ParseQuantity(tt.args.size)
				if err != nil {
					t.Errorf("failed to create quantity by calling resource.ParseQuantity(%v)", tt.args.size)
				}

				// created PVC should be labeled with labels passed to CreatePVC
				if !reflect.DeepEqual(createdPVC.Labels, tt.wantLabels) {
t.Errorf("labels in created route is not matching expected labels, expected: %v, got: %v", tt.wantLabels, createdPVC.Labels)
|
||||
				}
				// name, size of createdPVC should be matching to size, name passed to CreatePVC
				if !reflect.DeepEqual(createdPVC.Spec.Resources.Requests["storage"], quantity) {
					t.Errorf("size of PVC is not matching to expected size, expected: %v, got %v", quantity, createdPVC.Spec.Resources.Requests["storage"])
				}
			}
		})
	}
}

func TestList(t *testing.T) {

	mountMap := make(map[string]*corev1.PersistentVolumeClaim)

	pvc1 := testingutil.FakePVC(generatePVCNameFromStorageName("storage-1-app"), "100Mi", getStorageLabels("storage-1", "nodejs", "app"))
	mountMap["data"] = pvc1

	pvc2 := testingutil.FakePVC(generatePVCNameFromStorageName("storage-2-app"), "500Mi", getStorageLabels("storage-2", "nodejs", "app"))
	mountMap["data-1"] = pvc2

	// pvc mounted to different app
	pvc3 := testingutil.FakePVC(generatePVCNameFromStorageName("storage-3-app"), "100Mi", getStorageLabels("storage-3", "wildfly", "app"))

	// unMounted pvc
	pvc4 := testingutil.FakePVC(generatePVCNameFromStorageName("storage-4-app"), "100Mi", getStorageLabels("storage-4", "", "app"))
	delete(pvc4.Labels, componentLabels.ComponentLabel)

	// mounted to the deploymentConfig but not returned and thus doesn't exist on the cluster
	pvc5 := testingutil.FakePVC(generatePVCNameFromStorageName("storage-5-app"), "100Mi", getStorageLabels("storage-5", "nodejs", "app"))

	type args struct {
		componentName string
		applicationName string
	}
	tests := []struct {
		name string
		args args
		componentType string
		returnedPVCs corev1.PersistentVolumeClaimList
		mountedMap map[string]*corev1.PersistentVolumeClaim
		wantedStorageList StorageList
		wantErr bool
	}{
		{
			name: "case 1: no error and all PVCs mounted",
			args: args{
				componentName: "nodejs",
				applicationName: "app",
			},
			returnedPVCs: corev1.PersistentVolumeClaimList{
				Items: []corev1.PersistentVolumeClaim{
					*pvc1, *pvc2,
				},
			},
			mountedMap: map[string]*corev1.PersistentVolumeClaim{
				"/data": pvc1,
				"/data-1": pvc2,
			},
			wantedStorageList: GetMachineReadableFormatForList([]Storage{
				GetMachineReadableFormat("storage-1", "100Mi", "/data"),
				GetMachineReadableFormat("storage-2", "500Mi", "/data-1"),
			}),
			wantErr: false,
		},
		{
			name: "case 2: no error and two PVCs mounted and one mounted to a different app",
			args: args{
				componentName: "nodejs",
				applicationName: "app",
			},
			returnedPVCs: corev1.PersistentVolumeClaimList{
				Items: []corev1.PersistentVolumeClaim{
					*pvc1, *pvc2, *pvc3,
				},
			},
			mountedMap: map[string]*corev1.PersistentVolumeClaim{
				"/data": pvc1,
				"/data-1": pvc2,
			},
			wantedStorageList: GetMachineReadableFormatForList([]Storage{
				GetMachineReadableFormat("storage-1", "100Mi", "/data"),
				GetMachineReadableFormat("storage-2", "500Mi", "/data-1"),
			}),
			wantErr: false,
		},
		{
			name: "case 3: no error and two PVCs mounted and one unmounted",
			args: args{
				componentName: "nodejs",
				applicationName: "app",
			},
			returnedPVCs: corev1.PersistentVolumeClaimList{
				Items: []corev1.PersistentVolumeClaim{
					*pvc1, *pvc2, *pvc4,
				},
			},
			mountedMap: map[string]*corev1.PersistentVolumeClaim{
				"/data": pvc1,
				"/data-1": pvc2,
			},
			wantedStorageList: GetMachineReadableFormatForList([]Storage{
				GetMachineReadableFormat("storage-1", "100Mi", "/data"),
				GetMachineReadableFormat("storage-2", "500Mi", "/data-1"),
				GetMachineReadableFormat("storage-4", "100Mi", ""),
			}),
			wantErr: false,
		},
		{
			name: "case 4: pvc mounted but doesn't exist on cluster",
			args: args{
				componentName: "nodejs",
				applicationName: "app",
			},
			returnedPVCs: corev1.PersistentVolumeClaimList{
				Items: []corev1.PersistentVolumeClaim{
					*pvc1, *pvc2,
				},
			},
			mountedMap: map[string]*corev1.PersistentVolumeClaim{
				"/data": pvc1,
				"/data-1": pvc5,
			},
			wantedStorageList: GetMachineReadableFormatForList([]Storage{
				GetMachineReadableFormat("storage-1", "100Mi", "/data"),
				GetMachineReadableFormat("storage-2", "500Mi", "/data-1"),
			}),
			wantErr: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			fakeClient, fakeClientSet := occlient.FakeNew()

			dcTesting := testingutil.OneFakeDeploymentConfigWithMounts(tt.args.componentName, tt.componentType, tt.args.applicationName, tt.mountedMap)

			fakeClientSet.AppsClientset.PrependReactor("list", "deploymentconfigs", func(action ktesting.Action) (bool, runtime.Object, error) {
				return true, &v1.DeploymentConfigList{
					Items: []v1.DeploymentConfig{
						*dcTesting,
					},
				}, nil
			})

			fakeClientSet.Kubernetes.PrependReactor("list", "persistentvolumeclaims", func(action ktesting.Action) (bool, runtime.Object, error) {
				return true, &tt.returnedPVCs, nil
			})

			storageList, err := List(fakeClient, tt.args.componentName, tt.args.applicationName)
			if err == nil && !tt.wantErr {
				if !reflect.DeepEqual(storageList, tt.wantedStorageList) {
					t.Errorf("storageList not equal, expected: %v, got: %v", tt.wantedStorageList, storageList.Items)
				}
			} else if err == nil && tt.wantErr {
t.Error("test failed, expected: false, got true")
|
||||
			} else if err != nil && !tt.wantErr {
				t.Errorf("test failed, expected: no error, got error: %s", err.Error())
			}
		})
	}
}

func TestListMounted(t *testing.T) {
	mountMap := make(map[string]*corev1.PersistentVolumeClaim)

	pvc1 := testingutil.FakePVC(generatePVCNameFromStorageName("storage-1-app"), "100Mi", getStorageLabels("storage-1", "nodejs", "app"))
	mountMap["data"] = pvc1

	pvc2 := testingutil.FakePVC(generatePVCNameFromStorageName("storage-2-app"), "500Mi", getStorageLabels("storage-2", "nodejs", "app"))
	mountMap["data-1"] = pvc2

	// unMounted pvc
	pvc3 := testingutil.FakePVC(generatePVCNameFromStorageName("storage-3-app"), "100Mi", getStorageLabels("storage-3", "", "app"))
	delete(pvc3.Labels, componentLabels.ComponentLabel)

	type args struct {
		componentName string
		applicationName string
	}
	tests := []struct {
		name string
		args args
		componentType string
		returnedPVCs corev1.PersistentVolumeClaimList
		mountedMap map[string]*corev1.PersistentVolumeClaim
		wantedStorageList StorageList
		wantErr bool
	}{
		{
			name: "case 1: no error and all PVCs mounted",
			args: args{
				componentName: "nodejs",
				applicationName: "app",
			},
			returnedPVCs: corev1.PersistentVolumeClaimList{
				Items: []corev1.PersistentVolumeClaim{
					*pvc1, *pvc2,
				},
			},
			mountedMap: map[string]*corev1.PersistentVolumeClaim{
				"/data": pvc1,
				"/data-1": pvc2,
			},
			wantedStorageList: GetMachineReadableFormatForList([]Storage{
				GetMachineReadableFormat("storage-1", "100Mi", "/data"),
				GetMachineReadableFormat("storage-2", "500Mi", "/data-1"),
			}),
			wantErr: false,
		},
		{
			name: "case 3: no error and two PVCs mounted and one unmounted",
			args: args{
				componentName: "nodejs",
				applicationName: "app",
			},
			returnedPVCs: corev1.PersistentVolumeClaimList{
				Items: []corev1.PersistentVolumeClaim{
					*pvc1, *pvc2, *pvc3,
				},
			},
			mountedMap: map[string]*corev1.PersistentVolumeClaim{
				"/data": pvc1,
				"/data-1": pvc2,
			},
			wantedStorageList: GetMachineReadableFormatForList([]Storage{
				GetMachineReadableFormat("storage-1", "100Mi", "/data"),
				GetMachineReadableFormat("storage-2", "500Mi", "/data-1"),
			}),
			wantErr: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			fakeClient, fakeClientSet := occlient.FakeNew()

			dcTesting := testingutil.OneFakeDeploymentConfigWithMounts(tt.args.componentName, tt.componentType, tt.args.applicationName, tt.mountedMap)

			fakeClientSet.AppsClientset.PrependReactor("list", "deploymentconfigs", func(action ktesting.Action) (bool, runtime.Object, error) {
				return true, &v1.DeploymentConfigList{
					Items: []v1.DeploymentConfig{
						*dcTesting,
					},
				}, nil
			})

			fakeClientSet.Kubernetes.PrependReactor("list", "persistentvolumeclaims", func(action ktesting.Action) (bool, runtime.Object, error) {
				return true, &tt.returnedPVCs, nil
			})

			storageList, err := ListMounted(fakeClient, tt.args.componentName, tt.args.applicationName)
			if err == nil && !tt.wantErr {
				if !reflect.DeepEqual(storageList, tt.wantedStorageList) {
					t.Errorf("storageList not equal, expected: %v, got: %v", tt.wantedStorageList, storageList.Items)
				}
			} else if err == nil && tt.wantErr {
t.Error("test failed, expected: false, got true")
|
||||
			} else if err != nil && !tt.wantErr {
				t.Errorf("test failed, expected: no error, got error: %s", err.Error())
			}
		})

	}

}

func TestPush(t *testing.T) {
	mountMap := make(map[string]*corev1.PersistentVolumeClaim)

	pvc1 := testingutil.FakePVC(generatePVCNameFromStorageName("backend-app"), "100Mi", getStorageLabels("backend", "nodejs", "app"))
	mountMap["data"] = pvc1

	pvc2 := testingutil.FakePVC(generatePVCNameFromStorageName("backend-1-app"), "500Mi", getStorageLabels("backend-1", "nodejs", "app"))
	mountMap["data-1"] = pvc2

	pvc3 := testingutil.FakePVC(generatePVCNameFromStorageName("backend-2-app"), "100Mi", getStorageLabels("backend-2", "nodejs", "app"))
	mountMap["data-2"] = pvc3

	type args struct {
		storageList StorageList
		componentName string
		applicationName string
		isComponentExists bool
	}
	tests := []struct {
		name string
		args args
		componentType string
		returnedPVCs corev1.PersistentVolumeClaimList
		dcMountedMap map[string]*corev1.PersistentVolumeClaim
		storageToMount map[string]*corev1.PersistentVolumeClaim
		storageToUnMount map[string]string
		wantErr bool
	}{
		{
			name: "case 1: component does not exist, no pvc on cluster and two on config",
			args: args{
				storageList: StorageList{
					Items: []Storage{
						GetMachineReadableFormat("backend", "100Mi", "data"),
						GetMachineReadableFormat("backend-1", "500Mi", "data-1"),
					},
				},
				componentName: "nodejs",
				applicationName: "app",
				isComponentExists: false,
			},
			returnedPVCs: corev1.PersistentVolumeClaimList{Items: []corev1.PersistentVolumeClaim{}},
			dcMountedMap: map[string]*corev1.PersistentVolumeClaim{},
			storageToMount: map[string]*corev1.PersistentVolumeClaim{
				"data": pvc1,
				"data-1": pvc2,
			},
			storageToUnMount: map[string]string{},
			wantErr: false,
		},
		{
			name: "case 2: component exists, two pvc on cluster and none on config",
			args: args{
				storageList: StorageList{Items: []Storage{}},
				componentName: "nodejs",
				applicationName: "app",
				isComponentExists: true,
			},
			returnedPVCs: corev1.PersistentVolumeClaimList{Items: []corev1.PersistentVolumeClaim{*pvc1, *pvc2}},
			dcMountedMap: map[string]*corev1.PersistentVolumeClaim{
				"data": pvc1,
				"data-1": pvc2,
			},
			storageToMount: map[string]*corev1.PersistentVolumeClaim{},
			storageToUnMount: map[string]string{
				"data": getStorageFromPVC(pvc1),
				"data-1": getStorageFromPVC(pvc2),
			},
			wantErr: false,
		},
		{
			name: "case 3: component exists, three PVCs, two in config and cluster but one not in config",
			args: args{
				storageList: StorageList{
					Items: []Storage{
						GetMachineReadableFormat("backend", "100Mi", "data"),
						GetMachineReadableFormat("backend-1", "500Mi", "data-1"),
					},
				},
				componentName: "nodejs",
				applicationName: "app",
				isComponentExists: true,
			},
			returnedPVCs: corev1.PersistentVolumeClaimList{Items: []corev1.PersistentVolumeClaim{*pvc1, *pvc2, *pvc3}},
			dcMountedMap: map[string]*corev1.PersistentVolumeClaim{
				"data": pvc1,
				"data-1": pvc2,
				"data-2": pvc3,
			},
			storageToMount: map[string]*corev1.PersistentVolumeClaim{},
			storageToUnMount: map[string]string{
				"data-2": getStorageFromPVC(pvc3),
			},
			wantErr: false,
		},
		{
			name: "case 4: component exists, three PVCs, one in config and cluster, one not in cluster and one not in config",
			args: args{
				storageList: StorageList{Items: []Storage{
					GetMachineReadableFormat("backend", "100Mi", "data"),
					GetMachineReadableFormat("backend-1", "500Mi", "data-1"),
				}},
				componentName: "nodejs",
				applicationName: "app",
				isComponentExists: true,
			},
			returnedPVCs: corev1.PersistentVolumeClaimList{Items: []corev1.PersistentVolumeClaim{*pvc1, *pvc3}},
			dcMountedMap: map[string]*corev1.PersistentVolumeClaim{
				"data": pvc1,
				"data-2": pvc3,
			},
			storageToMount: map[string]*corev1.PersistentVolumeClaim{
				"data-1": pvc2,
			},
			storageToUnMount: map[string]string{
				"data-2": getStorageFromPVC(pvc3),
			},
			wantErr: false,
		},
		{
			name: "case 5: component exists, two PVCs, both on cluster and config, but one with path config mismatch",
			args: args{
				storageList: StorageList{
					Items: []Storage{
						GetMachineReadableFormat("backend", "100Mi", "data"),
						GetMachineReadableFormat("backend-1", "500Mi", "data-100"),
					},
				},
				componentName: "nodejs",
				applicationName: "app",
				isComponentExists: true,
			},
			returnedPVCs: corev1.PersistentVolumeClaimList{Items: []corev1.PersistentVolumeClaim{*pvc1, *pvc2}},
			dcMountedMap: map[string]*corev1.PersistentVolumeClaim{
				"data": pvc1,
				"data-1": pvc2,
			},
			storageToMount: map[string]*corev1.PersistentVolumeClaim{},
			storageToUnMount: map[string]string{},
			wantErr: true,
		},
		{
			name: "case 6: component exists, two PVCs, both on cluster and config, but one with size config mismatch",
			args: args{
				storageList: StorageList{
					Items: []Storage{
						GetMachineReadableFormat("backend", "100Mi", "data"),
						GetMachineReadableFormat("backend-1", "50Mi", "data-1"),
					},
				},
				componentName: "nodejs",
				applicationName: "app",
				isComponentExists: true,
			},
			returnedPVCs: corev1.PersistentVolumeClaimList{Items: []corev1.PersistentVolumeClaim{*pvc1, *pvc2}},
			dcMountedMap: map[string]*corev1.PersistentVolumeClaim{
				"data": pvc1,
				"data-1": pvc2,
			},
			storageToMount: map[string]*corev1.PersistentVolumeClaim{},
			storageToUnMount: map[string]string{},
			wantErr: true,
		},
		{
name: "case 7: component exists, one pvc on config and cluster",
|
||||
			args: args{
				storageList: StorageList{Items: []Storage{}},
				componentName: "nodejs",
				applicationName: "app",
				isComponentExists: true,
			},
			returnedPVCs: corev1.PersistentVolumeClaimList{Items: []corev1.PersistentVolumeClaim{}},
			dcMountedMap: map[string]*corev1.PersistentVolumeClaim{},
			storageToMount: map[string]*corev1.PersistentVolumeClaim{},
			storageToUnMount: map[string]string{},
			wantErr: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			fakeClient, fakeClientSet := occlient.FakeNew()

			dcTesting := testingutil.OneFakeDeploymentConfigWithMounts(tt.args.componentName, tt.componentType, tt.args.applicationName, tt.dcMountedMap)

			fakeClientSet.AppsClientset.PrependReactor("list", "deploymentconfigs", func(action ktesting.Action) (bool, runtime.Object, error) {
				return true, &v1.DeploymentConfigList{
					Items: []v1.DeploymentConfig{
						*dcTesting,
					},
				}, nil
			})

			fakeClientSet.Kubernetes.PrependReactor("list", "persistentvolumeclaims", func(action ktesting.Action) (bool, runtime.Object, error) {
				return true, &tt.returnedPVCs, nil
			})

			fakeClientSet.Kubernetes.PrependReactor("delete", "persistentvolumeclaims", func(action ktesting.Action) (bool, runtime.Object, error) {
				for _, storageName := range tt.storageToUnMount {
					namespacedOpenShiftObject, err := util.NamespaceOpenShiftObject(storageName, tt.args.applicationName)
					if err != nil {
						return false, nil, nil
					}
					if generatePVCNameFromStorageName(namespacedOpenShiftObject) == action.(ktesting.DeleteAction).GetName() {
						return true, nil, nil
					}
				}
				return false, nil, nil
			})

			storageToMount, storageToUnmount, err := Push(fakeClient, tt.args.storageList, tt.args.componentName, tt.args.applicationName, tt.args.isComponentExists)

			if err == nil && !tt.wantErr {
				// check if the len of the storageToMount values are the same as the required ones
				if len(storageToMount) != len(tt.storageToMount) {
					t.Errorf("storageToMount value mismatch, expected: %v, got: %v", len(tt.storageToMount), len(storageToMount))
				}

				// check if the PVCs are created with the required specs and will be mounted to the required path
				var createdPVCs []*corev1.PersistentVolumeClaim
				for _, action := range fakeClientSet.Kubernetes.Actions() {
					if _, ok := action.(ktesting.CreateAction); !ok {
						continue
					}
					createdPVCs = append(createdPVCs, action.(ktesting.CreateAction).GetObject().(*corev1.PersistentVolumeClaim))
				}

				for _, pvc := range storageToMount {
					found := false
					for _, createdPVC := range createdPVCs {
						if pvc.Name == createdPVC.Name {
							found = true
							createdPVCSize := createdPVC.Spec.Resources.Requests[corev1.ResourceStorage]
							pvcSize := pvc.Spec.Resources.Requests[corev1.ResourceStorage]
							if pvcSize.String() != createdPVCSize.String() {
								t.Errorf("pvc with name %v created with wrong size, expected: %v, got %v", pvc.Name, pvcSize, createdPVCSize)
							}
						}
					}

					if !found {
						t.Errorf("pvc with name %v not created", pvc.Name)
					}

				}

				for pathResult, pvcResult := range storageToMount {
					for path, pvc := range tt.storageToMount {
						if pvc.Name == pvcResult.Name {
							if path != pathResult {
								t.Errorf("pvc mounted to wrong path, expected: %v, got: %v", path, pathResult)
							}
						}
					}
				}

				// check if the storageToUnMounted values match the required ones
				if !reflect.DeepEqual(storageToUnmount, tt.storageToUnMount) {
					t.Errorf("storageToUnmount is different, expected: %v, got: %v", tt.storageToUnMount, storageToUnmount)
				}

			} else if err == nil && tt.wantErr {
t.Error("test failed, expected: false, got true")
|
||||
			} else if err != nil && !tt.wantErr {
				t.Errorf("test failed, expected: no error, got error: %s", err.Error())
			}
		})

	}
}

@@ -2,6 +2,7 @@ package testingutil

import (
	"fmt"
	"github.com/openshift/odo/pkg/util"

	v1 "github.com/openshift/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
@@ -155,3 +156,69 @@ func FakeDeploymentConfigs() *v1.DeploymentConfigList {
		},
	}
}

// OneFakeDeploymentConfigWithMounts returns a fake DeploymentConfig with the given
// storage mounted; mountedStorage maps the mount path (key) to the pvc to be mounted (value)
func OneFakeDeploymentConfigWithMounts(componentName, componentType, applicationName string, mountedStorage map[string]*corev1.PersistentVolumeClaim) *v1.DeploymentConfig {
	c := getContainer(componentName, applicationName, []corev1.ContainerPort{
		{
			Name: fmt.Sprintf("%v-%v-p1", componentName, applicationName),
			ContainerPort: 8080,
			Protocol: corev1.ProtocolTCP,
		},
		{
			Name: fmt.Sprintf("%v-%v-p2", componentName, applicationName),
			ContainerPort: 9090,
			Protocol: corev1.ProtocolUDP,
		},
	}, nil)

	dc := getDeploymentConfig("myproject", componentName, componentType, applicationName, []corev1.Container{c})

	supervisorDPVC := FakePVC(getAppRootVolumeName(dc.Name), "1Gi", nil)

	for path, pvc := range mountedStorage {
		volumeName := generateVolumeNameFromPVC(pvc.Name)
		dc.Spec.Template.Spec.Volumes = append(dc.Spec.Template.Spec.Volumes, corev1.Volume{
			Name: volumeName,
			VolumeSource: corev1.VolumeSource{
				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
					ClaimName: pvc.Name,
				},
			},
		})
		dc.Spec.Template.Spec.Containers[0].VolumeMounts = append(dc.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{
			Name: volumeName,
			MountPath: path,
		})
	}

	// now append the supervisorD volume
	dc.Spec.Template.Spec.Volumes = append(dc.Spec.Template.Spec.Volumes, corev1.Volume{
		Name: getAppRootVolumeName(dc.Name),
		VolumeSource: corev1.VolumeSource{
			PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
				ClaimName: supervisorDPVC.Name,
			},
		},
	})

	// now append the supervisorD volume mount
	dc.Spec.Template.Spec.Containers[0].VolumeMounts = append(dc.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{
		Name: getAppRootVolumeName(dc.Name),
		MountPath: "/opt/app-root",
		SubPath: "app-root",
	})

	return &dc
}

// generateVolumeNameFromPVC generates a random volume name based on the name
// of the given PVC
func generateVolumeNameFromPVC(pvc string) string {
	return fmt.Sprintf("%v-%v-volume", pvc, util.GenerateRandomString(5))
}

func getAppRootVolumeName(dcName string) string {
	return fmt.Sprintf("%s-s2idata", dcName)
}

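A hypothetical sketch (not part of the commit) of how this fake DeploymentConfig helper plugs into occlient's fake clientset, mirroring the pattern used by the storage unit tests above; the test name and values are made up for illustration:

package testingutil_test

import (
	"testing"

	v1 "github.com/openshift/api/apps/v1"
	"github.com/openshift/odo/pkg/occlient"
	"github.com/openshift/odo/pkg/testingutil"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ktesting "k8s.io/client-go/testing"
)

// TestFakeDCWithMountsSketch builds a fake dc with one mounted PVC and serves it
// from a list reactor, the way the storage tests feed List/Push.
func TestFakeDCWithMountsSketch(t *testing.T) {
	pvc := testingutil.FakePVC("backend-app", "100Mi", nil)
	dc := testingutil.OneFakeDeploymentConfigWithMounts("nodejs", "nodejs", "app",
		map[string]*corev1.PersistentVolumeClaim{"/data": pvc})

	// the helper adds one volume per mount plus the supervisorD volume
	if len(dc.Spec.Template.Spec.Volumes) != 2 {
		t.Errorf("expected the mounted volume plus the supervisorD volume, got %d volumes", len(dc.Spec.Template.Spec.Volumes))
	}

	_, fakeClientSet := occlient.FakeNew()
	fakeClientSet.AppsClientset.PrependReactor("list", "deploymentconfigs", func(action ktesting.Action) (bool, runtime.Object, error) {
		return true, &v1.DeploymentConfigList{Items: []v1.DeploymentConfig{*dc}}, nil
	})
}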
29
pkg/testingutil/pvcs.go
Normal file
29
pkg/testingutil/pvcs.go
Normal file
@@ -0,0 +1,29 @@
package testingutil

import (
	"k8s.io/apimachinery/pkg/api/resource"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func FakePVC(pvcName, size string, labels map[string]string) *corev1.PersistentVolumeClaim {
	quantity, err := resource.ParseQuantity(size)
	if err != nil {
		return nil
	}

	return &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name: pvcName,
			Labels: labels,
		},
		Spec: corev1.PersistentVolumeClaimSpec{
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: quantity,
				},
			},
		},
	}
}
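A hypothetical usage sketch (not part of the commit) showing FakePVC on its own: the returned PVC carries the given labels and requested size, and the function returns nil when the size cannot be parsed. Names and assertions are illustrative only:

package testingutil_test

import (
	"testing"

	storagelabels "github.com/openshift/odo/pkg/storage/labels"
	"github.com/openshift/odo/pkg/testingutil"
)

// TestFakePVCSketch builds a labeled fake PVC the way the storage unit tests do.
func TestFakePVCSketch(t *testing.T) {
	pvc := testingutil.FakePVC("storage-1-app", "100Mi", map[string]string{
		storagelabels.StorageLabel: "storage-1",
	})
	if pvc == nil {
		t.Fatal("FakePVC returned nil, the size could not be parsed")
	}
	if pvc.Labels[storagelabels.StorageLabel] != "storage-1" {
		t.Errorf("unexpected storage label: %v", pvc.Labels)
	}
}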
@@ -20,3 +20,4 @@ make test-cmp-e2e
make test-cmp-sub-e2e
make test-odo-config
make test-watch-e2e
make test-storage-e2e
@@ -271,3 +271,21 @@ func (oc *OcRunner) ServiceInstanceStatus(serviceInstanceName string) string {
		"-o", "go-template='{{ (index .status.conditions 0).reason}}'")
	return strings.TrimSpace(serviceinstance)
}

// GetVolumeMountName returns the names of the volumes mounted in the given dc
func (oc *OcRunner) GetVolumeMountName(dcName string) string {
	volumeName := CmdShouldPass(oc.path, "get", "dc", dcName, "-o", "go-template='"+
		"{{range .spec.template.spec.containers}}"+
		"{{range .volumeMounts}}{{.name}}{{end}}{{end}}'")

	return strings.TrimSpace(volumeName)
}

// GetVolumeMountPath returns the mount paths of the volumes in the given dc
func (oc *OcRunner) GetVolumeMountPath(dcName string) string {
	volumePaths := CmdShouldPass(oc.path, "get", "dc", dcName, "-o", "go-template='"+
		"{{range .spec.template.spec.containers}}"+
		"{{range .volumeMounts}}{{.mountPath}} {{end}}{{end}}'")

	return strings.TrimSpace(volumePaths)
}

@@ -75,7 +75,7 @@ var _ = Describe("odojsonoutput", func() {

		// odo storage list -o json
		actualSrorageList := helper.CmdShouldPass("odo", "storage", "list", "-o", "json")
		desiredSrorageList := `{"kind":"List","apiVersion":"odo.openshift.io/v1aplha1","metadata":{},"items":[{"kind":"Storage","apiVersion":"odo.openshift.io/v1alpha1","metadata":{"name":"mystorage","creationTimestamp":null},"spec":{"size":"1Gi"},"status":{"path":"/opt/app-root/src/storage/"}}]}`
		desiredSrorageList := `{"kind":"List","apiVersion":"odo.openshift.io/v1alpha1","metadata":{},"items":[{"kind":"storage","apiVersion":"odo.openshift.io/v1alpha1","metadata":{"name":"mystorage","creationTimestamp":null},"spec":{"size":"1Gi"},"status":{"path":"/opt/app-root/src/storage/"}}]}`
		Expect(desiredSrorageList).Should(MatchJSON(actualSrorageList))
	})
})

73
tests/integration/storage_test.go
Normal file
73
tests/integration/storage_test.go
Normal file
@@ -0,0 +1,73 @@
package integration

import (
	"os"
	"path/filepath"
	"time"

	//. "github.com/Benjamintf1/unmarshalledmatchers"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"github.com/openshift/odo/tests/helper"
)

var _ = Describe("odoStorageE2e", func() {
	var project string
	var context string

	appName := "app"
	cmpName := "nodejs"

	// This is run before every Spec (It)
	var _ = BeforeEach(func() {
		SetDefaultEventuallyTimeout(10 * time.Minute)
		project = helper.CreateRandProject()
		context = helper.CreateNewContext()
		oc = helper.NewOcRunner("oc")
	})

	// Clean up after the test
	// This is run after every Spec (It)
	var _ = AfterEach(func() {
		helper.DeleteProject(project)
		os.RemoveAll(".odo")
	})

	Context("Storage test", func() {

		It("should add a storage, list and delete it", func() {
			helper.CopyExample(filepath.Join("source", "nodejs"), context)
			helper.Chdir(context)

			helper.CmdShouldPass("odo", "component", "create", "nodejs", cmpName, "--app", appName, "--project", project)
			storAdd := helper.CmdShouldPass("odo", "storage", "create", "pv1", "--path", "/mnt/pv1", "--size", "5Gi", "--context", context)
			Expect(storAdd).To(ContainSubstring("nodejs"))
			helper.CmdShouldPass("odo", "push")

			dcName := oc.GetDcName(cmpName, project)

			// Check the volume name against the dc
			getDcVolumeMountName := oc.GetVolumeMountName(dcName)

			Expect(getDcVolumeMountName).To(ContainSubstring("pv1"))

			// Check if the storage is added on the path provided
			getMntPath := oc.GetVolumeMountPath(dcName)
			Expect(getMntPath).To(ContainSubstring("/mnt/pv1"))

			storeList := helper.CmdShouldPass("odo", "storage", "list")
			Expect(storeList).To(ContainSubstring("pv1"))

			// delete the storage
			helper.CmdShouldPass("odo", "storage", "delete", "pv1", "-f")

			storeList = helper.CmdShouldPass("odo", "storage", "list")
			Expect(storeList).NotTo(ContainSubstring("pv1"))
			helper.CmdShouldPass("odo", "push")

			getDcVolumeMountName = oc.GetVolumeMountName(dcName)
			Expect(getDcVolumeMountName).NotTo(ContainSubstring("pv1"))
		})
	})

})
@@ -58,7 +58,6 @@ var _ = Describe("odoWatchE2e", func() {
			output := helper.CmdShouldFail("odo", "watch")
			Expect(output).To(ContainSubstring("component does not exist. Please use `odo push` to create you component"))
		})

	})
})