Podman dev (#6183)

* Add runOn flag

* Instantiate Kube/Podman DevClient instance according to runOn flag

* Start and stop Podman Pod

* Unit tests for createPodFromComponent

* podman mode runs without Kube config

* Fix tests + rebase

* Add sync and exec clients to podman dev

* TBC: call sync

* Remove unnecessary kclient dependency from exec client

* Inject podman client

* Implement ExecCMDInContainer for podman client

* Move [gG]etFirstContainerWithSourceVolume to dev/common package and use it for Podman

* TBC: execute build/run commands

* Fix rebase

* Fix unit test

* Comments for refactoring

* Prepare implementation of Platform

* Use ExecuteRunCommand for run handler

* Do not pass parameters present in context

* Move back instantiation of devclient to clientset package

* Move CleanupDevResources method from Watch to Dev package

* CleanupResources: Do not pass parameters present in context

* Fix deleting volumes at exit time

* Check volumes do not exist

* Add support for postStart event

* Make odo dev work when no cluster is configured

* Review

* Add more TODOs

* User interface

* More info on ExitError errors

* Display forwarded ports in output and devstate file

* Add tests for forwarded ports

* Create .odo when necessary

* Add keyboard commands message

* Add doc on podman platform
Authored by Philippe Martin on 2022-11-21 14:56:44 +01:00; committed by GitHub.
parent d1d7ccebbf
commit 20ea3a2043
28 changed files with 1490 additions and 204 deletions


@@ -47,3 +47,15 @@ $ odo dev --run-on=some-platform
### Generic `--run-on` flag
This is a generic flag that allows running `odo` against any supported platform, and not only the default Kubernetes or OpenShift cluster.
The supported platforms are `cluster` and `podman`.
By default, if you do not use the `--run-on` flag, or if you do not activate the experimental mode, the `cluster` platform is used.
The `cluster` platform uses the current Kubernetes or OpenShift cluster.
The `podman` platform uses the local installation of Podman; it requires the `podman` binary to be installed on your system.
These commands support the `--run-on` flag:
- `odo dev`
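For example, with the experimental mode enabled, `odo dev --run-on podman` starts the Dev session on Podman instead of on the cluster.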

pkg/dev/common/common.go (new file, 26 lines)

@@ -0,0 +1,26 @@
package common
import (
"fmt"
"github.com/devfile/library/pkg/devfile/generator"
corev1 "k8s.io/api/core/v1"
)
// GetFirstContainerWithSourceVolume returns the first container that set mountSources: true as well
// as the path to the source volume inside the container.
// Because the source volume is shared across all components that need it, we only need to sync once,
// so we only need to find one container. If no container was found, that means there's no
// container to sync to, so return an error
func GetFirstContainerWithSourceVolume(containers []corev1.Container) (string, string, error) {
for _, c := range containers {
for _, env := range c.Env {
if env.Name == generator.EnvProjectsSrc {
return c.Name, env.Value, nil
}
}
}
return "", "", fmt.Errorf("in order to sync files, odo requires at least one component in a devfile to set 'mountSources: true'")
}


@@ -0,0 +1,113 @@
package common
import (
"testing"
"github.com/devfile/library/pkg/devfile/generator"
corev1 "k8s.io/api/core/v1"
)
func TestGetFirstContainerWithSourceVolume(t *testing.T) {
tests := []struct {
name string
containers []corev1.Container
want string
wantSourcePath string
wantErr bool
}{
{
name: "Case: One container, Project Source Env",
containers: []corev1.Container{
{
Name: "test",
Env: []corev1.EnvVar{
{
Name: "RANDOMENV",
Value: "/mypath2",
},
{
Name: generator.EnvProjectsSrc,
Value: "/mypath",
},
},
},
},
want: "test",
wantSourcePath: "/mypath",
wantErr: false,
},
{
name: "Case: Multiple containers, multiple Project Source Env",
containers: []corev1.Container{
{
Name: "test1",
Env: []corev1.EnvVar{
{
Name: "RANDOMENV",
Value: "/mypath1",
},
{
Name: generator.EnvProjectsSrc,
Value: "/mypath1",
},
},
},
{
Name: "test2",
Env: []corev1.EnvVar{
{
Name: "RANDOMENV",
Value: "/mypath2",
},
{
Name: generator.EnvProjectsSrc,
Value: "/mypath2",
},
},
},
},
want: "test1",
wantSourcePath: "/mypath1",
wantErr: false,
},
{
name: "Case: Multiple containers, no Project Source Env",
containers: []corev1.Container{
{
Name: "test1",
Env: []corev1.EnvVar{
{
Name: "RANDOMENV",
Value: "/mypath1",
},
},
},
{
Name: "test2",
Env: []corev1.EnvVar{
{
Name: "RANDOMENV",
Value: "/mypath2",
},
},
},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
container, syncFolder, err := GetFirstContainerWithSourceVolume(tt.containers)
if container != tt.want {
t.Errorf("expected %s, actual %s", tt.want, container)
}
if syncFolder != tt.wantSourcePath {
t.Errorf("expected %s, actual %s", tt.wantSourcePath, syncFolder)
}
if !tt.wantErr == (err != nil) {
t.Errorf("expected %v, actual %v", tt.wantErr, err)
}
})
}
}


@@ -14,6 +14,8 @@ type StartOptions struct {
BuildCommand string
// If RunCommand is set, this will look up the specified run command in the Devfile and execute it. Otherwise, it uses the default one.
RunCommand string
// If DebugCommand is set, this will look up the specified debug command in the Devfile and execute it. Otherwise, it uses the default one.
DebugCommand string
// if RandomPorts is set, will port forward on random local ports, else uses ports starting at 40001
RandomPorts bool
// if WatchFiles is set, files changes will trigger a new sync to the container
@@ -23,7 +25,7 @@ type StartOptions struct {
}
type Client interface {
// Start the resources in devfileObj on the namespace. It then pushes the files in path to the container.
// Start the resources defined in context's Devfile on the platform. It then pushes the files in path to the container.
// It then watches for any changes to the files under path.
// It logs messages and errors to out and errOut.
Start(
@@ -32,4 +34,7 @@ type Client interface {
errOut io.Writer,
options StartOptions,
) error
// CleanupResources deletes the component created using the context's devfile and writes any outputs to out
CleanupResources(ctx context.Context, out io.Writer) error
}
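As a point of reference, here is a minimal usage sketch of the reshaped interface (not part of the commit). It assumes the caller already obtained a dev.Client from the clientset and a context carrying the Devfile, component name and application, as the kubedev and podmandev implementations below expect; the command names are hypothetical.

package example

import (
	"context"
	"os"

	"github.com/redhat-developer/odo/pkg/dev"
)

// runDevSession drives any dev.Client implementation (cluster or podman),
// overriding the Devfile's default build and run commands.
func runDevSession(ctx context.Context, devClient dev.Client) error {
	opts := dev.StartOptions{
		BuildCommand: "my-build", // hypothetical command name defined in the Devfile
		RunCommand:   "my-run",   // hypothetical command name defined in the Devfile
		RandomPorts:  false,      // forward on local ports starting at 40001
	}
	err := devClient.Start(ctx, os.Stdout, os.Stderr, opts)
	if err != nil {
		// On failure, delete the resources created from the Devfile on the platform.
		_ = devClient.CleanupResources(ctx, os.Stdout)
	}
	return err
}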


@@ -0,0 +1,43 @@
package kubedev
import (
"context"
"fmt"
"io"
"github.com/redhat-developer/odo/pkg/labels"
odocontext "github.com/redhat-developer/odo/pkg/odo/context"
kerrors "k8s.io/apimachinery/pkg/api/errors"
)
func (o *DevClient) CleanupResources(ctx context.Context, out io.Writer) error {
var (
componentName = odocontext.GetComponentName(ctx)
devfileObj = odocontext.GetDevfileObj(ctx)
)
fmt.Fprintln(out, "Cleaning resources, please wait")
appname := odocontext.GetApplication(ctx)
isInnerLoopDeployed, resources, err := o.deleteClient.ListResourcesToDeleteFromDevfile(*devfileObj, appname, componentName, labels.ComponentDevMode)
if err != nil {
if kerrors.IsUnauthorized(err) || kerrors.IsForbidden(err) {
fmt.Fprintf(out, "Error connecting to the cluster, the resources were not cleaned up.\nPlease log in again and cleanup the resource with `odo delete component`\n\n")
} else {
fmt.Fprintf(out, "Failed to delete inner loop resources: %v\n", err)
}
return err
}
// if innerloop deployment resource is present, then execute preStop events
if isInnerLoopDeployed {
err = o.deleteClient.ExecutePreStopEvents(*devfileObj, appname, componentName)
if err != nil {
fmt.Fprint(out, "Failed to execute preStop events")
}
}
// delete all the resources
failed := o.deleteClient.DeleteResources(resources, true)
for _, fail := range failed {
fmt.Fprintf(out, "Failed to delete the %q resource: %s\n", fail.GetKind(), fail.GetName())
}
return nil
}


@@ -1,4 +1,4 @@
package dev
package kubedev
import (
"context"
@@ -7,6 +7,8 @@ import (
"path/filepath"
"github.com/redhat-developer/odo/pkg/binding"
_delete "github.com/redhat-developer/odo/pkg/component/delete"
"github.com/redhat-developer/odo/pkg/dev"
"github.com/redhat-developer/odo/pkg/devfile"
"github.com/redhat-developer/odo/pkg/exec"
"github.com/redhat-developer/odo/pkg/kclient"
@@ -33,9 +35,10 @@ type DevClient struct {
syncClient sync.Client
filesystem filesystem.Filesystem
execClient exec.Client
deleteClient _delete.Client
}
var _ Client = (*DevClient)(nil)
var _ dev.Client = (*DevClient)(nil)
func NewDevClient(
kubernetesClient kclient.ClientInterface,
@@ -46,6 +49,7 @@ func NewDevClient(
syncClient sync.Client,
filesystem filesystem.Filesystem,
execClient exec.Client,
deleteClient _delete.Client,
) *DevClient {
return &DevClient{
kubernetesClient: kubernetesClient,
@@ -56,6 +60,7 @@ func NewDevClient(
syncClient: syncClient,
filesystem: filesystem,
execClient: execClient,
deleteClient: deleteClient,
}
}
@@ -63,7 +68,7 @@ func (o *DevClient) Start(
ctx context.Context,
out io.Writer,
errOut io.Writer,
options StartOptions,
options dev.StartOptions,
) error {
klog.V(4).Infoln("Creating new adapter")


@@ -35,6 +35,20 @@ func (m *MockClient) EXPECT() *MockClientMockRecorder {
return m.recorder
}
// CleanupResources mocks base method.
func (m *MockClient) CleanupResources(ctx context.Context, out io.Writer) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CleanupResources", ctx, out)
ret0, _ := ret[0].(error)
return ret0
}
// CleanupResources indicates an expected call of CleanupResources.
func (mr *MockClientMockRecorder) CleanupResources(ctx, out interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupResources", reflect.TypeOf((*MockClient)(nil).CleanupResources), ctx, out)
}
// Start mocks base method.
func (m *MockClient) Start(ctx context.Context, out, errOut io.Writer, options StartOptions) error {
m.ctrl.T.Helper()


@@ -0,0 +1,10 @@
package podmandev
import (
"context"
"io"
)
func (o *DevClient) CleanupResources(ctx context.Context, out io.Writer) error {
return nil
}


@@ -0,0 +1,44 @@
package podmandev
import (
devfilev1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
"k8s.io/klog"
"github.com/redhat-developer/odo/pkg/component"
"github.com/redhat-developer/odo/pkg/exec"
"github.com/redhat-developer/odo/pkg/libdevfile"
"github.com/redhat-developer/odo/pkg/platform"
)
type commandHandler struct {
execClient exec.Client
platformClient platform.Client
componentExists bool
podName string
appName string
componentName string
}
var _ libdevfile.Handler = (*commandHandler)(nil)
func (a commandHandler) ApplyImage(img devfilev1.Component) error {
klog.V(4).Info("apply image commands are not implemented on podman")
return nil
}
func (a commandHandler) ApplyKubernetes(kubernetes devfilev1.Component) error {
klog.V(4).Info("apply kubernetes commands are not implemented on podman")
return nil
}
func (a commandHandler) Execute(devfileCmd devfilev1.Command) error {
return component.ExecuteRunCommand(
a.execClient,
a.platformClient,
devfileCmd,
a.componentExists,
a.podName,
a.appName,
a.componentName,
)
}

pkg/dev/podmandev/pod.go (new file, 99 lines)

@@ -0,0 +1,99 @@
package podmandev
import (
"fmt"
"github.com/devfile/library/pkg/devfile/generator"
"github.com/devfile/library/pkg/devfile/parser"
"github.com/devfile/library/pkg/devfile/parser/data/v2/common"
"github.com/redhat-developer/odo/pkg/api"
"github.com/redhat-developer/odo/pkg/devfile/adapters/kubernetes/utils"
"github.com/redhat-developer/odo/pkg/storage"
"github.com/redhat-developer/odo/pkg/util"
corev1 "k8s.io/api/core/v1"
)
func createPodFromComponent(
devfileObj parser.DevfileObj,
componentName string,
appName string,
buildCommand string,
runCommand string,
debugCommand string,
) (*corev1.Pod, []api.ForwardedPort, error) {
containers, err := generator.GetContainers(devfileObj, common.DevfileOptions{})
if err != nil {
return nil, nil, err
}
if len(containers) == 0 {
return nil, nil, fmt.Errorf("no valid components found in the devfile")
}
containers, err = utils.UpdateContainersEntrypointsIfNeeded(devfileObj, containers, buildCommand, runCommand, debugCommand)
if err != nil {
return nil, nil, err
}
utils.AddOdoProjectVolume(&containers)
utils.AddOdoMandatoryVolume(&containers)
fwPorts := addHostPorts(containers)
volumes := []corev1.Volume{
{
Name: storage.OdoSourceVolume,
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: getVolumeName(componentName, appName, "source"),
},
},
},
{
Name: storage.SharedDataVolumeName,
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: getVolumeName(componentName, appName, "shared"),
},
},
},
}
// TODO add labels (for GetRunningPodFromSelector)
pod := corev1.Pod{
Spec: corev1.PodSpec{
Containers: containers,
Volumes: volumes,
},
}
pod.APIVersion, pod.Kind = corev1.SchemeGroupVersion.WithKind("Pod").ToAPIVersionAndKind()
name, err := util.NamespaceKubernetesObject(componentName, appName)
if err != nil {
return nil, nil, err
}
pod.SetName(name)
return &pod, fwPorts, nil
}
func getVolumeName(componentName string, appName string, volume string) string {
return "odo-projects-" + componentName + "-" + appName + "-" + volume
}
func addHostPorts(containers []corev1.Container) []api.ForwardedPort {
result := []api.ForwardedPort{}
hostPort := int32(39001)
for i := range containers {
for j := range containers[i].Ports {
result = append(result, api.ForwardedPort{
ContainerName: containers[i].Name,
LocalAddress: "127.0.0.1",
LocalPort: int(hostPort),
ContainerPort: int(containers[i].Ports[j].ContainerPort),
})
containers[i].Ports[j].HostPort = hostPort
hostPort++
}
}
return result
}


@@ -0,0 +1,294 @@
package podmandev
import (
"testing"
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
"github.com/devfile/library/pkg/devfile/parser"
"github.com/devfile/library/pkg/devfile/parser/data"
"github.com/kylelemons/godebug/pretty"
"github.com/redhat-developer/odo/pkg/api"
"github.com/redhat-developer/odo/pkg/libdevfile/generator"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
)
var (
devfileName = "mycmp"
appName = "app"
command = generator.GetExecCommand(generator.ExecCommandParams{
Id: "run",
Component: "mycomponent",
CommandLine: "./run",
IsDefault: pointer.Bool(true),
Kind: v1alpha2.RunCommandGroupKind,
})
baseComponent = generator.GetContainerComponent(generator.ContainerComponentParams{
Name: "mycomponent",
Container: v1alpha2.Container{
Image: "myimage",
},
})
basePod = &corev1.Pod{
TypeMeta: v1.TypeMeta{
APIVersion: "v1",
Kind: "Pod",
},
ObjectMeta: v1.ObjectMeta{
Name: "mycmp-app",
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Args: []string{"-f", "/dev/null"},
Command: []string{"tail"},
Env: []corev1.EnvVar{
{
Name: "PROJECTS_ROOT",
Value: "/projects",
},
{
Name: "PROJECT_SOURCE",
Value: "/projects",
},
},
Image: "myimage",
ImagePullPolicy: "Always",
Name: "mycomponent",
VolumeMounts: []corev1.VolumeMount{
{
MountPath: "/projects",
Name: "odo-projects",
},
{
MountPath: "/opt/odo/",
Name: "odo-shared-data",
},
},
},
},
Volumes: []corev1.Volume{
{
Name: "odo-projects",
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: "odo-projects-mycmp-app-source",
},
},
},
{
Name: "odo-shared-data",
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: "odo-projects-mycmp-app-shared",
},
},
},
},
},
}
)
func Test_createPodFromComponent(t *testing.T) {
type args struct {
devfileObj func() parser.DevfileObj
componentName string
appName string
buildCommand string
runCommand string
debugCommand string
}
tests := []struct {
name string
args args
wantPod func() *corev1.Pod
wantFwPorts []api.ForwardedPort
wantErr bool
}{
{
name: "basic component without command",
args: args{
devfileObj: func() parser.DevfileObj {
data, _ := data.NewDevfileData(string(data.APISchemaVersion200))
_ = data.AddCommands([]v1alpha2.Command{command})
_ = data.AddComponents([]v1alpha2.Component{baseComponent})
return parser.DevfileObj{
Data: data,
}
},
componentName: devfileName,
appName: appName,
},
wantPod: func() *corev1.Pod {
pod := basePod.DeepCopy()
return pod
},
},
{
name: "basic component with command",
args: args{
devfileObj: func() parser.DevfileObj {
data, _ := data.NewDevfileData(string(data.APISchemaVersion200))
_ = data.AddCommands([]v1alpha2.Command{command})
cmp := baseComponent.DeepCopy()
cmp.Container.Command = []string{"./cmd"}
cmp.Container.Args = []string{"arg1", "arg2"}
_ = data.AddComponents([]v1alpha2.Component{*cmp})
return parser.DevfileObj{
Data: data,
}
},
componentName: devfileName,
appName: appName,
},
wantPod: func() *corev1.Pod {
pod := basePod.DeepCopy()
pod.Spec.Containers[0].Command = []string{"./cmd"}
pod.Spec.Containers[0].Args = []string{"arg1", "arg2"}
return pod
},
},
{
name: "basic component + memory limit",
args: args{
devfileObj: func() parser.DevfileObj {
data, _ := data.NewDevfileData(string(data.APISchemaVersion200))
_ = data.AddCommands([]v1alpha2.Command{command})
cmp := baseComponent.DeepCopy()
cmp.Container.MemoryLimit = "1Gi"
_ = data.AddComponents([]v1alpha2.Component{*cmp})
return parser.DevfileObj{
Data: data,
}
},
componentName: devfileName,
appName: appName,
},
wantPod: func() *corev1.Pod {
pod := basePod.DeepCopy()
pod.Spec.Containers[0].Resources.Limits = corev1.ResourceList{
"memory": resource.MustParse("1Gi"),
}
return pod
},
},
{
name: "basic component + application endpoint",
args: args{
devfileObj: func() parser.DevfileObj {
data, _ := data.NewDevfileData(string(data.APISchemaVersion200))
_ = data.AddCommands([]v1alpha2.Command{command})
cmp := baseComponent.DeepCopy()
cmp.Container.Endpoints = append(cmp.Container.Endpoints, v1alpha2.Endpoint{
Name: "http",
TargetPort: 8080,
})
_ = data.AddComponents([]v1alpha2.Component{*cmp})
return parser.DevfileObj{
Data: data,
}
},
componentName: devfileName,
appName: appName,
},
wantPod: func() *corev1.Pod {
pod := basePod.DeepCopy()
pod.Spec.Containers[0].Ports = append(pod.Spec.Containers[0].Ports, corev1.ContainerPort{
Name: "http",
ContainerPort: 8080,
Protocol: "TCP",
HostPort: 39001,
})
return pod
},
wantFwPorts: []api.ForwardedPort{
{
ContainerName: "mycomponent",
LocalAddress: "127.0.0.1",
LocalPort: 39001,
ContainerPort: 8080,
},
},
},
{
name: "basic component + application endpoint + debug endpoint",
args: args{
devfileObj: func() parser.DevfileObj {
data, _ := data.NewDevfileData(string(data.APISchemaVersion200))
_ = data.AddCommands([]v1alpha2.Command{command})
cmp := baseComponent.DeepCopy()
cmp.Container.Endpoints = append(cmp.Container.Endpoints, v1alpha2.Endpoint{
Name: "http",
TargetPort: 8080,
})
cmp.Container.Endpoints = append(cmp.Container.Endpoints, v1alpha2.Endpoint{
Name: "debug",
TargetPort: 5858,
})
_ = data.AddComponents([]v1alpha2.Component{*cmp})
return parser.DevfileObj{
Data: data,
}
},
componentName: devfileName,
appName: appName,
},
wantPod: func() *corev1.Pod {
pod := basePod.DeepCopy()
pod.Spec.Containers[0].Ports = append(pod.Spec.Containers[0].Ports, corev1.ContainerPort{
Name: "http",
ContainerPort: 8080,
Protocol: "TCP",
HostPort: 39001,
})
pod.Spec.Containers[0].Ports = append(pod.Spec.Containers[0].Ports, corev1.ContainerPort{
Name: "debug",
ContainerPort: 5858,
Protocol: "TCP",
HostPort: 39002,
})
return pod
},
wantFwPorts: []api.ForwardedPort{
{
ContainerName: "mycomponent",
LocalAddress: "127.0.0.1",
LocalPort: 39001,
ContainerPort: 8080,
},
{
ContainerName: "mycomponent",
LocalAddress: "127.0.0.1",
LocalPort: 39002,
ContainerPort: 5858,
},
},
},
// TODO: Add test cases.
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, gotFwPorts, err := createPodFromComponent(tt.args.devfileObj(), tt.args.componentName, tt.args.appName, tt.args.buildCommand, tt.args.runCommand, tt.args.debugCommand)
if (err != nil) != tt.wantErr {
t.Errorf("createPodFromComponent() error = %v, wantErr %v", err, tt.wantErr)
return
}
want := tt.wantPod()
if !equality.Semantic.DeepEqual(got, want) {
t.Errorf("createPodFromComponent() pod: %s", pretty.Compare(want, got))
}
if !equality.Semantic.DeepEqual(gotFwPorts, tt.wantFwPorts) {
t.Errorf("createPodFromComponent() fwPorts: %s", pretty.Compare(tt.wantFwPorts, gotFwPorts))
}
})
}
}


@@ -0,0 +1,271 @@
package podmandev
import (
"context"
"fmt"
"io"
"path/filepath"
"strings"
devfilev1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
"github.com/fatih/color"
"github.com/redhat-developer/odo/pkg/api"
"github.com/redhat-developer/odo/pkg/component"
"github.com/redhat-developer/odo/pkg/dev"
"github.com/redhat-developer/odo/pkg/dev/common"
"github.com/redhat-developer/odo/pkg/exec"
"github.com/redhat-developer/odo/pkg/libdevfile"
"github.com/redhat-developer/odo/pkg/log"
odocontext "github.com/redhat-developer/odo/pkg/odo/context"
"github.com/redhat-developer/odo/pkg/podman"
"github.com/redhat-developer/odo/pkg/state"
"github.com/redhat-developer/odo/pkg/sync"
corev1 "k8s.io/api/core/v1"
"k8s.io/klog"
)
const (
PromptMessage = `
[Ctrl+c] - Exit and delete resources from podman
`
)
type DevClient struct {
podmanClient podman.Client
syncClient sync.Client
execClient exec.Client
stateClient state.Client
}
var _ dev.Client = (*DevClient)(nil)
func NewDevClient(
podmanClient podman.Client,
syncClient sync.Client,
execClient exec.Client,
stateClient state.Client,
) *DevClient {
return &DevClient{
podmanClient: podmanClient,
syncClient: syncClient,
execClient: execClient,
stateClient: stateClient,
}
}
func (o *DevClient) Start(
ctx context.Context,
out io.Writer,
errOut io.Writer,
options dev.StartOptions,
) error {
var (
appName = odocontext.GetApplication(ctx)
componentName = odocontext.GetComponentName(ctx)
devfileObj = odocontext.GetDevfileObj(ctx)
devfilePath = odocontext.GetDevfilePath(ctx)
path = filepath.Dir(devfilePath)
)
pod, fwPorts, err := o.deployPod(ctx, options)
if err != nil {
return err
}
for _, fwPort := range fwPorts {
s := fmt.Sprintf("Forwarding from %s:%d -> %d", fwPort.LocalAddress, fwPort.LocalPort, fwPort.ContainerPort)
fmt.Fprintf(out, " - %s", log.SboldColor(color.FgGreen, s))
}
err = o.stateClient.SetForwardedPorts(fwPorts)
if err != nil {
return err
}
execRequired, err := o.syncFiles(ctx, options, pod, path)
if err != nil {
return err
}
// PostStart events from the devfile will only be executed when the component
// didn't previously exist
if libdevfile.HasPostStartEvents(*devfileObj) {
execHandler := component.NewExecHandler(
o.podmanClient,
o.execClient,
appName,
componentName,
pod.Name,
"",
false, /* TODO */
)
err = libdevfile.ExecPostStartEvents(*devfileObj, execHandler)
if err != nil {
return err
}
}
if execRequired {
doExecuteBuildCommand := func() error {
execHandler := component.NewExecHandler(
o.podmanClient,
o.execClient,
appName,
componentName,
pod.Name,
"Building your application in container",
false, /* TODO */
)
return libdevfile.Build(*devfileObj, options.BuildCommand, execHandler)
}
err = doExecuteBuildCommand()
if err != nil {
return err
}
cmdKind := devfilev1.RunCommandGroupKind
cmdName := options.RunCommand
if options.Debug {
cmdKind = devfilev1.DebugCommandGroupKind
cmdName = options.DebugCommand
}
cmdHandler := commandHandler{
execClient: o.execClient,
platformClient: o.podmanClient,
componentExists: false, // TODO
podName: pod.Name,
appName: appName,
componentName: componentName,
}
err = libdevfile.ExecuteCommandByNameAndKind(*devfileObj, cmdName, cmdKind, &cmdHandler, false)
if err != nil {
return err
}
}
fmt.Fprintf(
out,
" %s%s",
log.Sbold("Keyboard Commands:"),
PromptMessage,
)
<-ctx.Done()
fmt.Printf("Cleaning up resources\n")
err = o.podmanClient.PodStop(pod.GetName())
if err != nil {
return err
}
err = o.podmanClient.PodRm(pod.GetName())
if err != nil {
return err
}
for _, volume := range pod.Spec.Volumes {
if volume.PersistentVolumeClaim == nil {
continue
}
volumeName := volume.PersistentVolumeClaim.ClaimName
klog.V(3).Infof("deleting podman volume %q", volumeName)
err = o.podmanClient.VolumeRm(volumeName)
if err != nil {
return err
}
}
return nil
}
// deployPod deploys the component as a Pod in podman
func (o *DevClient) deployPod(ctx context.Context, options dev.StartOptions) (*corev1.Pod, []api.ForwardedPort, error) {
var (
appName = odocontext.GetApplication(ctx)
componentName = odocontext.GetComponentName(ctx)
devfileObj = odocontext.GetDevfileObj(ctx)
)
spinner := log.Spinner("Deploying pod")
defer spinner.End(false)
pod, fwPorts, err := createPodFromComponent(
*devfileObj,
componentName,
appName,
options.BuildCommand,
options.RunCommand,
"",
)
if err != nil {
return nil, nil, err
}
err = o.checkVolumesFree(pod)
if err != nil {
return nil, nil, err
}
err = o.podmanClient.PlayKube(pod)
if err != nil {
return nil, nil, err
}
spinner.End(true)
return pod, fwPorts, nil
}
// syncFiles syncs the local source files in path into the pod's source volume
func (o *DevClient) syncFiles(ctx context.Context, options dev.StartOptions, pod *corev1.Pod, path string) (bool, error) {
var (
componentName = odocontext.GetComponentName(ctx)
)
containerName, syncFolder, err := common.GetFirstContainerWithSourceVolume(pod.Spec.Containers)
if err != nil {
return false, fmt.Errorf("error while retrieving container from pod %s with a mounted project volume: %w", pod.GetName(), err)
}
compInfo := sync.ComponentInfo{
ComponentName: componentName,
ContainerName: containerName,
PodName: pod.GetName(),
SyncFolder: syncFolder,
}
syncParams := sync.SyncParameters{
Path: path,
WatchFiles: nil,
WatchDeletedFiles: nil,
IgnoredFiles: options.IgnorePaths,
DevfileScanIndexForWatch: true,
CompInfo: compInfo,
ForcePush: true,
Files: map[string]string{}, // ??? TODO
}
execRequired, err := o.syncClient.SyncFiles(syncParams)
if err != nil {
return false, err
}
return execRequired, nil
}
// checkVolumesFree checks that all persistent volumes declared in pod
// are not using an existing volume
func (o *DevClient) checkVolumesFree(pod *corev1.Pod) error {
existingVolumesSet, err := o.podmanClient.VolumeLs()
if err != nil {
return err
}
var problematicVolumes []string
for _, volume := range pod.Spec.Volumes {
if volume.PersistentVolumeClaim != nil && existingVolumesSet[volume.PersistentVolumeClaim.ClaimName] {
problematicVolumes = append(problematicVolumes, volume.PersistentVolumeClaim.ClaimName)
}
}
if len(problematicVolumes) > 0 {
return fmt.Errorf("volumes already exist, please remove them before to run odo dev: %s", strings.Join(problematicVolumes, ", "))
}
return nil
}


@@ -17,6 +17,7 @@ import (
"github.com/redhat-developer/odo/pkg/binding"
"github.com/redhat-developer/odo/pkg/component"
"github.com/redhat-developer/odo/pkg/dev/common"
"github.com/redhat-developer/odo/pkg/devfile"
"github.com/redhat-developer/odo/pkg/devfile/adapters"
"github.com/redhat-developer/odo/pkg/devfile/adapters/kubernetes/storage"
@@ -209,7 +210,7 @@ func (a Adapter) Push(ctx context.Context, parameters adapters.PushParameters, c
}
// Find at least one pod with the source volume mounted, error out if none can be found
containerName, syncFolder, err := getFirstContainerWithSourceVolume(pod.Spec.Containers)
containerName, syncFolder, err := common.GetFirstContainerWithSourceVolume(pod.Spec.Containers)
if err != nil {
return fmt.Errorf("error while retrieving container from pod %s with a mounted project volume: %w", pod.GetName(), err)
}
@@ -754,23 +755,6 @@ func (a Adapter) deleteServiceBindingSecrets(serviceBindingSecretsToRemove []uns
return nil
}
// getFirstContainerWithSourceVolume returns the first container that set mountSources: true as well
// as the path to the source volume inside the container.
// Because the source volume is shared across all components that need it, we only need to sync once,
// so we only need to find one container. If no container was found, that means there's no
// container to sync to, so return an error
func getFirstContainerWithSourceVolume(containers []corev1.Container) (string, string, error) {
for _, c := range containers {
for _, env := range c.Env {
if env.Name == generator.EnvProjectsSrc {
return c.Name, env.Value, nil
}
}
}
return "", "", fmt.Errorf("in order to sync files, odo requires at least one component in a devfile to set 'mountSources: true'")
}
// PushCommandsMap stores the commands to be executed as per their types.
type PushCommandsMap map[devfilev1.CommandGroupKind]devfilev1.Command


@@ -21,7 +21,6 @@ import (
odoTestingUtil "github.com/redhat-developer/odo/pkg/testingutil"
v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ktesting "k8s.io/client-go/testing"
@@ -133,110 +132,6 @@ func TestCreateOrUpdateComponent(t *testing.T) {
}
func TestGetFirstContainerWithSourceVolume(t *testing.T) {
tests := []struct {
name string
containers []corev1.Container
want string
wantSourcePath string
wantErr bool
}{
{
name: "Case: One container, Project Source Env",
containers: []corev1.Container{
{
Name: "test",
Env: []corev1.EnvVar{
{
Name: "RANDOMENV",
Value: "/mypath2",
},
{
Name: generator.EnvProjectsSrc,
Value: "/mypath",
},
},
},
},
want: "test",
wantSourcePath: "/mypath",
wantErr: false,
},
{
name: "Case: Multiple containers, multiple Project Source Env",
containers: []corev1.Container{
{
Name: "test1",
Env: []corev1.EnvVar{
{
Name: "RANDOMENV",
Value: "/mypath1",
},
{
Name: generator.EnvProjectsSrc,
Value: "/mypath1",
},
},
},
{
Name: "test2",
Env: []corev1.EnvVar{
{
Name: "RANDOMENV",
Value: "/mypath2",
},
{
Name: generator.EnvProjectsSrc,
Value: "/mypath2",
},
},
},
},
want: "test1",
wantSourcePath: "/mypath1",
wantErr: false,
},
{
name: "Case: Multiple containers, no Project Source Env",
containers: []corev1.Container{
{
Name: "test1",
Env: []corev1.EnvVar{
{
Name: "RANDOMENV",
Value: "/mypath1",
},
},
},
{
Name: "test2",
Env: []corev1.EnvVar{
{
Name: "RANDOMENV",
Value: "/mypath2",
},
},
},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
container, syncFolder, err := getFirstContainerWithSourceVolume(tt.containers)
if container != tt.want {
t.Errorf("expected %s, actual %s", tt.want, container)
}
if syncFolder != tt.wantSourcePath {
t.Errorf("expected %s, actual %s", tt.wantSourcePath, syncFolder)
}
if !tt.wantErr == (err != nil) {
t.Errorf("expected %v, actual %v", tt.wantErr, err)
}
})
}
}
func getExecCommand(id string, group devfilev1.CommandGroupKind) devfilev1.Command {
commands := [...]string{"ls -la", "pwd"}


@@ -29,7 +29,9 @@ import (
)
// RecommendedCommandName is the recommended command name
const RecommendedCommandName = "dev"
const (
RecommendedCommandName = "dev"
)
type DevOptions struct {
// Clients
@@ -108,11 +110,25 @@ func (o *DevOptions) Run(ctx context.Context) (err error) {
path = filepath.Dir(devfilePath)
componentName = odocontext.GetComponentName(ctx)
variables = fcontext.GetVariables(ctx)
platform = fcontext.GetRunOn(ctx)
)
var dest string
var deployingTo string
switch platform {
case commonflags.RunOnPodman:
dest = "Platform: podman"
deployingTo = "podman"
case commonflags.RunOnCluster:
dest = "Namespace: " + odocontext.GetNamespace(ctx)
deployingTo = "the cluster"
default:
panic(fmt.Errorf("platform %s is not implemented", platform))
}
// Output what the command is doing / information
log.Title("Developing using the \""+componentName+"\" Devfile",
"Namespace: "+odocontext.GetNamespace(ctx),
dest,
"odo version: "+version.VERSION)
// check for .gitignore file and add odo-file-index.json to .gitignore
@@ -140,7 +156,8 @@ func (o *DevOptions) Run(ctx context.Context) (err error) {
scontext.SetProjectType(ctx, devFileObj.Data.GetMetadata().ProjectType)
scontext.SetDevfileName(ctx, componentName)
log.Section("Deploying to the cluster in developer mode")
log.Sectionf("Deploying to %s in developer mode", deployingTo)
return o.clientset.DevClient.Start(
o.ctx,
o.out,
@@ -166,10 +183,9 @@ func (o *DevOptions) HandleSignal() error {
func (o *DevOptions) Cleanup(ctx context.Context, commandError error) {
if commandError != nil {
devFileObj := odocontext.GetDevfileObj(ctx)
componentName := odocontext.GetComponentName(ctx)
_ = o.clientset.WatchClient.CleanupDevResources(ctx, *devFileObj, componentName, log.GetStdout())
_ = o.clientset.DevClient.CleanupResources(ctx, log.GetStdout())
}
_ = o.clientset.StateClient.SaveExit()
}
// NewCmdDev implements the odo dev command
@@ -196,16 +212,21 @@ It forwards endpoints with any exposure values ('public', 'internal' or 'none')
clientset.Add(devCmd,
clientset.BINDING,
clientset.DEV,
clientset.EXEC,
clientset.FILESYSTEM,
clientset.INIT,
clientset.KUBERNETES,
clientset.KUBERNETES_NULLABLE,
clientset.PODMAN,
clientset.PORT_FORWARD,
clientset.PREFERENCE,
clientset.STATE,
clientset.SYNC,
clientset.WATCH,
)
// Add a defined annotation in order to appear in the help menu
devCmd.Annotations["command"] = "main"
devCmd.SetUsageTemplate(odoutil.CmdUsageTemplate)
commonflags.UseVariablesFlags(devCmd)
commonflags.UseRunOnFlag(devCmd)
return devCmd
}


@@ -16,9 +16,12 @@ import (
"github.com/spf13/cobra"
"github.com/redhat-developer/odo/pkg/dev/kubedev"
"github.com/redhat-developer/odo/pkg/dev/podmandev"
"github.com/redhat-developer/odo/pkg/exec"
"github.com/redhat-developer/odo/pkg/logs"
"github.com/redhat-developer/odo/pkg/odo/commonflags"
"github.com/redhat-developer/odo/pkg/podman"
"github.com/redhat-developer/odo/pkg/portForward"
"github.com/redhat-developer/odo/pkg/sync"
@@ -61,6 +64,8 @@ const (
KUBERNETES = "DEP_KUBERNETES"
// LOGS instantiates client for pkg/logs
LOGS = "DEP_LOGS"
// PODMAN instantiates client for pkg/podman
PODMAN = "DEP_PODMAN"
// PORT_FORWARD instantiates client for pkg/portForward
PORT_FORWARD = "PORT_FORWARD"
// PREFERENCE instantiates client for pkg/preference
@@ -82,19 +87,19 @@ const (
// Clients will be created only once and be reused for sub-dependencies
var subdeps map[string][]string = map[string][]string{
ALIZER: {REGISTRY},
DELETE_COMPONENT: {KUBERNETES, EXEC},
DELETE_COMPONENT: {KUBERNETES_NULLABLE, EXEC},
DEPLOY: {KUBERNETES, FILESYSTEM},
DEV: {BINDING, EXEC, FILESYSTEM, KUBERNETES, PORT_FORWARD, PREFERENCE, SYNC, WATCH},
EXEC: {KUBERNETES},
DEV: {BINDING, DELETE_COMPONENT, EXEC, FILESYSTEM, KUBERNETES_NULLABLE, PODMAN, PORT_FORWARD, PREFERENCE, STATE, SYNC, WATCH},
EXEC: {KUBERNETES_NULLABLE},
INIT: {ALIZER, FILESYSTEM, PREFERENCE, REGISTRY},
LOGS: {KUBERNETES},
PORT_FORWARD: {KUBERNETES, STATE},
LOGS: {KUBERNETES_NULLABLE, PODMAN},
PORT_FORWARD: {KUBERNETES_NULLABLE, STATE},
PROJECT: {KUBERNETES_NULLABLE},
REGISTRY: {FILESYSTEM, PREFERENCE},
STATE: {FILESYSTEM},
SYNC: {EXEC, KUBERNETES},
WATCH: {KUBERNETES, DELETE_COMPONENT, STATE},
BINDING: {PROJECT, KUBERNETES},
SYNC: {EXEC},
WATCH: {KUBERNETES_NULLABLE},
BINDING: {PROJECT, KUBERNETES_NULLABLE},
/* Add sub-dependencies here, if any */
}
@@ -109,6 +114,7 @@ type Clientset struct {
InitClient _init.Client
KubernetesClient kclient.ClientInterface
LogsClient logs.Client
PodmanClient podman.Client
PortForwardClient portForward.Client
PreferenceClient preference.Client
ProjectClient project.Client
@@ -148,9 +154,16 @@ func Fetch(command *cobra.Command, platform string) (*Clientset, error) {
}
if isDefined(command, KUBERNETES) || isDefined(command, KUBERNETES_NULLABLE) {
dep.KubernetesClient, err = kclient.New()
if err != nil && isDefined(command, KUBERNETES) {
return nil, err
if err != nil {
if isDefined(command, KUBERNETES) {
return nil, err
}
dep.KubernetesClient = nil
}
}
if isDefined(command, PODMAN) {
dep.PodmanClient = podman.NewPodmanCli()
}
if isDefined(command, PREFERENCE) {
dep.PreferenceClient, err = preference.NewClient(command.Context())
@@ -170,6 +183,8 @@ func Fetch(command *cobra.Command, platform string) (*Clientset, error) {
switch platform {
case commonflags.RunOnCluster:
dep.ExecClient = exec.NewExecClient(dep.KubernetesClient)
case commonflags.RunOnPodman:
dep.ExecClient = exec.NewExecClient(dep.PodmanClient)
default:
panic(fmt.Sprintf("not implemented yet for platform %q", platform))
}
@@ -187,6 +202,8 @@ func Fetch(command *cobra.Command, platform string) (*Clientset, error) {
switch platform {
case commonflags.RunOnCluster:
dep.LogsClient = logs.NewLogsClient(dep.KubernetesClient)
case commonflags.RunOnPodman:
dep.LogsClient = logs.NewLogsClient(dep.PodmanClient)
default:
panic(fmt.Sprintf("not implemented yet for platform %q", platform))
}
@@ -198,10 +215,17 @@ func Fetch(command *cobra.Command, platform string) (*Clientset, error) {
dep.StateClient = state.NewStateClient(dep.FS)
}
if isDefined(command, SYNC) {
dep.SyncClient = sync.NewSyncClient(dep.KubernetesClient, dep.ExecClient)
switch platform {
case commonflags.RunOnCluster:
dep.SyncClient = sync.NewSyncClient(dep.KubernetesClient, dep.ExecClient)
case commonflags.RunOnPodman:
dep.SyncClient = sync.NewSyncClient(dep.PodmanClient, dep.ExecClient)
default:
panic(fmt.Sprintf("not implemented yet for platform %q", platform))
}
}
if isDefined(command, WATCH) {
dep.WatchClient = watch.NewWatchClient(dep.KubernetesClient, dep.DeleteClient, dep.StateClient)
dep.WatchClient = watch.NewWatchClient(dep.KubernetesClient)
}
if isDefined(command, BINDING) {
dep.BindingClient = binding.NewBindingClient(dep.ProjectClient, dep.KubernetesClient)
@@ -210,7 +234,29 @@ func Fetch(command *cobra.Command, platform string) (*Clientset, error) {
dep.PortForwardClient = portForward.NewPFClient(dep.KubernetesClient, dep.StateClient)
}
if isDefined(command, DEV) {
dep.DevClient = dev.NewDevClient(dep.KubernetesClient, dep.PreferenceClient, dep.PortForwardClient, dep.WatchClient, dep.BindingClient, dep.SyncClient, dep.FS, dep.ExecClient)
switch platform {
case commonflags.RunOnCluster:
dep.DevClient = kubedev.NewDevClient(
dep.KubernetesClient,
dep.PreferenceClient,
dep.PortForwardClient,
dep.WatchClient,
dep.BindingClient,
dep.SyncClient,
dep.FS,
dep.ExecClient,
dep.DeleteClient,
)
case commonflags.RunOnPodman:
dep.DevClient = podmandev.NewDevClient(
dep.PodmanClient,
dep.SyncClient,
dep.ExecClient,
dep.StateClient,
)
default:
panic(fmt.Sprintf("not implemented yet for platform %q", platform))
}
}
/* Instantiate new clients here. Take care to instantiate after all sub-dependencies */

pkg/podman/exec.go (new file, 34 lines)

@@ -0,0 +1,34 @@
package podman
import (
"fmt"
"io"
"os/exec"
"k8s.io/klog"
)
func (o *PodmanCli) ExecCMDInContainer(containerName, podName string, cmd []string, stdout io.Writer, stderr io.Writer, stdin io.Reader, tty bool) error {
options := []string{}
if tty {
options = append(options, "--tty")
}
name := fmt.Sprintf("%s-%s", podName, containerName)
args := []string{"exec", "--interactive"}
args = append(args, options...)
args = append(args, name)
args = append(args, cmd...)
command := exec.Command("podman", args...)
command.Stdin = stdin
klog.V(4).Infof("exec podman %v\n", args)
out, err := command.Output()
if err != nil {
return err
}
_, err = stdout.Write(out)
return err
}
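For illustration only (not part of the commit), here is a minimal sketch of driving this method directly. It assumes a pod named mycmp-app with a container mycomponent has already been created by odo dev on Podman; these names come from the unit tests above and are assumptions.

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/redhat-developer/odo/pkg/podman"
)

func main() {
	cli := podman.NewPodmanCli()
	var stdout bytes.Buffer
	// Runs `podman exec --interactive mycmp-app-mycomponent ls /projects`
	// and copies the command's output into the buffer.
	err := cli.ExecCMDInContainer("mycomponent", "mycmp-app", []string{"ls", "/projects"}, &stdout, nil, nil, false)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(stdout.String())
}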

pkg/podman/interface.go (new file, 44 lines)

@@ -0,0 +1,44 @@
package podman
import (
"io"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
type Client interface {
// PlayKube creates the Pod with Podman
PlayKube(pod *corev1.Pod) error
// PodStop stops the pod with given podname
PodStop(podname string) error
// PodRm deletes the pod with given podname
PodRm(podname string) error
// VolumeLs lists the names of existing volumes
VolumeLs() (map[string]bool, error)
// VolumeRm deletes the volume with given volumeName
VolumeRm(volumeName string) error
ExecCMDInContainer(containerName, podName string, cmd []string, stdout io.Writer, stderr io.Writer, stdin io.Reader, tty bool) error
// GetPodLogs returns the logs of the specified pod container.
// All logs for all containers part of the pod are returned if an empty string is provided as container name.
GetPodLogs(podName, containerName string, followLog bool) (io.ReadCloser, error)
// GetPodsMatchingSelector returns all pods matching the given label selector.
GetPodsMatchingSelector(selector string) (*corev1.PodList, error)
// GetAllResourcesFromSelector returns all resources of any kind matching the given label selector.
GetAllResourcesFromSelector(selector string, ns string) ([]unstructured.Unstructured, error)
// GetAllPodsInNamespaceMatchingSelector returns all pods matching the given label selector and in the specified namespace.
GetAllPodsInNamespaceMatchingSelector(selector string, ns string) (*corev1.PodList, error)
// GetRunningPodFromSelector returns any pod matching the given label selector.
// If multiple pods are found, implementations might have different behavior, by either returning an error or returning any element.
GetRunningPodFromSelector(selector string) (*corev1.Pod, error)
}

pkg/podman/logs.go (new file, 10 lines)

@@ -0,0 +1,10 @@
package podman
import "io"
// GetPodLogs returns the logs of the specified pod container.
// All logs for all containers part of the pod are returned if an empty string is provided as container name.
func (o *PodmanCli) GetPodLogs(podName, containerName string, followLog bool) (io.ReadCloser, error) {
// TODO(feloy)
return nil, nil
}

pkg/podman/mock.go (new file, 197 lines)

@@ -0,0 +1,197 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: pkg/podman/interface.go
// Package podman is a generated GoMock package.
package podman
import (
io "io"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
v1 "k8s.io/api/core/v1"
unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// MockClient is a mock of Client interface.
type MockClient struct {
ctrl *gomock.Controller
recorder *MockClientMockRecorder
}
// MockClientMockRecorder is the mock recorder for MockClient.
type MockClientMockRecorder struct {
mock *MockClient
}
// NewMockClient creates a new mock instance.
func NewMockClient(ctrl *gomock.Controller) *MockClient {
mock := &MockClient{ctrl: ctrl}
mock.recorder = &MockClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockClient) EXPECT() *MockClientMockRecorder {
return m.recorder
}
// ExecCMDInContainer mocks base method.
func (m *MockClient) ExecCMDInContainer(containerName, podName string, cmd []string, stdout, stderr io.Writer, stdin io.Reader, tty bool) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ExecCMDInContainer", containerName, podName, cmd, stdout, stderr, stdin, tty)
ret0, _ := ret[0].(error)
return ret0
}
// ExecCMDInContainer indicates an expected call of ExecCMDInContainer.
func (mr *MockClientMockRecorder) ExecCMDInContainer(containerName, podName, cmd, stdout, stderr, stdin, tty interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecCMDInContainer", reflect.TypeOf((*MockClient)(nil).ExecCMDInContainer), containerName, podName, cmd, stdout, stderr, stdin, tty)
}
// GetAllPodsInNamespaceMatchingSelector mocks base method.
func (m *MockClient) GetAllPodsInNamespaceMatchingSelector(selector, ns string) (*v1.PodList, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetAllPodsInNamespaceMatchingSelector", selector, ns)
ret0, _ := ret[0].(*v1.PodList)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetAllPodsInNamespaceMatchingSelector indicates an expected call of GetAllPodsInNamespaceMatchingSelector.
func (mr *MockClientMockRecorder) GetAllPodsInNamespaceMatchingSelector(selector, ns interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllPodsInNamespaceMatchingSelector", reflect.TypeOf((*MockClient)(nil).GetAllPodsInNamespaceMatchingSelector), selector, ns)
}
// GetAllResourcesFromSelector mocks base method.
func (m *MockClient) GetAllResourcesFromSelector(selector, ns string) ([]unstructured.Unstructured, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetAllResourcesFromSelector", selector, ns)
ret0, _ := ret[0].([]unstructured.Unstructured)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetAllResourcesFromSelector indicates an expected call of GetAllResourcesFromSelector.
func (mr *MockClientMockRecorder) GetAllResourcesFromSelector(selector, ns interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllResourcesFromSelector", reflect.TypeOf((*MockClient)(nil).GetAllResourcesFromSelector), selector, ns)
}
// GetPodLogs mocks base method.
func (m *MockClient) GetPodLogs(podName, containerName string, followLog bool) (io.ReadCloser, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetPodLogs", podName, containerName, followLog)
ret0, _ := ret[0].(io.ReadCloser)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetPodLogs indicates an expected call of GetPodLogs.
func (mr *MockClientMockRecorder) GetPodLogs(podName, containerName, followLog interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodLogs", reflect.TypeOf((*MockClient)(nil).GetPodLogs), podName, containerName, followLog)
}
// GetPodsMatchingSelector mocks base method.
func (m *MockClient) GetPodsMatchingSelector(selector string) (*v1.PodList, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetPodsMatchingSelector", selector)
ret0, _ := ret[0].(*v1.PodList)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetPodsMatchingSelector indicates an expected call of GetPodsMatchingSelector.
func (mr *MockClientMockRecorder) GetPodsMatchingSelector(selector interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodsMatchingSelector", reflect.TypeOf((*MockClient)(nil).GetPodsMatchingSelector), selector)
}
// GetRunningPodFromSelector mocks base method.
func (m *MockClient) GetRunningPodFromSelector(selector string) (*v1.Pod, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetRunningPodFromSelector", selector)
ret0, _ := ret[0].(*v1.Pod)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetRunningPodFromSelector indicates an expected call of GetRunningPodFromSelector.
func (mr *MockClientMockRecorder) GetRunningPodFromSelector(selector interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRunningPodFromSelector", reflect.TypeOf((*MockClient)(nil).GetRunningPodFromSelector), selector)
}
// PlayKube mocks base method.
func (m *MockClient) PlayKube(pod *v1.Pod) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PlayKube", pod)
ret0, _ := ret[0].(error)
return ret0
}
// PlayKube indicates an expected call of PlayKube.
func (mr *MockClientMockRecorder) PlayKube(pod interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PlayKube", reflect.TypeOf((*MockClient)(nil).PlayKube), pod)
}
// PodRm mocks base method.
func (m *MockClient) PodRm(podname string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PodRm", podname)
ret0, _ := ret[0].(error)
return ret0
}
// PodRm indicates an expected call of PodRm.
func (mr *MockClientMockRecorder) PodRm(podname interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PodRm", reflect.TypeOf((*MockClient)(nil).PodRm), podname)
}
// PodStop mocks base method.
func (m *MockClient) PodStop(podname string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PodStop", podname)
ret0, _ := ret[0].(error)
return ret0
}
// PodStop indicates an expected call of PodStop.
func (mr *MockClientMockRecorder) PodStop(podname interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PodStop", reflect.TypeOf((*MockClient)(nil).PodStop), podname)
}
// VolumeLs mocks base method.
func (m *MockClient) VolumeLs() (map[string]bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "VolumeLs")
ret0, _ := ret[0].(map[string]bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// VolumeLs indicates an expected call of VolumeLs.
func (mr *MockClientMockRecorder) VolumeLs() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VolumeLs", reflect.TypeOf((*MockClient)(nil).VolumeLs))
}
// VolumeRm mocks base method.
func (m *MockClient) VolumeRm(volumeName string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "VolumeRm", volumeName)
ret0, _ := ret[0].(error)
return ret0
}
// VolumeRm indicates an expected call of VolumeRm.
func (mr *MockClientMockRecorder) VolumeRm(volumeName interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VolumeRm", reflect.TypeOf((*MockClient)(nil).VolumeRm), volumeName)
}

pkg/podman/podman.go (new file, 127 lines)

@@ -0,0 +1,127 @@
package podman
import (
"bufio"
"fmt"
"os/exec"
"strings"
corev1 "k8s.io/api/core/v1"
jsonserializer "k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/klog"
"k8s.io/kubectl/pkg/scheme"
)
type PodmanCli struct{}
func NewPodmanCli() *PodmanCli {
return &PodmanCli{}
}
func (o *PodmanCli) PlayKube(pod *corev1.Pod) error {
serializer := jsonserializer.NewSerializerWithOptions(
jsonserializer.SimpleMetaFactory{},
scheme.Scheme,
scheme.Scheme,
jsonserializer.SerializerOptions{
Yaml: true,
},
)
cmd := exec.Command("podman", "play", "kube", "-")
stdin, err := cmd.StdinPipe()
if err != nil {
return err
}
stdout, err := cmd.StdoutPipe()
if err != nil {
return err
}
cmd.Stderr = cmd.Stdout
if err = cmd.Start(); err != nil {
return err
}
err = serializer.Encode(pod, stdin)
if err != nil {
return err
}
stdin.Close()
go func() {
for {
tmp := make([]byte, 1024)
_, err = stdout.Read(tmp)
klog.V(4).Info(string(tmp))
if err != nil {
break
}
}
}()
if err = cmd.Wait(); err != nil {
if exiterr, ok := err.(*exec.ExitError); ok {
err = fmt.Errorf("%s: %s", err, string(exiterr.Stderr))
}
return err
}
return nil
}
func (o *PodmanCli) PodStop(podname string) error {
out, err := exec.Command("podman", "pod", "stop", podname).Output()
if err != nil {
if exiterr, ok := err.(*exec.ExitError); ok {
err = fmt.Errorf("%s: %s", err, string(exiterr.Stderr))
}
return err
}
klog.V(4).Infof("Stopped pod %s", string(out))
return nil
}
func (o *PodmanCli) PodRm(podname string) error {
out, err := exec.Command("podman", "pod", "rm", podname).Output()
if err != nil {
if exiterr, ok := err.(*exec.ExitError); ok {
err = fmt.Errorf("%s: %s", err, string(exiterr.Stderr))
}
return err
}
klog.V(4).Infof("Deleted pod %s", string(out))
return nil
}
func (o *PodmanCli) VolumeRm(volumeName string) error {
out, err := exec.Command("podman", "volume", "rm", volumeName).Output()
if err != nil {
if exiterr, ok := err.(*exec.ExitError); ok {
err = fmt.Errorf("%s: %s", err, string(exiterr.Stderr))
}
return err
}
klog.V(4).Infof("Deleted volume %s", string(out))
return nil
}
func (o *PodmanCli) VolumeLs() (map[string]bool, error) {
out, err := exec.Command("podman", "volume", "ls", "--format", "{{.Name}}", "--noheading").Output()
if err != nil {
if exiterr, ok := err.(*exec.ExitError); ok {
err = fmt.Errorf("%s: %s", err, string(exiterr.Stderr))
}
return nil, err
}
return SplitLinesAsSet(string(out)), nil
}
func SplitLinesAsSet(s string) map[string]bool {
lines := map[string]bool{}
sc := bufio.NewScanner(strings.NewReader(s))
for sc.Scan() {
lines[sc.Text()] = true
}
return lines
}

pkg/podman/pods.go (new file, 31 lines)

@@ -0,0 +1,31 @@
package podman
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// GetPodsMatchingSelector returns all pods matching the given label selector.
func (o *PodmanCli) GetPodsMatchingSelector(selector string) (*corev1.PodList, error) {
// TODO(feloy) when pod is created with labels
return nil, nil
}
// GetAllResourcesFromSelector returns all resources of any kind matching the given label selector.
func (o *PodmanCli) GetAllResourcesFromSelector(selector string, ns string) ([]unstructured.Unstructured, error) {
// TODO(feloy) when pod is created with labels
return nil, nil
}
// GetAllPodsInNamespaceMatchingSelector returns all pods matching the given label selector and in the specified namespace.
func (o *PodmanCli) GetAllPodsInNamespaceMatchingSelector(selector string, ns string) (*corev1.PodList, error) {
// TODO(feloy) when pod is created with labels
return nil, nil
}
// GetRunningPodFromSelector returns any pod matching the given label selector.
// If multiple pods are found, implementations might have different behavior, by either returning an error or returning any element.
func (o *PodmanCli) GetRunningPodFromSelector(selector string) (*corev1.Pod, error) {
// TODO(feloy) when pod is created with labels
return nil, nil
}


@@ -4,6 +4,8 @@ import (
"encoding/json"
"errors"
"io/fs"
"os"
"path/filepath"
"github.com/redhat-developer/odo/pkg/api"
"github.com/redhat-developer/odo/pkg/testingutil/filesystem"
@@ -51,6 +53,11 @@ func (o *State) save() error {
return err
}
// .odo directory is supposed to exist, don't create it
dir := filepath.Dir(_filepath)
err = os.MkdirAll(dir, 0750)
if err != nil {
return err
}
return o.fs.WriteFile(_filepath, jsonContent, 0644)
}


@@ -6,6 +6,7 @@ import (
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"strings"
@@ -66,6 +67,9 @@ func (a SyncClient) ExtractProjectToComponent(containerName, podName string, tar
log.Errorf("stdout: %s\n", stdout.String())
log.Errorf("stderr: %s\n", stderr.String())
log.Errorf("err: %s\n", err.Error())
if exiterr, ok := err.(*exec.ExitError); ok {
log.Errorf("exit err: %s\n", string(exiterr.Stderr))
}
}
return err
}


@@ -3,8 +3,6 @@ package watch
import (
"context"
"io"
"github.com/devfile/library/pkg/devfile/parser"
)
type Client interface {
@@ -14,6 +12,4 @@ type Client interface {
// parts of code (unfortunately, there is no place to store the status of the component in some Kubernetes resource
// as it is generally done for a Kubernetes resource)
WatchAndPush(out io.Writer, parameters WatchParameters, ctx context.Context, componentStatus ComponentStatus) error
// CleanupDevResources deletes the component created using the devfileObj and writes any outputs to out
CleanupDevResources(ctx context.Context, devfileObj parser.DevfileObj, componentName string, out io.Writer) error
}


@@ -9,7 +9,6 @@ import (
io "io"
reflect "reflect"
parser "github.com/devfile/library/pkg/devfile/parser"
gomock "github.com/golang/mock/gomock"
)
@@ -36,20 +35,6 @@ func (m *MockClient) EXPECT() *MockClientMockRecorder {
return m.recorder
}
// CleanupDevResources mocks base method.
func (m *MockClient) CleanupDevResources(ctx context.Context, devfileObj parser.DevfileObj, componentName string, out io.Writer) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CleanupDevResources", ctx, devfileObj, componentName, out)
ret0, _ := ret[0].(error)
return ret0
}
// CleanupDevResources indicates an expected call of CleanupDevResources.
func (mr *MockClientMockRecorder) CleanupDevResources(ctx, devfileObj, componentName, out interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupDevResources", reflect.TypeOf((*MockClient)(nil).CleanupDevResources), ctx, devfileObj, componentName, out)
}
// WatchAndPush mocks base method.
func (m *MockClient) WatchAndPush(out io.Writer, parameters WatchParameters, ctx context.Context, componentStatus ComponentStatus) error {
m.ctrl.T.Helper()


@@ -12,14 +12,11 @@ import (
"github.com/devfile/library/pkg/devfile/parser"
_delete "github.com/redhat-developer/odo/pkg/component/delete"
"github.com/redhat-developer/odo/pkg/devfile/adapters"
"github.com/redhat-developer/odo/pkg/kclient"
"github.com/redhat-developer/odo/pkg/labels"
"github.com/redhat-developer/odo/pkg/libdevfile"
"github.com/redhat-developer/odo/pkg/log"
odocontext "github.com/redhat-developer/odo/pkg/odo/context"
"github.com/redhat-developer/odo/pkg/state"
"github.com/fsnotify/fsnotify"
gitignore "github.com/sabhiram/go-gitignore"
@@ -42,9 +39,7 @@ const (
)
type WatchClient struct {
kubeClient kclient.ClientInterface
deleteClient _delete.Client
stateClient state.Client
kubeClient kclient.ClientInterface
sourcesWatcher *fsnotify.Watcher
deploymentWatcher watch.Interface
@@ -59,11 +54,9 @@ type WatchClient struct {
var _ Client = (*WatchClient)(nil)
func NewWatchClient(kubeClient kclient.ClientInterface, deleteClient _delete.Client, stateClient state.Client) *WatchClient {
func NewWatchClient(kubeClient kclient.ClientInterface) *WatchClient {
return &WatchClient{
kubeClient: kubeClient,
deleteClient: deleteClient,
stateClient: stateClient,
kubeClient: kubeClient,
}
}
@@ -488,34 +481,6 @@ func (o *WatchClient) processEvents(
return nil, nil
}
func (o *WatchClient) CleanupDevResources(ctx context.Context, devfileObj parser.DevfileObj, componentName string, out io.Writer) error {
fmt.Fprintln(out, "Cleaning resources, please wait")
appname := odocontext.GetApplication(ctx)
isInnerLoopDeployed, resources, err := o.deleteClient.ListResourcesToDeleteFromDevfile(devfileObj, appname, componentName, labels.ComponentDevMode)
if err != nil {
if kerrors.IsUnauthorized(err) || kerrors.IsForbidden(err) {
fmt.Fprintf(out, "Error connecting to the cluster, the resources were not cleaned up.\nPlease log in again and cleanup the resource with `odo delete component`\n\n")
} else {
fmt.Fprintf(out, "Failed to delete inner loop resources: %v\n", err)
}
return err
}
// if innerloop deployment resource is present, then execute preStop events
if isInnerLoopDeployed {
err = o.deleteClient.ExecutePreStopEvents(devfileObj, appname, componentName)
if err != nil {
fmt.Fprint(out, "Failed to execute preStop events")
}
}
// delete all the resources
failed := o.deleteClient.DeleteResources(resources, true)
for _, fail := range failed {
fmt.Fprintf(out, "Failed to delete the %q resource: %s\n", fail.GetKind(), fail.GetName())
}
return o.stateClient.SaveExit()
}
func shouldIgnoreEvent(event fsnotify.Event) (ignoreEvent bool) {
if !(event.Op&fsnotify.Remove == fsnotify.Remove || event.Op&fsnotify.Rename == fsnotify.Rename) {
stat, err := os.Lstat(event.Name)


@@ -86,3 +86,7 @@ mockgen -source=pkg/sync/interface.go \
mockgen -source=pkg/exec/interface.go \
-package exec \
-destination pkg/exec/mock.go
mockgen -source=pkg/podman/interface.go \
-package podman \
-destination pkg/podman/mock.go