mirror of https://github.com/kubernetes/kubernetes.git
remove portworx from in-tree volume plugins
Signed-off-by: carlory <baofa.fan@daocloud.io>
parent c8fc0a1b98
commit 46727c48eb
8 changed files with 0 additions and 1135 deletions
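The removed plugin registered itself as kubernetes.io/portworx-volume and served volumes declared through the portworxVolume source; the PortworxVolumeSource fields (VolumeID, FSType) appear in the deleted code below. A minimal sketch of the kind of manifest that relied on the in-tree plugin — the object name is illustrative, the volume ID and size are taken from the deleted test:

apiVersion: v1
kind: PersistentVolume
metadata:
  name: px-example-pv        # illustrative name
spec:
  capacity:
    storage: 100Gi
  accessModes:
  - ReadWriteOnce
  portworxVolume:
    volumeID: portworx-test-vol
    fsType: ext4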
@@ -52,7 +52,6 @@ import (
 	"k8s.io/kubernetes/pkg/volume/iscsi"
 	"k8s.io/kubernetes/pkg/volume/local"
 	"k8s.io/kubernetes/pkg/volume/nfs"
-	"k8s.io/kubernetes/pkg/volume/portworx"
 	"k8s.io/kubernetes/pkg/volume/projected"
 	"k8s.io/kubernetes/pkg/volume/secret"
 	"k8s.io/kubernetes/pkg/volume/util/hostutil"
@@ -78,7 +77,6 @@ func volumePlugins() []volume.VolumePlugin {
 	allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
 	allPlugins = append(allPlugins, configmap.ProbeVolumePlugins()...)
 	allPlugins = append(allPlugins, projected.ProbeVolumePlugins()...)
-	allPlugins = append(allPlugins, portworx.ProbeVolumePlugins()...)
 	allPlugins = append(allPlugins, local.ProbeVolumePlugins()...)
 	allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...)
 	return allPlugins
@@ -1,9 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-  - saad-ali
-reviewers:
-  - saad-ali
-emeritus_approvers:
-  - lpabon
-  - rootfs
@@ -1,19 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package portworx contains the internal representation of Portworx
-// Block Device volumes.
-package portworx
@@ -1,462 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package portworx
-
-import (
-    "fmt"
-    "net"
-    "os"
-    "strconv"
-
-    "k8s.io/klog/v2"
-    "k8s.io/mount-utils"
-    "k8s.io/utils/exec"
-    utilstrings "k8s.io/utils/strings"
-
-    volumeclient "github.com/libopenstorage/openstorage/api/client/volume"
-    v1 "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/api/resource"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/types"
-    "k8s.io/kubernetes/pkg/volume"
-    "k8s.io/kubernetes/pkg/volume/util"
-)
-
-const (
-    attachContextKey = "context"
-    attachHostKey    = "host"
-)
-
-// ProbeVolumePlugins is the primary entrypoint for volume plugins.
-func ProbeVolumePlugins() []volume.VolumePlugin {
-    return []volume.VolumePlugin{&portworxVolumePlugin{nil, nil}}
-}
-
-type portworxVolumePlugin struct {
-    host volume.VolumeHost
-    util *portworxVolumeUtil
-}
-
-var _ volume.VolumePlugin = &portworxVolumePlugin{}
-var _ volume.PersistentVolumePlugin = &portworxVolumePlugin{}
-var _ volume.DeletableVolumePlugin = &portworxVolumePlugin{}
-var _ volume.ProvisionableVolumePlugin = &portworxVolumePlugin{}
-var _ volume.ExpandableVolumePlugin = &portworxVolumePlugin{}
-
-const (
-    portworxVolumePluginName = "kubernetes.io/portworx-volume"
-)
-
-func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
-    return host.GetPodVolumeDir(uid, utilstrings.EscapeQualifiedName(portworxVolumePluginName), volName)
-}
-
-func (plugin *portworxVolumePlugin) IsMigratedToCSI() bool {
-    return true
-}
-
-func (plugin *portworxVolumePlugin) Init(host volume.VolumeHost) error {
-    client, err := volumeclient.NewDriverClient(
-        fmt.Sprintf("http://%s", net.JoinHostPort(host.GetHostName(), strconv.Itoa(osdMgmtDefaultPort))),
-        pxdDriverName, osdDriverVersion, pxDriverName)
-    if err != nil {
-        return err
-    }
-
-    plugin.host = host
-    plugin.util = &portworxVolumeUtil{
-        portworxClient: client,
-    }
-
-    return nil
-}
-
-func (plugin *portworxVolumePlugin) GetPluginName() string {
-    return portworxVolumePluginName
-}
-
-func (plugin *portworxVolumePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
-    volumeSource, _, err := getVolumeSource(spec)
-    if err != nil {
-        return "", err
-    }
-
-    return volumeSource.VolumeID, nil
-}
-
-func (plugin *portworxVolumePlugin) CanSupport(spec *volume.Spec) bool {
-    return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.PortworxVolume != nil) ||
-        (spec.Volume != nil && spec.Volume.PortworxVolume != nil)
-}
-
-func (plugin *portworxVolumePlugin) RequiresRemount(spec *volume.Spec) bool {
-    return false
-}
-
-func (plugin *portworxVolumePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
-    return []v1.PersistentVolumeAccessMode{
-        v1.ReadWriteOnce,
-        v1.ReadWriteMany,
-    }
-}
-
-func (plugin *portworxVolumePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod) (volume.Mounter, error) {
-    return plugin.newMounterInternal(spec, pod.UID, plugin.util, plugin.host.GetMounter())
-}
-
-func (plugin *portworxVolumePlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager portworxManager, mounter mount.Interface) (volume.Mounter, error) {
-    pwx, readOnly, err := getVolumeSource(spec)
-    if err != nil {
-        return nil, err
-    }
-
-    volumeID := pwx.VolumeID
-    fsType := pwx.FSType
-
-    return &portworxVolumeMounter{
-        portworxVolume: &portworxVolume{
-            podUID:          podUID,
-            volName:         spec.Name(),
-            volumeID:        volumeID,
-            manager:         manager,
-            mounter:         mounter,
-            plugin:          plugin,
-            MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
-        },
-        fsType:      fsType,
-        readOnly:    readOnly,
-        diskMounter: mount.NewSafeFormatAndMount(plugin.host.GetMounter(), exec.New())}, nil
-}
-
-func (plugin *portworxVolumePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
-    return plugin.newUnmounterInternal(volName, podUID, plugin.util, plugin.host.GetMounter())
-}
-
-func (plugin *portworxVolumePlugin) newUnmounterInternal(volName string, podUID types.UID, manager portworxManager,
-    mounter mount.Interface) (volume.Unmounter, error) {
-    return &portworxVolumeUnmounter{
-        &portworxVolume{
-            podUID:          podUID,
-            volName:         volName,
-            manager:         manager,
-            mounter:         mounter,
-            plugin:          plugin,
-            MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)),
-        }}, nil
-}
-
-func (plugin *portworxVolumePlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) {
-    return plugin.newDeleterInternal(spec, plugin.util)
-}
-
-func (plugin *portworxVolumePlugin) newDeleterInternal(spec *volume.Spec, manager portworxManager) (volume.Deleter, error) {
-    if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.PortworxVolume == nil {
-        return nil, fmt.Errorf("spec.PersistentVolumeSource.PortworxVolume is nil")
-    }
-
-    return &portworxVolumeDeleter{
-        portworxVolume: &portworxVolume{
-            volName:  spec.Name(),
-            volumeID: spec.PersistentVolume.Spec.PortworxVolume.VolumeID,
-            manager:  manager,
-            plugin:   plugin,
-        }}, nil
-}
-
-func (plugin *portworxVolumePlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) {
-    return plugin.newProvisionerInternal(options, plugin.util)
-}
-
-func (plugin *portworxVolumePlugin) newProvisionerInternal(options volume.VolumeOptions, manager portworxManager) (volume.Provisioner, error) {
-    return &portworxVolumeProvisioner{
-        portworxVolume: &portworxVolume{
-            manager: manager,
-            plugin:  plugin,
-        },
-        options: options,
-    }, nil
-}
-
-func (plugin *portworxVolumePlugin) RequiresFSResize() bool {
-    return false
-}
-
-func (plugin *portworxVolumePlugin) ExpandVolumeDevice(
-    spec *volume.Spec,
-    newSize resource.Quantity,
-    oldSize resource.Quantity) (resource.Quantity, error) {
-    klog.V(4).Infof("Expanding: %s from %v to %v", spec.Name(), oldSize, newSize)
-    err := plugin.util.ResizeVolume(spec, newSize, plugin.host)
-    if err != nil {
-        return oldSize, err
-    }
-
-    klog.V(4).Infof("Successfully resized %s to %v", spec.Name(), newSize)
-    return newSize, nil
-}
-
-func (plugin *portworxVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (volume.ReconstructedVolume, error) {
-    portworxVolume := &v1.Volume{
-        Name: volumeName,
-        VolumeSource: v1.VolumeSource{
-            PortworxVolume: &v1.PortworxVolumeSource{
-                VolumeID: volumeName,
-            },
-        },
-    }
-    return volume.ReconstructedVolume{
-        Spec: volume.NewSpecFromVolume(portworxVolume),
-    }, nil
-}
-
-func (plugin *portworxVolumePlugin) SupportsMountOption() bool {
-    return false
-}
-
-func (plugin *portworxVolumePlugin) SupportsSELinuxContextMount(spec *volume.Spec) (bool, error) {
-    return false, nil
-}
-
-func getVolumeSource(
-    spec *volume.Spec) (*v1.PortworxVolumeSource, bool, error) {
-    if spec.Volume != nil && spec.Volume.PortworxVolume != nil {
-        return spec.Volume.PortworxVolume, spec.Volume.PortworxVolume.ReadOnly, nil
-    } else if spec.PersistentVolume != nil &&
-        spec.PersistentVolume.Spec.PortworxVolume != nil {
-        return spec.PersistentVolume.Spec.PortworxVolume, spec.ReadOnly, nil
-    }
-
-    return nil, false, fmt.Errorf("Spec does not reference a Portworx Volume type")
-}
-
-// Abstract interface to PD operations.
-type portworxManager interface {
-    // Creates a volume
-    CreateVolume(provisioner *portworxVolumeProvisioner) (volumeID string, volumeSizeGB int64, labels map[string]string, err error)
-    // Deletes a volume
-    DeleteVolume(deleter *portworxVolumeDeleter) error
-    // Attach a volume
-    AttachVolume(mounter *portworxVolumeMounter, attachOptions map[string]string) (string, error)
-    // Detach a volume
-    DetachVolume(unmounter *portworxVolumeUnmounter) error
-    // Mount a volume
-    MountVolume(mounter *portworxVolumeMounter, mountDir string) error
-    // Unmount a volume
-    UnmountVolume(unmounter *portworxVolumeUnmounter, mountDir string) error
-    // Resize a volume
-    ResizeVolume(spec *volume.Spec, newSize resource.Quantity, host volume.VolumeHost) error
-}
-
-// portworxVolume volumes are portworx block devices
-// that are attached to the kubelet's host machine and exposed to the pod.
-type portworxVolume struct {
-    volName string
-    podUID  types.UID
-    // Unique id of the PD, used to find the disk resource in the provider.
-    volumeID string
-    // Utility interface that provides API calls to the provider to attach/detach disks.
-    manager portworxManager
-    // Mounter interface that provides system calls to mount the global path to the pod local path.
-    mounter mount.Interface
-    plugin  *portworxVolumePlugin
-    volume.MetricsProvider
-}
-
-type portworxVolumeMounter struct {
-    *portworxVolume
-    // Filesystem type, optional.
-    fsType string
-    // Specifies whether the disk will be attached as read-only.
-    readOnly bool
-    // diskMounter provides the interface that is used to mount the actual block device.
-    diskMounter *mount.SafeFormatAndMount
-}
-
-var _ volume.Mounter = &portworxVolumeMounter{}
-
-func (b *portworxVolumeMounter) GetAttributes() volume.Attributes {
-    return volume.Attributes{
-        ReadOnly:       b.readOnly,
-        Managed:        !b.readOnly,
-        SELinuxRelabel: false,
-    }
-}
-
-// SetUp attaches the disk and bind mounts to the volume path.
-func (b *portworxVolumeMounter) SetUp(mounterArgs volume.MounterArgs) error {
-    return b.SetUpAt(b.GetPath(), mounterArgs)
-}
-
-// SetUpAt attaches the disk and bind mounts to the volume path.
-func (b *portworxVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
-    notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
-    klog.Infof("Portworx Volume set up. Dir: %s %v %v", dir, !notMnt, err)
-    if err != nil && !os.IsNotExist(err) {
-        // don't log error details from client calls in events
-        klog.V(4).Infof("Cannot validate mountpoint %s: %v", dir, err)
-        return fmt.Errorf("failed to validate mountpoint: see kube-controller-manager.log for details")
-    }
-    if !notMnt {
-        return nil
-    }
-
-    attachOptions := make(map[string]string)
-    attachOptions[attachContextKey] = dir
-    attachOptions[attachHostKey] = b.plugin.host.GetHostName()
-    if _, err := b.manager.AttachVolume(b, attachOptions); err != nil {
-        // don't log error details from client calls in events
-        klog.V(4).Infof("Failed to attach volume %s: %v", b.volumeID, err)
-        return fmt.Errorf("failed to attach volume: see kube-controller-manager.log for details")
-    }
-
-    klog.V(4).Infof("Portworx Volume %s attached", b.volumeID)
-
-    if err := os.MkdirAll(dir, 0750); err != nil {
-        return err
-    }
-
-    if err := b.manager.MountVolume(b, dir); err != nil {
-        // don't log error details from client calls in events
-        klog.V(4).Infof("Failed to mount volume %s: %v", b.volumeID, err)
-        return fmt.Errorf("failed to mount volume: see kube-controller-manager.log for details")
-    }
-    if !b.readOnly {
-        // Since portworxVolume is in process of being removed from in-tree, we avoid larger refactor to add progress tracking for ownership operation
-        ownershipChanger := volume.NewVolumeOwnership(b, dir, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil))
-        _ = ownershipChanger.ChangePermissions()
-    }
-    klog.Infof("Portworx Volume %s setup at %s", b.volumeID, dir)
-    return nil
-}
-
-func (pwx *portworxVolume) GetPath() string {
-    return getPath(pwx.podUID, pwx.volName, pwx.plugin.host)
-}
-
-type portworxVolumeUnmounter struct {
-    *portworxVolume
-}
-
-var _ volume.Unmounter = &portworxVolumeUnmounter{}
-
-// Unmounts the bind mount, and detaches the disk only if the PD
-// resource was the last reference to that disk on the kubelet.
-func (c *portworxVolumeUnmounter) TearDown() error {
-    return c.TearDownAt(c.GetPath())
-}
-
-// Unmounts the bind mount, and detaches the disk only if the PD
-// resource was the last reference to that disk on the kubelet.
-func (c *portworxVolumeUnmounter) TearDownAt(dir string) error {
-    klog.Infof("Portworx Volume TearDown of %s", dir)
-
-    if err := c.manager.UnmountVolume(c, dir); err != nil {
-        // don't log error details from client calls in events
-        klog.V(4).Infof("Failed to unmount volume %s: %v", c.volumeID, err)
-        return fmt.Errorf("failed to unmount volume: see kube-controller-manager.log for details")
-    }
-
-    // Call Portworx Detach Volume.
-    if err := c.manager.DetachVolume(c); err != nil {
-        // don't log error details from client calls in events
-        klog.V(4).Infof("Failed to detach volume %s: %v", c.volumeID, err)
-        return fmt.Errorf("failed to detach volume: see kube-controller-manager.log for details")
-    }
-
-    return nil
-}
-
-type portworxVolumeDeleter struct {
-    *portworxVolume
-}
-
-var _ volume.Deleter = &portworxVolumeDeleter{}
-
-func (d *portworxVolumeDeleter) GetPath() string {
-    return getPath(d.podUID, d.volName, d.plugin.host)
-}
-
-func (d *portworxVolumeDeleter) Delete() error {
-    err := d.manager.DeleteVolume(d)
-    if err != nil {
-        // don't log error details from client calls in events
-        klog.V(4).Infof("Failed to delete volume %s: %v", d.volumeID, err)
-        return fmt.Errorf("failed to delete volume: see kube-controller-manager.log for details")
-    }
-    return nil
-}
-
-type portworxVolumeProvisioner struct {
-    *portworxVolume
-    options volume.VolumeOptions
-}
-
-var _ volume.Provisioner = &portworxVolumeProvisioner{}
-
-func (c *portworxVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
-    if !util.ContainsAllAccessModes(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
-        return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
-    }
-
-    if util.CheckPersistentVolumeClaimModeBlock(c.options.PVC) {
-        return nil, fmt.Errorf("%s does not support block volume provisioning", c.plugin.GetPluginName())
-    }
-
-    volumeID, sizeGiB, labels, err := c.manager.CreateVolume(c)
-    if err != nil {
-        // don't log error details from client calls in events
-        klog.V(4).Infof("Failed to create volume: %v", err)
-        return nil, fmt.Errorf("failed to create volume: see kube-controller-manager.log for details")
-    }
-
-    pv := &v1.PersistentVolume{
-        ObjectMeta: metav1.ObjectMeta{
-            Name:   c.options.PVName,
-            Labels: map[string]string{},
-            Annotations: map[string]string{
-                util.VolumeDynamicallyCreatedByKey: "portworx-volume-dynamic-provisioner",
-            },
-        },
-        Spec: v1.PersistentVolumeSpec{
-            PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
-            AccessModes:                   c.options.PVC.Spec.AccessModes,
-            Capacity: v1.ResourceList{
-                v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGiB)),
-            },
-            PersistentVolumeSource: v1.PersistentVolumeSource{
-                PortworxVolume: &v1.PortworxVolumeSource{
-                    VolumeID: volumeID,
-                },
-            },
-        },
-    }
-
-    if len(labels) != 0 {
-        if pv.Labels == nil {
-            pv.Labels = make(map[string]string)
-        }
-        for k, v := range labels {
-            pv.Labels[k] = v
-        }
-    }
-
-    if len(c.options.PVC.Spec.AccessModes) == 0 {
-        pv.Spec.AccessModes = c.plugin.GetAccessModes()
-    }
-
-    return pv, nil
-}
@@ -1,238 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package portworx
-
-import (
-    "fmt"
-    "os"
-    "path/filepath"
-    "testing"
-
-    "k8s.io/mount-utils"
-
-    v1 "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/api/resource"
-    "k8s.io/apimachinery/pkg/types"
-    utiltesting "k8s.io/client-go/util/testing"
-    "k8s.io/kubernetes/pkg/volume"
-    volumetest "k8s.io/kubernetes/pkg/volume/testing"
-)
-
-const (
-    PortworxTestVolume = "portworx-test-vol"
-)
-
-func TestCanSupport(t *testing.T) {
-    tmpDir, err := utiltesting.MkTmpdir("portworxVolumeTest")
-    if err != nil {
-        t.Fatalf("can't make a temp dir: %v", err)
-    }
-    defer os.RemoveAll(tmpDir)
-    plugMgr := volume.VolumePluginMgr{}
-    plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
-
-    plug, err := plugMgr.FindPluginByName("kubernetes.io/portworx-volume")
-    if err != nil {
-        t.Fatal("Can't find the plugin by name")
-    }
-    if plug.GetPluginName() != "kubernetes.io/portworx-volume" {
-        t.Errorf("Wrong name: %s", plug.GetPluginName())
-    }
-    if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{PortworxVolume: &v1.PortworxVolumeSource{}}}}) {
-        t.Errorf("Expected true")
-    }
-    if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{PortworxVolume: &v1.PortworxVolumeSource{}}}}}) {
-        t.Errorf("Expected true")
-    }
-}
-
-func TestGetAccessModes(t *testing.T) {
-    tmpDir, err := utiltesting.MkTmpdir("portworxVolumeTest")
-    if err != nil {
-        t.Fatalf("can't make a temp dir: %v", err)
-    }
-    defer os.RemoveAll(tmpDir)
-    plugMgr := volume.VolumePluginMgr{}
-    plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
-
-    plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/portworx-volume")
-    if err != nil {
-        t.Errorf("Can't find the plugin by name")
-    }
-
-    if !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteOnce) {
-        t.Errorf("Expected to support AccessModeTypes: %s", v1.ReadWriteOnce)
-    }
-    if !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteMany) {
-        t.Errorf("Expected to support AccessModeTypes: %s", v1.ReadWriteMany)
-    }
-    if volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadOnlyMany) {
-        t.Errorf("Expected not to support AccessModeTypes: %s", v1.ReadOnlyMany)
-    }
-}
-
-type fakePortworxManager struct {
-    attachCalled bool
-    mountCalled  bool
-}
-
-func (fake *fakePortworxManager) AttachVolume(b *portworxVolumeMounter, attachOptions map[string]string) (string, error) {
-    fake.attachCalled = true
-    return "", nil
-}
-
-func (fake *fakePortworxManager) DetachVolume(c *portworxVolumeUnmounter) error {
-    return nil
-}
-
-func (fake *fakePortworxManager) MountVolume(b *portworxVolumeMounter, mountPath string) error {
-    fake.mountCalled = true
-    return nil
-}
-
-func (fake *fakePortworxManager) UnmountVolume(c *portworxVolumeUnmounter, mountPath string) error {
-    return nil
-}
-
-func (fake *fakePortworxManager) CreateVolume(c *portworxVolumeProvisioner) (volumeID string, volumeSizeGB int64, labels map[string]string, err error) {
-    labels = make(map[string]string)
-    labels["fakeportworxmanager"] = "yes"
-    return PortworxTestVolume, 100, labels, nil
-}
-
-func (fake *fakePortworxManager) DeleteVolume(cd *portworxVolumeDeleter) error {
-    if cd.volumeID != PortworxTestVolume {
-        return fmt.Errorf("Deleter got unexpected volume name: %s", cd.volumeID)
-    }
-    return nil
-}
-
-func (fake *fakePortworxManager) ResizeVolume(spec *volume.Spec, newSize resource.Quantity, volumeHost volume.VolumeHost) error {
-    return nil
-}
-
-func TestPlugin(t *testing.T) {
-    tmpDir, err := utiltesting.MkTmpdir("portworxVolumeTest")
-    if err != nil {
-        t.Fatalf("can't make a temp dir: %v", err)
-    }
-    defer os.RemoveAll(tmpDir)
-    plugMgr := volume.VolumePluginMgr{}
-    plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
-
-    plug, err := plugMgr.FindPluginByName("kubernetes.io/portworx-volume")
-    if err != nil {
-        t.Errorf("Can't find the plugin by name")
-    }
-    spec := &v1.Volume{
-        Name: "vol1",
-        VolumeSource: v1.VolumeSource{
-            PortworxVolume: &v1.PortworxVolumeSource{
-                VolumeID: PortworxTestVolume,
-                FSType:   "ext4",
-            },
-        },
-    }
-    fakeManager := &fakePortworxManager{}
-    // Test Mounter
-    fakeMounter := mount.NewFakeMounter(nil)
-    mounter, err := plug.(*portworxVolumePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
-    if err != nil {
-        t.Errorf("Failed to make a new Mounter: %v", err)
-    }
-    if mounter == nil {
-        t.Errorf("Got a nil Mounter")
-    }
-
-    volPath := filepath.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~portworx-volume/vol1")
-    path := mounter.GetPath()
-    if path != volPath {
-        t.Errorf("Got unexpected path: %s", path)
-    }
-
-    if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
-        t.Errorf("Expected success, got: %v", err)
-    }
-    if _, err := os.Stat(path); err != nil {
-        if os.IsNotExist(err) {
-            t.Errorf("SetUp() failed, volume path not created: %s", path)
-        } else {
-            t.Errorf("SetUp() failed: %v", err)
-        }
-    }
-    if !fakeManager.attachCalled {
-        t.Errorf("Attach watch not called")
-    }
-    if !fakeManager.mountCalled {
-        t.Errorf("Mount watch not called")
-    }
-
-    // Test Unmounter
-    fakeManager = &fakePortworxManager{}
-    unmounter, err := plug.(*portworxVolumePlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager, fakeMounter)
-    if err != nil {
-        t.Errorf("Failed to make a new Unmounter: %v", err)
-    }
-    if unmounter == nil {
-        t.Errorf("Got a nil Unmounter")
-    }
-
-    if err := unmounter.TearDown(); err != nil {
-        t.Errorf("Expected success, got: %v", err)
-    }
-
-    // Test Provisioner
-    options := volume.VolumeOptions{
-        PVC:                           volumetest.CreateTestPVC("100Gi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
-        PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
-    }
-
-    provisioner, err := plug.(*portworxVolumePlugin).newProvisionerInternal(options, &fakePortworxManager{})
-    if err != nil {
-        t.Errorf("Error creating a new provisioner:%v", err)
-    }
-    persistentSpec, err := provisioner.Provision(nil, nil)
-    if err != nil {
-        t.Errorf("Provision() failed: %v", err)
-    }
-
-    if persistentSpec.Spec.PersistentVolumeSource.PortworxVolume.VolumeID != PortworxTestVolume {
-        t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.PortworxVolume.VolumeID)
-    }
-    cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
-    size := cap.Value()
-    if size != 100*1024*1024*1024 {
-        t.Errorf("Provision() returned unexpected volume size: %v", size)
-    }
-
-    if persistentSpec.Labels["fakeportworxmanager"] != "yes" {
-        t.Errorf("Provision() returned unexpected labels: %v", persistentSpec.Labels)
-    }
-
-    // Test Deleter
-    volSpec := &volume.Spec{
-        PersistentVolume: persistentSpec,
-    }
-    deleter, err := plug.(*portworxVolumePlugin).newDeleterInternal(volSpec, &fakePortworxManager{})
-    if err != nil {
-        t.Errorf("Error creating a new Deleter:%v", err)
-    }
-    err = deleter.Delete()
-    if err != nil {
-        t.Errorf("Deleter() failed: %v", err)
-    }
-}
@@ -1,387 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package portworx
-
-import (
-    "context"
-    "fmt"
-    "net"
-    "strconv"
-
-    osdapi "github.com/libopenstorage/openstorage/api"
-    osdclient "github.com/libopenstorage/openstorage/api/client"
-    volumeclient "github.com/libopenstorage/openstorage/api/client/volume"
-    osdspec "github.com/libopenstorage/openstorage/api/spec"
-    volumeapi "github.com/libopenstorage/openstorage/volume"
-    v1 "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/api/resource"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    volumehelpers "k8s.io/cloud-provider/volume/helpers"
-    "k8s.io/klog/v2"
-
-    api "k8s.io/kubernetes/pkg/apis/core"
-    "k8s.io/kubernetes/pkg/volume"
-)
-
-const (
-    osdMgmtDefaultPort = 9001
-    osdDriverVersion   = "v1"
-    pxdDriverName      = "pxd"
-    pvcClaimLabel      = "pvc"
-    pvcNamespaceLabel  = "namespace"
-    pxServiceName      = "portworx-service"
-    pxDriverName       = "pxd-sched"
-)
-
-type portworxVolumeUtil struct {
-    portworxClient *osdclient.Client
-}
-
-// CreateVolume creates a Portworx volume.
-func (util *portworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (string, int64, map[string]string, error) {
-    driver, err := util.getPortworxDriver(p.plugin.host)
-    if err != nil || driver == nil {
-        klog.Errorf("Failed to get portworx driver. Err: %v", err)
-        return "", 0, nil, err
-    }
-
-    klog.Infof("Creating Portworx volume for PVC: %v", p.options.PVC.Name)
-
-    capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
-    // Portworx Volumes are specified in GiB
-    requestGiB, err := volumehelpers.RoundUpToGiB(capacity)
-    if err != nil {
-        return "", 0, nil, err
-    }
-
-    // Perform a best-effort parsing of parameters. Portworx 1.2.9 and later parses volume parameters from
-    // spec.VolumeLabels. So even if below SpecFromOpts() fails to parse certain parameters or
-    // doesn't support new parameters, the server-side processing will parse it correctly.
-    // We still need to call SpecFromOpts() here to handle cases where someone is running Portworx 1.2.8 and lower.
-    specHandler := osdspec.NewSpecHandler()
-    spec, locator, source, _ := specHandler.SpecFromOpts(p.options.Parameters)
-    if spec == nil {
-        spec = specHandler.DefaultSpec()
-    }
-
-    // Pass all parameters as volume labels for Portworx server-side processing
-    if spec.VolumeLabels == nil {
-        spec.VolumeLabels = make(map[string]string, 0)
-    }
-
-    for k, v := range p.options.Parameters {
-        spec.VolumeLabels[k] = v
-    }
-
-    // Update the requested size in the spec
-    spec.Size = uint64(requestGiB * volumehelpers.GiB)
-
-    // Change the Portworx Volume name to PV name
-    if locator == nil {
-        locator = &osdapi.VolumeLocator{
-            VolumeLabels: make(map[string]string),
-        }
-    }
-    locator.Name = p.options.PVName
-
-    // Add claim Name as a part of Portworx Volume Labels
-    locator.VolumeLabels[pvcClaimLabel] = p.options.PVC.Name
-    locator.VolumeLabels[pvcNamespaceLabel] = p.options.PVC.Namespace
-
-    for k, v := range p.options.PVC.Annotations {
-        if _, present := spec.VolumeLabels[k]; present {
-            klog.Warningf("not saving annotation: %s=%s in spec labels due to an existing key", k, v)
-            continue
-        }
-        spec.VolumeLabels[k] = v
-    }
-
-    volumeID, err := driver.Create(locator, source, spec)
-    if err != nil {
-        klog.Errorf("Error creating Portworx Volume : %v", err)
-        return "", 0, nil, err
-    }
-
-    klog.Infof("Successfully created Portworx volume for PVC: %v", p.options.PVC.Name)
-    return volumeID, requestGiB, nil, err
-}
-
-// DeleteVolume deletes a Portworx volume
-func (util *portworxVolumeUtil) DeleteVolume(d *portworxVolumeDeleter) error {
-    driver, err := util.getPortworxDriver(d.plugin.host)
-    if err != nil || driver == nil {
-        klog.Errorf("Failed to get portworx driver. Err: %v", err)
-        return err
-    }
-
-    err = driver.Delete(d.volumeID)
-    if err != nil {
-        klog.Errorf("Error deleting Portworx Volume (%v): %v", d.volName, err)
-        return err
-    }
-    return nil
-}
-
-// AttachVolume attaches a Portworx Volume
-func (util *portworxVolumeUtil) AttachVolume(m *portworxVolumeMounter, attachOptions map[string]string) (string, error) {
-    driver, err := util.getLocalPortworxDriver(m.plugin.host)
-    if err != nil || driver == nil {
-        klog.Errorf("Failed to get portworx driver. Err: %v", err)
-        return "", err
-    }
-
-    devicePath, err := driver.Attach(m.volName, attachOptions)
-    if err != nil {
-        klog.Errorf("Error attaching Portworx Volume (%v): %v", m.volName, err)
-        return "", err
-    }
-    return devicePath, nil
-}
-
-// DetachVolume detaches a Portworx Volume
-func (util *portworxVolumeUtil) DetachVolume(u *portworxVolumeUnmounter) error {
-    driver, err := util.getLocalPortworxDriver(u.plugin.host)
-    if err != nil || driver == nil {
-        klog.Errorf("Failed to get portworx driver. Err: %v", err)
-        return err
-    }
-
-    err = driver.Detach(u.volName, false /*doNotForceDetach*/)
-    if err != nil {
-        klog.Errorf("Error detaching Portworx Volume (%v): %v", u.volName, err)
-        return err
-    }
-    return nil
-}
-
-// MountVolume mounts a Portworx Volume on the specified mountPath
-func (util *portworxVolumeUtil) MountVolume(m *portworxVolumeMounter, mountPath string) error {
-    driver, err := util.getLocalPortworxDriver(m.plugin.host)
-    if err != nil || driver == nil {
-        klog.Errorf("Failed to get portworx driver. Err: %v", err)
-        return err
-    }
-
-    err = driver.Mount(m.volName, mountPath)
-    if err != nil {
-        klog.Errorf("Error mounting Portworx Volume (%v) on Path (%v): %v", m.volName, mountPath, err)
-        return err
-    }
-    return nil
-}
-
-// UnmountVolume unmounts a Portworx Volume
-func (util *portworxVolumeUtil) UnmountVolume(u *portworxVolumeUnmounter, mountPath string) error {
-    driver, err := util.getLocalPortworxDriver(u.plugin.host)
-    if err != nil || driver == nil {
-        klog.Errorf("Failed to get portworx driver. Err: %v", err)
-        return err
-    }
-
-    err = driver.Unmount(u.volName, mountPath)
-    if err != nil {
-        klog.Errorf("Error unmounting Portworx Volume (%v) on Path (%v): %v", u.volName, mountPath, err)
-        return err
-    }
-    return nil
-}
-
-func (util *portworxVolumeUtil) ResizeVolume(spec *volume.Spec, newSize resource.Quantity, volumeHost volume.VolumeHost) error {
-    driver, err := util.getPortworxDriver(volumeHost)
-    if err != nil || driver == nil {
-        klog.Errorf("Failed to get portworx driver. Err: %v", err)
-        return err
-    }
-
-    vols, err := driver.Inspect([]string{spec.Name()})
-    if err != nil {
-        return err
-    }
-
-    if len(vols) != 1 {
-        return fmt.Errorf("failed to inspect Portworx volume: %s. Found: %d volumes", spec.Name(), len(vols))
-    }
-
-    vol := vols[0]
-    tBytes, err := volumehelpers.RoundUpToB(newSize)
-    if err != nil {
-        return err
-    }
-    newSizeInBytes := uint64(tBytes)
-    if vol.Spec.Size >= newSizeInBytes {
-        klog.Infof("Portworx volume: %s already at size: %d greater than or equal to new "+
-            "requested size: %d. Skipping resize.", spec.Name(), vol.Spec.Size, newSizeInBytes)
-        return nil
-    }
-
-    vol.Spec.Size = newSizeInBytes
-    err = driver.Set(spec.Name(), vol.Locator, vol.Spec)
-    if err != nil {
-        return err
-    }
-
-    // check if the volume's size actually got updated
-    vols, err = driver.Inspect([]string{spec.Name()})
-    if err != nil {
-        return err
-    }
-
-    if len(vols) != 1 {
-        return fmt.Errorf("failed to inspect resized Portworx volume: %s. Found: %d volumes", spec.Name(), len(vols))
-    }
-
-    updatedVol := vols[0]
-    if updatedVol.Spec.Size < vol.Spec.Size {
-        return fmt.Errorf("Portworx volume: %s doesn't match expected size after resize. expected:%v actual:%v",
-            spec.Name(), vol.Spec.Size, updatedVol.Spec.Size)
-    }
-
-    return nil
-}
-
-func isClientValid(client *osdclient.Client) (bool, error) {
-    if client == nil {
-        return false, nil
-    }
-
-    _, err := client.Versions(osdapi.OsdVolumePath)
-    if err != nil {
-        klog.Errorf("portworx client failed driver versions check. Err: %v", err)
-        return false, err
-    }
-
-    return true, nil
-}
-
-func createDriverClient(hostname string, port int32) (*osdclient.Client, error) {
-    client, err := volumeclient.NewDriverClient(fmt.Sprintf("http://%s", net.JoinHostPort(hostname, strconv.Itoa(int(port)))),
-        pxdDriverName, osdDriverVersion, pxDriverName)
-    if err != nil {
-        return nil, err
-    }
-
-    isValid, err := isClientValid(client)
-    if isValid {
-        return client, nil
-    }
-    return nil, err
-}
-
-// getPortworxDriver returns a Portworx volume driver which can be used for cluster wide operations.
-//
-// Operations like create and delete volume don't need to be restricted to local volume host since
-// any node in the Portworx cluster can coordinate the create/delete request and forward the operations to
-// the Portworx node that will own/owns the data.
-func (util *portworxVolumeUtil) getPortworxDriver(volumeHost volume.VolumeHost) (volumeapi.VolumeDriver, error) {
-    // check if existing saved client is valid
-    if isValid, _ := isClientValid(util.portworxClient); isValid {
-        return volumeclient.VolumeDriver(util.portworxClient), nil
-    }
-
-    // create new client
-    var err error
-    util.portworxClient, err = createDriverClient(volumeHost.GetHostName(), osdMgmtDefaultPort) // for backward compatibility
-    if err != nil || util.portworxClient == nil {
-        // Create client from portworx k8s service.
-        svc, err := getPortworxService(volumeHost)
-        if err != nil {
-            return nil, err
-        }
-
-        // The port here is always the default one since it's the service port
-        util.portworxClient, err = createDriverClient(svc.Spec.ClusterIP, osdMgmtDefaultPort)
-        if err != nil || util.portworxClient == nil {
-            klog.Errorf("Failed to connect to portworx service. Err: %v", err)
-            return nil, err
-        }
-
-        klog.Infof("Using portworx cluster service at: %v:%d as api endpoint",
-            svc.Spec.ClusterIP, osdMgmtDefaultPort)
-    } else {
-        klog.Infof("Using portworx service at: %v:%d as api endpoint",
-            volumeHost.GetHostName(), osdMgmtDefaultPort)
-    }
-
-    return volumeclient.VolumeDriver(util.portworxClient), nil
-}
-
-// getLocalPortworxDriver returns driver connected to Portworx API server on volume host.
-//
-// This is required to force certain operations (mount, unmount, detach, attach) to
-// go to the volume host instead of the k8s service which might route it to any host. This pertains to how
-// Portworx mounts and attaches a volume to the running container. The node getting these requests needs to
-// see the pod container mounts (specifically /var/lib/kubelet/pods/<pod_id>)
-func (util *portworxVolumeUtil) getLocalPortworxDriver(volumeHost volume.VolumeHost) (volumeapi.VolumeDriver, error) {
-    if util.portworxClient != nil {
-        // check if existing saved client is valid
-        if isValid, _ := isClientValid(util.portworxClient); isValid {
-            return volumeclient.VolumeDriver(util.portworxClient), nil
-        }
-    }
-
-    // Lookup port
-    svc, err := getPortworxService(volumeHost)
-    if err != nil {
-        return nil, err
-    }
-
-    osgMgmtPort := lookupPXAPIPortFromService(svc)
-    util.portworxClient, err = createDriverClient(volumeHost.GetHostName(), osgMgmtPort)
-    if err != nil {
-        return nil, err
-    }
-
-    klog.Infof("Using portworx local service at: %v:%d as api endpoint",
-        volumeHost.GetHostName(), osgMgmtPort)
-    return volumeclient.VolumeDriver(util.portworxClient), nil
-}
-
-// lookupPXAPIPortFromService goes over all the ports in the given service and returns the target
-// port for osdMgmtDefaultPort
-func lookupPXAPIPortFromService(svc *v1.Service) int32 {
-    for _, p := range svc.Spec.Ports {
-        if p.Port == osdMgmtDefaultPort {
-            return p.TargetPort.IntVal
-        }
-    }
-    return osdMgmtDefaultPort // default
-}
-
-// getPortworxService returns the portworx cluster service from the API server
-func getPortworxService(host volume.VolumeHost) (*v1.Service, error) {
-    kubeClient := host.GetKubeClient()
-    if kubeClient == nil {
-        err := fmt.Errorf("failed to get kubeclient when creating portworx client")
-        klog.Error(err.Error())
-        return nil, err
-    }
-
-    opts := metav1.GetOptions{}
-    svc, err := kubeClient.CoreV1().Services(api.NamespaceSystem).Get(context.TODO(), pxServiceName, opts)
-    if err != nil {
-        klog.Errorf("Failed to get service. Err: %v", err)
-        return nil, err
-    }
-
-    if svc == nil {
-        err = fmt.Errorf("service: %v not found. Consult Portworx docs to deploy it", pxServiceName)
-        klog.Error(err.Error())
-        return nil, err
-    }
-
-    return svc, nil
-}
@@ -187,9 +187,6 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding)
                 rbacv1helpers.NewRule("update", "patch").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie(),
                 rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
                 eventsRule(),
-
-                // volume plugin - portworx
-                rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("services").RuleOrDie(),
             },
         }
     })
@@ -313,9 +310,6 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding)
                 // recyclerClient.WatchPod
                 rbacv1helpers.NewRule("watch").Groups(legacyGroup).Resources("events").RuleOrDie(),
-
-                // volume plugin - portworx
-                rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("services").RuleOrDie(),
             },
         }
     })
     addControllerRole(&controllerRoles, &controllerRoleBindings, func() rbacv1.ClusterRole {
@@ -654,12 +654,6 @@ items:
     - create
     - patch
     - update
-  - apiGroups:
-    - ""
-    resources:
-    - services
-    verbs:
-    - get
 - apiVersion: rbac.authorization.k8s.io/v1
   kind: ClusterRole
   metadata:
@@ -1000,12 +994,6 @@ items:
    - events
    verbs:
    - watch
-  - apiGroups:
-    - ""
-    resources:
-    - services
-    verbs:
-    - get
 - apiVersion: rbac.authorization.k8s.io/v1
   kind: ClusterRole
   metadata:
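The deleted plugin already reported IsMigratedToCSI() as true, so clusters that still provision Portworx volumes are expected to do so through the out-of-tree Portworx CSI driver rather than the removed in-tree provisioner. A minimal sketch of a StorageClass for that path, assuming the commonly used Portworx CSI provisioner name pxd.portworx.com; the object name and parameters are illustrative, not taken from this commit:

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: portworx-csi-example   # illustrative name
provisioner: pxd.portworx.com  # assumed out-of-tree Portworx CSI driver name
parameters:
  repl: "2"                    # assumed Portworx replication-factor parameter, shown for illustration
reclaimPolicy: Delete
allowVolumeExpansion: true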