//go:build linux
// +build linux

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2enode
import (
	"context"
	"fmt"
	"io/ioutil"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
	"k8s.io/kubernetes/pkg/kubelet/cm"
	"k8s.io/kubernetes/pkg/kubelet/stats/pidlimit"
	"k8s.io/kubernetes/test/e2e/framework"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	e2enodekubelet "k8s.io/kubernetes/test/e2e_node/kubeletconfig"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)
func setDesiredConfiguration ( initialConfig * kubeletconfig . KubeletConfiguration ) {
2018-05-08 19:15:26 -04:00
initialConfig . EnforceNodeAllocatable = [ ] string { "pods" , kubeReservedCgroup , systemReservedCgroup }
Lift embedded structure out of eviction-related KubeletConfiguration fields
- Changes the following KubeletConfiguration fields from `string` to
`map[string]string`:
- `EvictionHard`
- `EvictionSoft`
- `EvictionSoftGracePeriod`
- `EvictionMinimumReclaim`
- Adds flag parsing shims to maintain Kubelet's public flags API, while
enabling structured input in the file API.
- Also removes `kubeletconfig.ConfigurationMap`, which was an ad-hoc flag
parsing shim living in the kubeletconfig API group, and replaces it
with the `MapStringString` shim introduced in this PR. Flag parsing
shims belong in a common place, not in the kubeletconfig API.
I manually audited these to ensure that this wouldn't cause errors
parsing the command line for syntax that would have previously been
error free (`kubeletconfig.ConfigurationMap` was unique in that it
allowed keys to be provided on the CLI without values. I believe this was
done in `flags.ConfigurationMap` to facilitate the `--node-labels` flag,
which rightfully accepts value-free keys, and that this shim was then
just copied to `kubeletconfig`). Fortunately, the affected fields
(`ExperimentalQOSReserved`, `SystemReserved`, and `KubeReserved`) expect
non-empty strings in the values of the map, and as a result passing the
empty string is already an error. Thus requiring keys shouldn't break
anyone's scripts.
- Updates code and tests accordingly.
Regarding eviction operators, directionality is already implicit in the
signal type (for a given signal, the decision to evict will be made when
crossing the threshold from either above or below, never both). There is
no need to expose an operator, such as `<`, in the API. By changing
`EvictionHard` and `EvictionSoft` to `map[string]string`, this PR
simplifies the experience of working with these fields via the
`KubeletConfiguration` type. Again, flags stay the same.
Other things:
- There is another flag parsing shim, `flags.ConfigurationMap`, from the
shared flag utility. The `NodeLabels` field still uses
`flags.ConfigurationMap`. This PR moves the allocation of the
`map[string]string` for the `NodeLabels` field from
`AddKubeletConfigFlags` to the defaulter for the external
`KubeletConfiguration` type. Flags are layered on top of an internal
object that has undergone conversion from a defaulted external object,
which means that previously the mere registration of flags would have
overwritten any previously-defined defaults for `NodeLabels` (fortunately
there were none).
2017-10-19 18:42:07 -04:00
initialConfig . SystemReserved = map [ string ] string {
2017-08-15 03:56:18 -04:00
string ( v1 . ResourceCPU ) : "100m" ,
string ( v1 . ResourceMemory ) : "100Mi" ,
2019-02-01 17:15:08 -05:00
string ( pidlimit . PIDs ) : "1000" ,
2017-02-10 00:14:10 -05:00
}
Lift embedded structure out of eviction-related KubeletConfiguration fields
- Changes the following KubeletConfiguration fields from `string` to
`map[string]string`:
- `EvictionHard`
- `EvictionSoft`
- `EvictionSoftGracePeriod`
- `EvictionMinimumReclaim`
- Adds flag parsing shims to maintain Kubelet's public flags API, while
enabling structured input in the file API.
- Also removes `kubeletconfig.ConfigurationMap`, which was an ad-hoc flag
parsing shim living in the kubeletconfig API group, and replaces it
with the `MapStringString` shim introduced in this PR. Flag parsing
shims belong in a common place, not in the kubeletconfig API.
I manually audited these to ensure that this wouldn't cause errors
parsing the command line for syntax that would have previously been
error free (`kubeletconfig.ConfigurationMap` was unique in that it
allowed keys to be provided on the CLI without values. I believe this was
done in `flags.ConfigurationMap` to facilitate the `--node-labels` flag,
which rightfully accepts value-free keys, and that this shim was then
just copied to `kubeletconfig`). Fortunately, the affected fields
(`ExperimentalQOSReserved`, `SystemReserved`, and `KubeReserved`) expect
non-empty strings in the values of the map, and as a result passing the
empty string is already an error. Thus requiring keys shouldn't break
anyone's scripts.
- Updates code and tests accordingly.
Regarding eviction operators, directionality is already implicit in the
signal type (for a given signal, the decision to evict will be made when
crossing the threshold from either above or below, never both). There is
no need to expose an operator, such as `<`, in the API. By changing
`EvictionHard` and `EvictionSoft` to `map[string]string`, this PR
simplifies the experience of working with these fields via the
`KubeletConfiguration` type. Again, flags stay the same.
Other things:
- There is another flag parsing shim, `flags.ConfigurationMap`, from the
shared flag utility. The `NodeLabels` field still uses
`flags.ConfigurationMap`. This PR moves the allocation of the
`map[string]string` for the `NodeLabels` field from
`AddKubeletConfigFlags` to the defaulter for the external
`KubeletConfiguration` type. Flags are layered on top of an internal
object that has undergone conversion from a defaulted external object,
which means that previously the mere registration of flags would have
overwritten any previously-defined defaults for `NodeLabels` (fortunately
there were none).
2017-10-19 18:42:07 -04:00
initialConfig . KubeReserved = map [ string ] string {
2017-08-15 03:56:18 -04:00
string ( v1 . ResourceCPU ) : "100m" ,
string ( v1 . ResourceMemory ) : "100Mi" ,
2019-02-01 17:15:08 -05:00
string ( pidlimit . PIDs ) : "738" ,
2017-02-10 00:14:10 -05:00
}
Lift embedded structure out of eviction-related KubeletConfiguration fields
- Changes the following KubeletConfiguration fields from `string` to
`map[string]string`:
- `EvictionHard`
- `EvictionSoft`
- `EvictionSoftGracePeriod`
- `EvictionMinimumReclaim`
- Adds flag parsing shims to maintain Kubelet's public flags API, while
enabling structured input in the file API.
- Also removes `kubeletconfig.ConfigurationMap`, which was an ad-hoc flag
parsing shim living in the kubeletconfig API group, and replaces it
with the `MapStringString` shim introduced in this PR. Flag parsing
shims belong in a common place, not in the kubeletconfig API.
I manually audited these to ensure that this wouldn't cause errors
parsing the command line for syntax that would have previously been
error free (`kubeletconfig.ConfigurationMap` was unique in that it
allowed keys to be provided on the CLI without values. I believe this was
done in `flags.ConfigurationMap` to facilitate the `--node-labels` flag,
which rightfully accepts value-free keys, and that this shim was then
just copied to `kubeletconfig`). Fortunately, the affected fields
(`ExperimentalQOSReserved`, `SystemReserved`, and `KubeReserved`) expect
non-empty strings in the values of the map, and as a result passing the
empty string is already an error. Thus requiring keys shouldn't break
anyone's scripts.
- Updates code and tests accordingly.
Regarding eviction operators, directionality is already implicit in the
signal type (for a given signal, the decision to evict will be made when
crossing the threshold from either above or below, never both). There is
no need to expose an operator, such as `<`, in the API. By changing
`EvictionHard` and `EvictionSoft` to `map[string]string`, this PR
simplifies the experience of working with these fields via the
`KubeletConfiguration` type. Again, flags stay the same.
Other things:
- There is another flag parsing shim, `flags.ConfigurationMap`, from the
shared flag utility. The `NodeLabels` field still uses
`flags.ConfigurationMap`. This PR moves the allocation of the
`map[string]string` for the `NodeLabels` field from
`AddKubeletConfigFlags` to the defaulter for the external
`KubeletConfiguration` type. Flags are layered on top of an internal
object that has undergone conversion from a defaulted external object,
which means that previously the mere registration of flags would have
overwritten any previously-defined defaults for `NodeLabels` (fortunately
there were none).
2017-10-19 18:42:07 -04:00
initialConfig . EvictionHard = map [ string ] string { "memory.available" : "100Mi" }
2017-02-10 00:14:10 -05:00
// Necessary for allocatable cgroup creation.
initialConfig . CgroupsPerQOS = true
initialConfig . KubeReservedCgroup = kubeReservedCgroup
initialConfig . SystemReservedCgroup = systemReservedCgroup
}
var _ = SIGDescribe ( "Node Container Manager [Serial]" , func ( ) {
2017-02-10 00:14:10 -05:00
f := framework . NewDefaultFramework ( "node-container-manager" )
2019-07-28 00:49:36 -04:00
ginkgo . Describe ( "Validate Node Allocatable [NodeFeature:NodeAllocatable]" , func ( ) {
ginkgo . It ( "sets up the node and runs the test" , func ( ) {
2017-02-10 00:14:10 -05:00
framework . ExpectNoError ( runTest ( f ) )
} )
} )
} )
// expectFileValToEqual reads an integer from the file at filePath and returns
// an error unless the value lies within +/- delta of expectedValue. The delta
// exists to work around rounding errors in cgroup accounting.
func expectFileValToEqual(filePath string, expectedValue, delta int64) error {
	out, err := ioutil.ReadFile(filePath)
	if err != nil {
		// Include the underlying error so the failure is actionable.
		return fmt.Errorf("failed to read file %q: %v", filePath, err)
	}
	actual, err := strconv.ParseInt(strings.TrimSpace(string(out)), 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse output %v", err)
	}
	// Ensure that values are within a delta range to work around rounding errors.
	if (actual < (expectedValue - delta)) || (actual > (expectedValue + delta)) {
		return fmt.Errorf("Expected value at %q to be between %d and %d. Got %d", filePath, (expectedValue - delta), (expectedValue + delta), actual)
	}
	return nil
}
func getAllocatableLimits ( cpu , memory , pids string , capacity v1 . ResourceList ) ( * resource . Quantity , * resource . Quantity , * resource . Quantity ) {
var allocatableCPU , allocatableMemory , allocatablePIDs * resource . Quantity
2017-02-10 00:14:10 -05:00
// Total cpu reservation is 200m.
for k , v := range capacity {
if k == v1 . ResourceCPU {
2019-08-19 20:23:14 -04:00
c := v . DeepCopy ( )
allocatableCPU = & c
2017-02-10 00:14:10 -05:00
allocatableCPU . Sub ( resource . MustParse ( cpu ) )
}
if k == v1 . ResourceMemory {
2019-08-19 20:23:14 -04:00
c := v . DeepCopy ( )
allocatableMemory = & c
2017-02-10 00:14:10 -05:00
allocatableMemory . Sub ( resource . MustParse ( memory ) )
}
}
2019-02-01 17:15:08 -05:00
// Process IDs are not a node allocatable, so we have to do this ad hoc
pidlimits , err := pidlimit . Stats ( )
if err == nil && pidlimits != nil && pidlimits . MaxPID != nil {
allocatablePIDs = resource . NewQuantity ( int64 ( * pidlimits . MaxPID ) , resource . DecimalSI )
allocatablePIDs . Sub ( resource . MustParse ( pids ) )
}
return allocatableCPU , allocatableMemory , allocatablePIDs
2017-02-10 00:14:10 -05:00
}
const (
	// kubeReservedCgroup is the temporary cgroup used to enforce the kube-reserved reservation.
	kubeReservedCgroup = "kube-reserved"
	// systemReservedCgroup is the temporary cgroup used to enforce the system-reserved reservation.
	systemReservedCgroup = "system-reserved"
)
// createIfNotExists creates the cgroup described by cgroupConfig if it does
// not already exist; an existing cgroup is left untouched.
func createIfNotExists(cm cm.CgroupManager, cgroupConfig *cm.CgroupConfig) error {
	if !cm.Exists(cgroupConfig.Name) {
		if err := cm.Create(cgroupConfig); err != nil {
			return err
		}
	}
	return nil
}
func createTemporaryCgroupsForReservation ( cgroupManager cm . CgroupManager ) error {
// Create kube reserved cgroup
cgroupConfig := & cm . CgroupConfig {
2018-03-28 00:11:00 -04:00
Name : cm . NewCgroupName ( cm . RootCgroupName , kubeReservedCgroup ) ,
2017-02-10 00:14:10 -05:00
}
if err := createIfNotExists ( cgroupManager , cgroupConfig ) ; err != nil {
return err
}
// Create system reserved cgroup
2018-03-28 00:11:00 -04:00
cgroupConfig . Name = cm . NewCgroupName ( cm . RootCgroupName , systemReservedCgroup )
2017-02-10 00:14:10 -05:00
return createIfNotExists ( cgroupManager , cgroupConfig )
}
func destroyTemporaryCgroupsForReservation ( cgroupManager cm . CgroupManager ) error {
// Create kube reserved cgroup
cgroupConfig := & cm . CgroupConfig {
2018-03-28 00:11:00 -04:00
Name : cm . NewCgroupName ( cm . RootCgroupName , kubeReservedCgroup ) ,
2017-02-10 00:14:10 -05:00
}
if err := cgroupManager . Destroy ( cgroupConfig ) ; err != nil {
return err
}
2018-03-28 00:11:00 -04:00
cgroupConfig . Name = cm . NewCgroupName ( cm . RootCgroupName , systemReservedCgroup )
2017-02-10 00:14:10 -05:00
return cgroupManager . Destroy ( cgroupConfig )
}
// convertSharesToWeight converts from cgroup v1 cpu.shares (valid range
// [2, 262144]) to the equivalent cgroup v2 cpu.weight (range [1, 10000]),
// mapping the endpoints exactly: 2 -> 1 and 262144 -> 10000.
func convertSharesToWeight(shares int64) int64 {
	return 1 + ((shares-2)*9999)/262142
}
func runTest ( f * framework . Framework ) error {
2017-07-13 19:15:05 -04:00
var oldCfg * kubeletconfig . KubeletConfiguration
2017-02-10 00:14:10 -05:00
subsystems , err := cm . GetCgroupSubsystems ( )
if err != nil {
return err
}
// Get current kubelet configuration
oldCfg , err = getCurrentKubeletConfig ( )
if err != nil {
return err
}
2021-05-24 08:21:23 -04:00
// Test needs to be updated to make it run properly on systemd.
// In its current state it will result in kubelet error since
// kubeReservedCgroup and systemReservedCgroup are not configured
// correctly for systemd.
// See: https://github.com/kubernetes/kubernetes/issues/102394
if oldCfg . CgroupDriver == "systemd" {
e2eskipper . Skipf ( "unable to run test when using systemd as cgroup driver" )
}
2017-02-10 00:14:10 -05:00
// Create a cgroup manager object for manipulating cgroups.
cgroupManager := cm . NewCgroupManager ( subsystems , oldCfg . CgroupDriver )
defer destroyTemporaryCgroupsForReservation ( cgroupManager )
defer func ( ) {
if oldCfg != nil {
2021-10-27 10:26:09 -04:00
// Update the Kubelet configuration.
ginkgo . By ( "Stopping the kubelet" )
startKubelet := stopKubelet ( )
// wait until the kubelet health check will fail
gomega . Eventually ( func ( ) bool {
return kubeletHealthCheck ( kubeletHealthCheckURL )
} , time . Minute , time . Second ) . Should ( gomega . BeFalse ( ) )
framework . ExpectNoError ( e2enodekubelet . WriteKubeletConfigFile ( oldCfg ) )
ginkgo . By ( "Starting the kubelet" )
startKubelet ( )
// wait until the kubelet health check will succeed
gomega . Eventually ( func ( ) bool {
return kubeletHealthCheck ( kubeletHealthCheckURL )
} , 2 * time . Minute , 5 * time . Second ) . Should ( gomega . BeTrue ( ) )
2017-02-10 00:14:10 -05:00
}
} ( )
if err := createTemporaryCgroupsForReservation ( cgroupManager ) ; err != nil {
return err
}
2017-08-15 08:15:41 -04:00
newCfg := oldCfg . DeepCopy ( )
2017-02-10 00:14:10 -05:00
// Change existing kubelet configuration
setDesiredConfiguration ( newCfg )
// Set the new kubelet configuration.
2021-10-27 10:26:09 -04:00
// Update the Kubelet configuration.
ginkgo . By ( "Stopping the kubelet" )
startKubelet := stopKubelet ( )
// wait until the kubelet health check will fail
gomega . Eventually ( func ( ) bool {
return kubeletHealthCheck ( kubeletHealthCheckURL )
} , time . Minute , time . Second ) . Should ( gomega . BeFalse ( ) )
framework . ExpectNoError ( e2enodekubelet . WriteKubeletConfigFile ( newCfg ) )
ginkgo . By ( "Starting the kubelet" )
startKubelet ( )
// wait until the kubelet health check will succeed
gomega . Eventually ( func ( ) bool {
return kubeletHealthCheck ( kubeletHealthCheckURL )
} , 2 * time . Minute , 5 * time . Second ) . Should ( gomega . BeTrue ( ) )
2017-02-10 00:14:10 -05:00
if err != nil {
return err
}
// Set new config and current config.
currentConfig := newCfg
2018-03-28 00:11:00 -04:00
expectedNAPodCgroup := cm . ParseCgroupfsToCgroupName ( currentConfig . CgroupRoot )
expectedNAPodCgroup = cm . NewCgroupName ( expectedNAPodCgroup , "kubepods" )
if ! cgroupManager . Exists ( expectedNAPodCgroup ) {
2020-04-07 07:09:45 -04:00
return fmt . Errorf ( "Expected Node Allocatable Cgroup %q does not exist" , expectedNAPodCgroup )
2017-02-10 00:14:10 -05:00
}
2020-04-07 07:09:45 -04:00
memoryLimitFile := "memory.limit_in_bytes"
if IsCgroup2UnifiedMode ( ) {
memoryLimitFile = "memory.max"
}
2017-02-10 00:14:10 -05:00
// TODO: Update cgroupManager to expose a Status interface to get current Cgroup Settings.
2017-11-16 14:16:58 -05:00
// The node may not have updated capacity and allocatable yet, so check that it happens eventually.
2019-07-28 00:49:36 -04:00
gomega . Eventually ( func ( ) error {
2020-02-07 21:16:47 -05:00
nodeList , err := f . ClientSet . CoreV1 ( ) . Nodes ( ) . List ( context . TODO ( ) , metav1 . ListOptions { } )
2017-11-16 14:16:58 -05:00
if err != nil {
return err
}
if len ( nodeList . Items ) != 1 {
return fmt . Errorf ( "Unexpected number of node objects for node e2e. Expects only one node: %+v" , nodeList )
}
2020-04-07 07:09:45 -04:00
cgroupName := "kubepods"
if currentConfig . CgroupDriver == "systemd" {
cgroupName = "kubepods.slice"
}
2017-11-16 14:16:58 -05:00
node := nodeList . Items [ 0 ]
capacity := node . Status . Capacity
2019-02-01 17:15:08 -05:00
allocatableCPU , allocatableMemory , allocatablePIDs := getAllocatableLimits ( "200m" , "200Mi" , "1738" , capacity )
2017-11-16 14:16:58 -05:00
// Total Memory reservation is 200Mi excluding eviction thresholds.
// Expect CPU shares on node allocatable cgroup to equal allocatable.
2020-04-07 07:09:45 -04:00
shares := int64 ( cm . MilliCPUToShares ( allocatableCPU . MilliValue ( ) ) )
if IsCgroup2UnifiedMode ( ) {
// convert to the cgroup v2 cpu.weight value
if err := expectFileValToEqual ( filepath . Join ( subsystems . MountPoints [ "cpu" ] , cgroupName , "cpu.weight" ) , convertSharesToWeight ( shares ) , 10 ) ; err != nil {
return err
}
} else {
if err := expectFileValToEqual ( filepath . Join ( subsystems . MountPoints [ "cpu" ] , cgroupName , "cpu.shares" ) , shares , 10 ) ; err != nil {
return err
}
2017-11-16 14:16:58 -05:00
}
// Expect Memory limit on node allocatable cgroup to equal allocatable.
2020-04-07 07:09:45 -04:00
if err := expectFileValToEqual ( filepath . Join ( subsystems . MountPoints [ "memory" ] , cgroupName , memoryLimitFile ) , allocatableMemory . Value ( ) , 0 ) ; err != nil {
2017-11-16 14:16:58 -05:00
return err
}
2019-02-01 17:15:08 -05:00
// Expect PID limit on node allocatable cgroup to equal allocatable.
2020-04-07 07:09:45 -04:00
if err := expectFileValToEqual ( filepath . Join ( subsystems . MountPoints [ "pids" ] , cgroupName , "pids.max" ) , allocatablePIDs . Value ( ) , 0 ) ; err != nil {
2019-02-01 17:15:08 -05:00
return err
}
2017-02-10 00:14:10 -05:00
2017-11-16 14:16:58 -05:00
// Check that Allocatable reported to scheduler includes eviction thresholds.
schedulerAllocatable := node . Status . Allocatable
// Memory allocatable should take into account eviction thresholds.
2019-02-01 17:15:08 -05:00
// Process IDs are not a scheduler resource and as such cannot be tested here.
allocatableCPU , allocatableMemory , _ = getAllocatableLimits ( "200m" , "300Mi" , "1738" , capacity )
2017-11-16 14:16:58 -05:00
// Expect allocatable to include all resources in capacity.
if len ( schedulerAllocatable ) != len ( capacity ) {
return fmt . Errorf ( "Expected all resources in capacity to be found in allocatable" )
}
// CPU based evictions are not supported.
if allocatableCPU . Cmp ( schedulerAllocatable [ v1 . ResourceCPU ] ) != 0 {
return fmt . Errorf ( "Unexpected cpu allocatable value exposed by the node. Expected: %v, got: %v, capacity: %v" , allocatableCPU , schedulerAllocatable [ v1 . ResourceCPU ] , capacity [ v1 . ResourceCPU ] )
}
if allocatableMemory . Cmp ( schedulerAllocatable [ v1 . ResourceMemory ] ) != 0 {
return fmt . Errorf ( "Unexpected memory allocatable value exposed by the node. Expected: %v, got: %v, capacity: %v" , allocatableMemory , schedulerAllocatable [ v1 . ResourceMemory ] , capacity [ v1 . ResourceMemory ] )
}
return nil
2019-07-28 00:49:36 -04:00
} , time . Minute , 5 * time . Second ) . Should ( gomega . BeNil ( ) )
2017-02-10 00:14:10 -05:00
2020-04-07 07:09:45 -04:00
cgroupPath := ""
if currentConfig . CgroupDriver == "systemd" {
cgroupPath = cm . ParseSystemdToCgroupName ( kubeReservedCgroup ) . ToSystemd ( )
} else {
cgroupPath = cgroupManager . Name ( cm . NewCgroupName ( cm . RootCgroupName , kubeReservedCgroup ) )
2017-02-10 00:14:10 -05:00
}
// Expect CPU shares on kube reserved cgroup to equal it's reservation which is `100m`.
2017-08-15 03:56:18 -04:00
kubeReservedCPU := resource . MustParse ( currentConfig . KubeReserved [ string ( v1 . ResourceCPU ) ] )
2020-04-07 07:09:45 -04:00
shares := int64 ( cm . MilliCPUToShares ( kubeReservedCPU . MilliValue ( ) ) )
if IsCgroup2UnifiedMode ( ) {
if err := expectFileValToEqual ( filepath . Join ( subsystems . MountPoints [ "cpu" ] , cgroupPath , "cpu.weight" ) , convertSharesToWeight ( shares ) , 10 ) ; err != nil {
return err
}
} else {
if err := expectFileValToEqual ( filepath . Join ( subsystems . MountPoints [ "cpu" ] , cgroupPath , "cpu.shares" ) , shares , 10 ) ; err != nil {
return err
}
2017-02-10 00:14:10 -05:00
}
// Expect Memory limit kube reserved cgroup to equal configured value `100Mi`.
2017-08-15 03:56:18 -04:00
kubeReservedMemory := resource . MustParse ( currentConfig . KubeReserved [ string ( v1 . ResourceMemory ) ] )
2020-04-07 07:09:45 -04:00
if err := expectFileValToEqual ( filepath . Join ( subsystems . MountPoints [ "memory" ] , cgroupPath , memoryLimitFile ) , kubeReservedMemory . Value ( ) , 0 ) ; err != nil {
2017-02-10 00:14:10 -05:00
return err
}
2019-02-01 17:15:08 -05:00
// Expect process ID limit kube reserved cgroup to equal configured value `738`.
kubeReservedPIDs := resource . MustParse ( currentConfig . KubeReserved [ string ( pidlimit . PIDs ) ] )
2020-04-07 07:09:45 -04:00
if err := expectFileValToEqual ( filepath . Join ( subsystems . MountPoints [ "pids" ] , cgroupPath , "pids.max" ) , kubeReservedPIDs . Value ( ) , 0 ) ; err != nil {
2019-02-01 17:15:08 -05:00
return err
}
2020-04-07 07:09:45 -04:00
if currentConfig . CgroupDriver == "systemd" {
cgroupPath = cm . ParseSystemdToCgroupName ( systemReservedCgroup ) . ToSystemd ( )
} else {
cgroupPath = cgroupManager . Name ( cm . NewCgroupName ( cm . RootCgroupName , systemReservedCgroup ) )
2017-02-10 00:14:10 -05:00
}
2020-04-07 07:09:45 -04:00
2017-02-10 00:14:10 -05:00
// Expect CPU shares on system reserved cgroup to equal it's reservation which is `100m`.
2017-08-15 03:56:18 -04:00
systemReservedCPU := resource . MustParse ( currentConfig . SystemReserved [ string ( v1 . ResourceCPU ) ] )
2020-04-07 07:09:45 -04:00
shares = int64 ( cm . MilliCPUToShares ( systemReservedCPU . MilliValue ( ) ) )
if IsCgroup2UnifiedMode ( ) {
if err := expectFileValToEqual ( filepath . Join ( subsystems . MountPoints [ "cpu" ] , cgroupPath , "cpu.weight" ) , convertSharesToWeight ( shares ) , 10 ) ; err != nil {
return err
}
} else {
if err := expectFileValToEqual ( filepath . Join ( subsystems . MountPoints [ "cpu" ] , cgroupPath , "cpu.shares" ) , shares , 10 ) ; err != nil {
return err
}
2017-02-10 00:14:10 -05:00
}
// Expect Memory limit on node allocatable cgroup to equal allocatable.
2017-08-15 03:56:18 -04:00
systemReservedMemory := resource . MustParse ( currentConfig . SystemReserved [ string ( v1 . ResourceMemory ) ] )
2020-04-07 07:09:45 -04:00
if err := expectFileValToEqual ( filepath . Join ( subsystems . MountPoints [ "memory" ] , cgroupPath , memoryLimitFile ) , systemReservedMemory . Value ( ) , 0 ) ; err != nil {
2017-02-10 00:14:10 -05:00
return err
}
2019-02-01 17:15:08 -05:00
// Expect process ID limit system reserved cgroup to equal configured value `1000`.
systemReservedPIDs := resource . MustParse ( currentConfig . SystemReserved [ string ( pidlimit . PIDs ) ] )
2020-04-07 07:09:45 -04:00
if err := expectFileValToEqual ( filepath . Join ( subsystems . MountPoints [ "pids" ] , cgroupPath , "pids.max" ) , systemReservedPIDs . Value ( ) , 0 ) ; err != nil {
2019-02-01 17:15:08 -05:00
return err
}
2017-02-10 00:14:10 -05:00
return nil
}