/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
2015-09-18 16:35:56 -04:00
import (
2020-02-07 21:16:47 -05:00
"context"
2015-09-18 16:35:56 -04:00
"fmt"
2018-07-24 16:20:39 -04:00
"math"
2016-07-05 03:29:09 -04:00
"sort"
2016-01-20 18:48:52 -05:00
"strconv"
2016-10-06 11:02:51 -04:00
"strings"
2015-11-11 18:22:57 -05:00
"time"
2015-09-18 16:35:56 -04:00
2018-03-19 19:47:20 -04:00
apps "k8s.io/api/apps/v1"
2019-01-18 18:16:11 -05:00
v1 "k8s.io/api/core/v1"
2017-01-25 08:39:54 -05:00
apiequality "k8s.io/apimachinery/pkg/api/equality"
2017-01-11 09:09:48 -05:00
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2019-12-03 02:00:34 -05:00
"k8s.io/apimachinery/pkg/labels"
2017-01-11 09:09:48 -05:00
"k8s.io/apimachinery/pkg/runtime"
2017-02-26 18:26:33 -05:00
"k8s.io/apimachinery/pkg/types"
2017-01-27 15:42:17 -05:00
intstrutil "k8s.io/apimachinery/pkg/util/intstr"
2017-01-11 09:09:48 -05:00
"k8s.io/apimachinery/pkg/util/wait"
2018-03-19 19:47:20 -04:00
appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
2019-12-03 02:00:34 -05:00
appslisters "k8s.io/client-go/listers/apps/v1"
2020-04-17 15:25:06 -04:00
"k8s.io/klog/v2"
2016-03-04 19:32:32 -05:00
"k8s.io/kubernetes/pkg/controller"
2016-01-12 18:37:51 -05:00
labelsutil "k8s.io/kubernetes/pkg/util/labels"
2019-01-24 11:34:33 -05:00
"k8s.io/utils/integer"
2015-09-18 16:35:56 -04:00
)
2016-01-12 20:52:18 -05:00
const (
	// RevisionAnnotation is the revision annotation of a deployment's replica sets which records its rollout sequence.
	RevisionAnnotation = "deployment.kubernetes.io/revision"
	// RevisionHistoryAnnotation maintains the history of all old revisions that a replica set has served for a deployment.
	RevisionHistoryAnnotation = "deployment.kubernetes.io/revision-history"
	// DesiredReplicasAnnotation is the desired replicas for a deployment recorded as an annotation
	// in its replica sets. Helps in separating scaling events from the rollout process and for
	// determining if the new replica set for a deployment is really saturated.
	DesiredReplicasAnnotation = "deployment.kubernetes.io/desired-replicas"
	// MaxReplicasAnnotation is the maximum replicas a deployment can have at a given point, which
	// is deployment.spec.replicas + maxSurge. Used by the underlying replica sets to estimate their
	// proportions in case the deployment has surge replicas.
	MaxReplicasAnnotation = "deployment.kubernetes.io/max-replicas"

	// RollbackRevisionNotFound is not found rollback event reason.
	RollbackRevisionNotFound = "DeploymentRollbackRevisionNotFound"
	// RollbackTemplateUnchanged is the template unchanged rollback event reason.
	RollbackTemplateUnchanged = "DeploymentRollbackTemplateUnchanged"
	// RollbackDone is the done rollback event reason.
	RollbackDone = "DeploymentRollback"

	// Reasons for deployment conditions
	//
	// Progressing:

	// ReplicaSetUpdatedReason is added in a deployment when one of its replica sets is updated as part
	// of the rollout process.
	ReplicaSetUpdatedReason = "ReplicaSetUpdated"
	// FailedRSCreateReason is added in a deployment when it cannot create a new replica set.
	FailedRSCreateReason = "ReplicaSetCreateError"
	// NewReplicaSetReason is added in a deployment when it creates a new replica set.
	NewReplicaSetReason = "NewReplicaSetCreated"
	// FoundNewRSReason is added in a deployment when it adopts an existing replica set.
	FoundNewRSReason = "FoundNewReplicaSet"
	// NewRSAvailableReason is added in a deployment when its newest replica set is made available
	// ie. the number of new pods that have passed readiness checks and run for at least minReadySeconds
	// is at least the minimum available pods that need to run for the deployment.
	NewRSAvailableReason = "NewReplicaSetAvailable"
	// TimedOutReason is added in a deployment when its newest replica set fails to show any progress
	// within the given deadline (progressDeadlineSeconds).
	TimedOutReason = "ProgressDeadlineExceeded"
	// PausedDeployReason is added in a deployment when it is paused. Lack of progress shouldn't be
	// estimated once a deployment is paused.
	PausedDeployReason = "DeploymentPaused"
	// ResumedDeployReason is added in a deployment when it is resumed. Useful for not failing accidentally
	// deployments that paused amidst a rollout and are bounded by a deadline.
	ResumedDeployReason = "DeploymentResumed"
	//
	// Available:

	// MinimumReplicasAvailable is added in a deployment when it has its minimum replicas required available.
	MinimumReplicasAvailable = "MinimumReplicasAvailable"
	// MinimumReplicasUnavailable is added in a deployment when it doesn't have the minimum required replicas
	// available.
	MinimumReplicasUnavailable = "MinimumReplicasUnavailable"
)
2016-09-15 11:57:53 -04:00
// NewDeploymentCondition creates a new deployment condition.
2018-03-19 19:47:20 -04:00
func NewDeploymentCondition ( condType apps . DeploymentConditionType , status v1 . ConditionStatus , reason , message string ) * apps . DeploymentCondition {
return & apps . DeploymentCondition {
2016-09-15 11:57:53 -04:00
Type : condType ,
Status : status ,
2016-12-03 13:57:26 -05:00
LastUpdateTime : metav1 . Now ( ) ,
LastTransitionTime : metav1 . Now ( ) ,
2016-09-15 11:57:53 -04:00
Reason : reason ,
Message : message ,
}
}
// GetDeploymentCondition returns the condition with the provided type.
2018-03-19 19:47:20 -04:00
func GetDeploymentCondition ( status apps . DeploymentStatus , condType apps . DeploymentConditionType ) * apps . DeploymentCondition {
2016-09-15 11:57:53 -04:00
for i := range status . Conditions {
c := status . Conditions [ i ]
if c . Type == condType {
return & c
}
}
return nil
}
// SetDeploymentCondition updates the deployment to include the provided condition. If the condition that
// we are about to add already exists and has the same status and reason then we are not going to update.
2018-03-19 19:47:20 -04:00
func SetDeploymentCondition ( status * apps . DeploymentStatus , condition apps . DeploymentCondition ) {
2016-09-15 11:57:53 -04:00
currentCond := GetDeploymentCondition ( * status , condition . Type )
if currentCond != nil && currentCond . Status == condition . Status && currentCond . Reason == condition . Reason {
return
}
// Do not update lastTransitionTime if the status of the condition doesn't change.
if currentCond != nil && currentCond . Status == condition . Status {
condition . LastTransitionTime = currentCond . LastTransitionTime
}
newConditions := filterOutCondition ( status . Conditions , condition . Type )
status . Conditions = append ( newConditions , condition )
}
// RemoveDeploymentCondition removes the deployment condition with the provided type.
2018-03-19 19:47:20 -04:00
func RemoveDeploymentCondition ( status * apps . DeploymentStatus , condType apps . DeploymentConditionType ) {
2016-09-15 11:57:53 -04:00
status . Conditions = filterOutCondition ( status . Conditions , condType )
}
// filterOutCondition returns a new slice of deployment conditions without conditions with the provided type.
2018-03-19 19:47:20 -04:00
func filterOutCondition ( conditions [ ] apps . DeploymentCondition , condType apps . DeploymentConditionType ) [ ] apps . DeploymentCondition {
var newConditions [ ] apps . DeploymentCondition
2016-09-15 11:57:53 -04:00
for _ , c := range conditions {
if c . Type == condType {
continue
}
newConditions = append ( newConditions , c )
}
return newConditions
}
// ReplicaSetToDeploymentCondition converts a replica set condition into a deployment condition.
// Useful for promoting replica set failure conditions into deployments.
2018-03-19 19:47:20 -04:00
func ReplicaSetToDeploymentCondition ( cond apps . ReplicaSetCondition ) apps . DeploymentCondition {
return apps . DeploymentCondition {
Type : apps . DeploymentConditionType ( cond . Type ) ,
2016-09-15 11:57:53 -04:00
Status : cond . Status ,
LastTransitionTime : cond . LastTransitionTime ,
LastUpdateTime : cond . LastTransitionTime ,
Reason : cond . Reason ,
Message : cond . Message ,
}
}
2016-10-06 11:02:51 -04:00
// SetDeploymentRevision updates the revision for a deployment.
2018-03-19 19:47:20 -04:00
func SetDeploymentRevision ( deployment * apps . Deployment , revision string ) bool {
2016-10-06 11:02:51 -04:00
updated := false
if deployment . Annotations == nil {
deployment . Annotations = make ( map [ string ] string )
}
if deployment . Annotations [ RevisionAnnotation ] != revision {
deployment . Annotations [ RevisionAnnotation ] = revision
updated = true
}
return updated
}
2016-07-05 03:29:09 -04:00
// MaxRevision finds the highest revision in the replica sets
2018-03-19 19:47:20 -04:00
func MaxRevision ( allRSs [ ] * apps . ReplicaSet ) int64 {
2016-07-05 03:29:09 -04:00
max := int64 ( 0 )
for _ , rs := range allRSs {
if v , err := Revision ( rs ) ; err != nil {
// Skip the replica sets when it failed to parse their revision information
2018-11-09 13:49:10 -05:00
klog . V ( 4 ) . Infof ( "Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions." , err , rs )
2016-07-05 03:29:09 -04:00
} else if v > max {
max = v
}
}
return max
}
// LastRevision finds the second max revision number in all replica sets (the last revision)
2018-03-19 19:47:20 -04:00
func LastRevision ( allRSs [ ] * apps . ReplicaSet ) int64 {
2016-07-05 03:29:09 -04:00
max , secMax := int64 ( 0 ) , int64 ( 0 )
for _ , rs := range allRSs {
if v , err := Revision ( rs ) ; err != nil {
// Skip the replica sets when it failed to parse their revision information
2018-11-09 13:49:10 -05:00
klog . V ( 4 ) . Infof ( "Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions." , err , rs )
2016-07-05 03:29:09 -04:00
} else if v >= max {
secMax = max
max = v
} else if v > secMax {
secMax = v
}
}
return secMax
}
2016-10-10 07:07:38 -04:00
// Revision returns the revision number of the input object.
func Revision ( obj runtime . Object ) ( int64 , error ) {
acc , err := meta . Accessor ( obj )
if err != nil {
return 0 , err
}
v , ok := acc . GetAnnotations ( ) [ RevisionAnnotation ]
2016-10-06 11:02:51 -04:00
if ! ok {
return 0 , nil
}
return strconv . ParseInt ( v , 10 , 64 )
}
2016-07-05 03:29:09 -04:00
// SetNewReplicaSetAnnotations sets new replica set's annotations appropriately by updating its revision and
// copying required deployment annotations to it; it returns true if replica set's annotation is changed.
//
// revHistoryLimitInChars caps the total length of the revision-history annotation value;
// older entries are dropped from the front when the limit would be exceeded.
func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.ReplicaSet, newRevision string, exists bool, revHistoryLimitInChars int) bool {
	// First, copy deployment's annotations (except for apply and revision annotations)
	annotationChanged := copyDeploymentAnnotationsToReplicaSet(deployment, newRS)
	// Then, update replica set's revision annotation
	if newRS.Annotations == nil {
		newRS.Annotations = make(map[string]string)
	}
	oldRevision, ok := newRS.Annotations[RevisionAnnotation]
	// The newRS's revision should be the greatest among all RSes. Usually, its revision number is newRevision (the max revision number
	// of all old RSes + 1). However, it's possible that some of the old RSes are deleted after the newRS revision being updated, and
	// newRevision becomes smaller than newRS's revision. We should only update newRS revision when it's smaller than newRevision.

	oldRevisionInt, err := strconv.ParseInt(oldRevision, 10, 64)
	if err != nil {
		// A non-empty but unparsable revision annotation means we cannot safely compare; bail out.
		if oldRevision != "" {
			klog.Warningf("Updating replica set revision OldRevision not int %s", err)
			return false
		}
		//If the RS annotation is empty then initialise it to 0
		oldRevisionInt = 0
	}
	newRevisionInt, err := strconv.ParseInt(newRevision, 10, 64)
	if err != nil {
		klog.Warningf("Updating replica set revision NewRevision not int %s", err)
		return false
	}
	// Only bump the annotation when the new revision is strictly greater (see comment above).
	if oldRevisionInt < newRevisionInt {
		newRS.Annotations[RevisionAnnotation] = newRevision
		annotationChanged = true
		klog.V(4).Infof("Updating replica set %q revision to %s", newRS.Name, newRevision)
	}
	// If a revision annotation already existed and this replica set was updated with a new revision
	// then that means we are rolling back to this replica set. We need to preserve the old revisions
	// for historical information.
	if ok && oldRevisionInt < newRevisionInt {
		revisionHistoryAnnotation := newRS.Annotations[RevisionHistoryAnnotation]
		oldRevisions := strings.Split(revisionHistoryAnnotation, ",")
		if len(oldRevisions[0]) == 0 {
			// Empty history: the superseded revision becomes the first entry.
			newRS.Annotations[RevisionHistoryAnnotation] = oldRevision
		} else {
			// totalLen is the length the annotation would have after appending
			// oldRevision plus the separating comma.
			totalLen := len(revisionHistoryAnnotation) + len(oldRevision) + 1
			// index for the starting position in oldRevisions
			start := 0
			// Drop entries from the front (oldest first) until the result fits the limit.
			for totalLen > revHistoryLimitInChars && start < len(oldRevisions) {
				totalLen = totalLen - len(oldRevisions[start]) - 1
				start++
			}
			if totalLen <= revHistoryLimitInChars {
				oldRevisions = append(oldRevisions[start:], oldRevision)
				newRS.Annotations[RevisionHistoryAnnotation] = strings.Join(oldRevisions, ",")
			} else {
				// Even after dropping every old entry the new value would not fit.
				klog.Warningf("Not appending revision due to length limit of %v reached", revHistoryLimitInChars)
			}
		}
	}
	// If the new replica set is about to be created, we need to add replica annotations to it.
	if !exists && SetReplicasAnnotations(newRS, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+MaxSurge(*deployment)) {
		annotationChanged = true
	}
	return annotationChanged
}
// annotationsToSkip lists the annotations that must not be copied between a
// deployment and its replica sets; see skipCopyAnnotation.
var annotationsToSkip = map[string]bool{
	v1.LastAppliedConfigAnnotation: true,
	RevisionAnnotation:             true,
	RevisionHistoryAnnotation:      true,
	DesiredReplicasAnnotation:      true,
	MaxReplicasAnnotation:          true,
	apps.DeprecatedRollbackTo:      true,
}
// skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key
// TODO: How to decide which annotations should / should not be copied?
// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615
func skipCopyAnnotation(key string) bool {
	_, skip := annotationsToSkip[key]
	return skip
}
// copyDeploymentAnnotationsToReplicaSet copies deployment's annotations to replica set's annotations,
// and returns true if replica set's annotation is changed.
// Note that apply and revision annotations are not copied.
2018-03-19 19:47:20 -04:00
func copyDeploymentAnnotationsToReplicaSet ( deployment * apps . Deployment , rs * apps . ReplicaSet ) bool {
2016-07-05 03:29:09 -04:00
rsAnnotationsChanged := false
if rs . Annotations == nil {
rs . Annotations = make ( map [ string ] string )
}
for k , v := range deployment . Annotations {
// newRS revision is updated automatically in getNewReplicaSet, and the deployment's revision number is then updated
// by copying its newRS revision number. We should not copy deployment's revision to its newRS, since the update of
// deployment revision number may fail (revision becomes stale) and the revision number in newRS is more reliable.
2020-09-17 11:36:35 -04:00
if _ , exist := rs . Annotations [ k ] ; skipCopyAnnotation ( k ) || ( exist && rs . Annotations [ k ] == v ) {
2016-07-05 03:29:09 -04:00
continue
}
rs . Annotations [ k ] = v
rsAnnotationsChanged = true
}
return rsAnnotationsChanged
}
// SetDeploymentAnnotationsTo sets deployment's annotations as given RS's annotations.
// This action should be done if and only if the deployment is rolling back to this rs.
// Note that apply and revision annotations are not changed.
2018-03-19 19:47:20 -04:00
func SetDeploymentAnnotationsTo ( deployment * apps . Deployment , rollbackToRS * apps . ReplicaSet ) {
2016-07-05 03:29:09 -04:00
deployment . Annotations = getSkippedAnnotations ( deployment . Annotations )
for k , v := range rollbackToRS . Annotations {
if ! skipCopyAnnotation ( k ) {
deployment . Annotations [ k ] = v
}
}
}
// getSkippedAnnotations returns a new map holding only those entries of the
// input whose keys are in the do-not-copy set.
func getSkippedAnnotations(annotations map[string]string) map[string]string {
	skipped := make(map[string]string)
	for key, value := range annotations {
		if !skipCopyAnnotation(key) {
			continue
		}
		skipped[key] = value
	}
	return skipped
}
// FindActiveOrLatest returns the only active or the latest replica set in case there is at most one active
// replica set. If there are more active replica sets, then we should proportionally scale them.
2018-03-19 19:47:20 -04:00
func FindActiveOrLatest ( newRS * apps . ReplicaSet , oldRSs [ ] * apps . ReplicaSet ) * apps . ReplicaSet {
2016-07-05 03:29:09 -04:00
if newRS == nil && len ( oldRSs ) == 0 {
return nil
}
sort . Sort ( sort . Reverse ( controller . ReplicaSetsByCreationTimestamp ( oldRSs ) ) )
allRSs := controller . FilterActiveReplicaSets ( append ( oldRSs , newRS ) )
switch len ( allRSs ) {
case 0 :
// If there is no active replica set then we should return the newest.
if newRS != nil {
return newRS
}
return oldRSs [ 0 ]
case 1 :
return allRSs [ 0 ]
default :
return nil
}
}
// GetDesiredReplicasAnnotation returns the number of desired replicas
2018-03-19 19:47:20 -04:00
func GetDesiredReplicasAnnotation ( rs * apps . ReplicaSet ) ( int32 , bool ) {
2016-07-05 03:29:09 -04:00
return getIntFromAnnotation ( rs , DesiredReplicasAnnotation )
}
2018-03-19 19:47:20 -04:00
func getMaxReplicasAnnotation ( rs * apps . ReplicaSet ) ( int32 , bool ) {
2016-07-05 03:29:09 -04:00
return getIntFromAnnotation ( rs , MaxReplicasAnnotation )
}
2018-03-19 19:47:20 -04:00
func getIntFromAnnotation ( rs * apps . ReplicaSet , annotationKey string ) ( int32 , bool ) {
2016-07-05 03:29:09 -04:00
annotationValue , ok := rs . Annotations [ annotationKey ]
if ! ok {
return int32 ( 0 ) , false
}
intValue , err := strconv . Atoi ( annotationValue )
if err != nil {
2018-11-09 13:49:10 -05:00
klog . V ( 2 ) . Infof ( "Cannot convert the value %q with annotation key %q for the replica set %q" , annotationValue , annotationKey , rs . Name )
2016-07-05 03:29:09 -04:00
return int32 ( 0 ) , false
}
return int32 ( intValue ) , true
}
// SetReplicasAnnotations sets the desiredReplicas and maxReplicas into the annotations
2018-03-19 19:47:20 -04:00
func SetReplicasAnnotations ( rs * apps . ReplicaSet , desiredReplicas , maxReplicas int32 ) bool {
2016-07-05 03:29:09 -04:00
updated := false
if rs . Annotations == nil {
rs . Annotations = make ( map [ string ] string )
}
desiredString := fmt . Sprintf ( "%d" , desiredReplicas )
if hasString := rs . Annotations [ DesiredReplicasAnnotation ] ; hasString != desiredString {
rs . Annotations [ DesiredReplicasAnnotation ] = desiredString
updated = true
}
maxString := fmt . Sprintf ( "%d" , maxReplicas )
if hasString := rs . Annotations [ MaxReplicasAnnotation ] ; hasString != maxString {
rs . Annotations [ MaxReplicasAnnotation ] = maxString
updated = true
}
return updated
}
2019-01-18 18:16:11 -05:00
// ReplicasAnnotationsNeedUpdate return true if ReplicasAnnotations need to be updated
2018-03-19 19:47:20 -04:00
func ReplicasAnnotationsNeedUpdate ( rs * apps . ReplicaSet , desiredReplicas , maxReplicas int32 ) bool {
2018-04-01 21:27:11 -04:00
if rs . Annotations == nil {
return true
}
desiredString := fmt . Sprintf ( "%d" , desiredReplicas )
if hasString := rs . Annotations [ DesiredReplicasAnnotation ] ; hasString != desiredString {
return true
}
maxString := fmt . Sprintf ( "%d" , maxReplicas )
if hasString := rs . Annotations [ MaxReplicasAnnotation ] ; hasString != maxString {
return true
}
return false
}
2016-07-05 03:29:09 -04:00
// MaxUnavailable returns the maximum unavailable pods a rolling deployment can take.
2018-03-19 19:47:20 -04:00
func MaxUnavailable ( deployment apps . Deployment ) int32 {
2017-04-19 08:40:31 -04:00
if ! IsRollingUpdate ( & deployment ) || * ( deployment . Spec . Replicas ) == 0 {
2016-07-05 03:29:09 -04:00
return int32 ( 0 )
}
// Error caught by validation
2016-11-18 15:50:17 -05:00
_ , maxUnavailable , _ := ResolveFenceposts ( deployment . Spec . Strategy . RollingUpdate . MaxSurge , deployment . Spec . Strategy . RollingUpdate . MaxUnavailable , * ( deployment . Spec . Replicas ) )
2017-03-30 05:11:52 -04:00
if maxUnavailable > * deployment . Spec . Replicas {
return * deployment . Spec . Replicas
}
2016-07-05 03:29:09 -04:00
return maxUnavailable
}
2016-11-29 05:20:09 -05:00
// MinAvailable returns the minimum available pods of a given deployment
2018-03-19 19:47:20 -04:00
func MinAvailable ( deployment * apps . Deployment ) int32 {
2016-08-01 18:26:17 -04:00
if ! IsRollingUpdate ( deployment ) {
return int32 ( 0 )
}
2016-11-18 15:50:17 -05:00
return * ( deployment . Spec . Replicas ) - MaxUnavailable ( * deployment )
2016-08-01 18:26:17 -04:00
}
2016-07-05 03:29:09 -04:00
// MaxSurge returns the maximum surge pods a rolling deployment can take.
2018-03-19 19:47:20 -04:00
func MaxSurge ( deployment apps . Deployment ) int32 {
2016-07-05 03:29:09 -04:00
if ! IsRollingUpdate ( & deployment ) {
return int32 ( 0 )
}
// Error caught by validation
2016-11-18 15:50:17 -05:00
maxSurge , _ , _ := ResolveFenceposts ( deployment . Spec . Strategy . RollingUpdate . MaxSurge , deployment . Spec . Strategy . RollingUpdate . MaxUnavailable , * ( deployment . Spec . Replicas ) )
2016-07-05 03:29:09 -04:00
return maxSurge
}
// GetProportion will estimate the proportion for the provided replica set using 1. the current size
// of the parent deployment, 2. the replica count that needs be added on the replica sets of the
// deployment, and 3. the total replicas added in the replica sets of the deployment so far.
2018-03-19 19:47:20 -04:00
func GetProportion ( rs * apps . ReplicaSet , d apps . Deployment , deploymentReplicasToAdd , deploymentReplicasAdded int32 ) int32 {
2016-11-18 15:50:17 -05:00
if rs == nil || * ( rs . Spec . Replicas ) == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded {
2016-07-05 03:29:09 -04:00
return int32 ( 0 )
}
rsFraction := getReplicaSetFraction ( * rs , d )
allowed := deploymentReplicasToAdd - deploymentReplicasAdded
if deploymentReplicasToAdd > 0 {
// Use the minimum between the replica set fraction and the maximum allowed replicas
// when scaling up. This way we ensure we will not scale up more than the allowed
// replicas we can add.
return integer . Int32Min ( rsFraction , allowed )
}
// Use the maximum between the replica set fraction and the maximum allowed replicas
// when scaling down. This way we ensure we will not scale down more than the allowed
// replicas we can remove.
return integer . Int32Max ( rsFraction , allowed )
}
// getReplicaSetFraction estimates the fraction of replicas a replica set can have in
// 1. a scaling event during a rollout or 2. when scaling a paused deployment.
2018-03-19 19:47:20 -04:00
func getReplicaSetFraction ( rs apps . ReplicaSet , d apps . Deployment ) int32 {
2016-07-05 03:29:09 -04:00
// If we are scaling down to zero then the fraction of this replica set is its whole size (negative)
2016-11-18 15:50:17 -05:00
if * ( d . Spec . Replicas ) == int32 ( 0 ) {
return - * ( rs . Spec . Replicas )
2016-07-05 03:29:09 -04:00
}
2016-11-18 15:50:17 -05:00
deploymentReplicas := * ( d . Spec . Replicas ) + MaxSurge ( d )
2016-07-05 03:29:09 -04:00
annotatedReplicas , ok := getMaxReplicasAnnotation ( & rs )
if ! ok {
// If we cannot find the annotation then fallback to the current deployment size. Note that this
// will not be an accurate proportion estimation in case other replica sets have different values
// which means that the deployment was scaled at some point but we at least will stay in limits
// due to the min-max comparisons in getProportion.
annotatedReplicas = d . Status . Replicas
}
// We should never proportionally scale up from zero which means rs.spec.replicas and annotatedReplicas
// will never be zero here.
2016-11-18 15:50:17 -05:00
newRSsize := ( float64 ( * ( rs . Spec . Replicas ) * deploymentReplicas ) ) / float64 ( annotatedReplicas )
return integer . RoundToInt32 ( newRSsize ) - * ( rs . Spec . Replicas )
2016-07-05 03:29:09 -04:00
}
2017-06-04 22:04:13 -04:00
// RsListFromClient returns an rsListFunc that wraps the given client.
2018-03-19 19:47:20 -04:00
func RsListFromClient ( c appsclient . AppsV1Interface ) RsListFunc {
return func ( namespace string , options metav1 . ListOptions ) ( [ ] * apps . ReplicaSet , error ) {
2020-02-07 21:16:47 -05:00
rsList , err := c . ReplicaSets ( namespace ) . List ( context . TODO ( ) , options )
2017-03-14 20:39:29 -04:00
if err != nil {
return nil , err
}
2018-03-19 19:47:20 -04:00
var ret [ ] * apps . ReplicaSet
2017-03-14 20:39:29 -04:00
for i := range rsList . Items {
ret = append ( ret , & rsList . Items [ i ] )
}
return ret , err
}
}
2019-01-18 18:16:11 -05:00
// TODO: switch RsListFunc and podListFunc to full namespacers

// RsListFunc lists the ReplicaSets in the given namespace matching the given
// metav1.ListOptions.
type RsListFunc func(string, metav1.ListOptions) ([]*apps.ReplicaSet, error)

// podListFunc lists the Pods in the given namespace matching the given
// metav1.ListOptions.
type podListFunc func(string, metav1.ListOptions) (*v1.PodList, error)
2016-02-10 20:49:11 -05:00
2016-03-14 15:07:56 -04:00
// ListReplicaSets returns a slice of RSes the given deployment targets.
2017-02-26 18:26:33 -05:00
// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan),
// because only the controller itself should do that.
// However, it does filter out anything whose ControllerRef doesn't match.
2018-03-19 19:47:20 -04:00
func ListReplicaSets ( deployment * apps . Deployment , getRSList RsListFunc ) ( [ ] * apps . ReplicaSet , error ) {
2016-03-14 15:07:56 -04:00
// TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector
2017-02-26 18:26:33 -05:00
// should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830.
2016-03-14 15:07:56 -04:00
namespace := deployment . Namespace
2016-12-03 13:57:26 -05:00
selector , err := metav1 . LabelSelectorAsSelector ( deployment . Spec . Selector )
2016-03-14 15:07:56 -04:00
if err != nil {
return nil , err
}
2017-01-21 22:36:02 -05:00
options := metav1 . ListOptions { LabelSelector : selector . String ( ) }
2017-02-26 18:26:33 -05:00
all , err := getRSList ( namespace , options )
if err != nil {
2017-09-23 16:30:57 -04:00
return nil , err
2017-02-26 18:26:33 -05:00
}
// Only include those whose ControllerRef matches the Deployment.
2018-03-19 19:47:20 -04:00
owned := make ( [ ] * apps . ReplicaSet , 0 , len ( all ) )
2017-02-26 18:26:33 -05:00
for _ , rs := range all {
2017-08-02 06:36:58 -04:00
if metav1 . IsControlledBy ( rs , deployment ) {
2017-02-26 18:26:33 -05:00
owned = append ( owned , rs )
}
}
return owned , nil
2016-03-14 15:07:56 -04:00
}
// ListPods returns a list of pods the given deployment targets.
2017-02-26 18:26:33 -05:00
// This needs a list of ReplicaSets for the Deployment,
// which can be found with ListReplicaSets().
// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan),
// because only the controller itself should do that.
// However, it does filter out anything whose ControllerRef doesn't match.
2018-03-19 19:47:20 -04:00
func ListPods ( deployment * apps . Deployment , rsList [ ] * apps . ReplicaSet , getPodList podListFunc ) ( * v1 . PodList , error ) {
2016-03-14 15:07:56 -04:00
namespace := deployment . Namespace
2016-12-03 13:57:26 -05:00
selector , err := metav1 . LabelSelectorAsSelector ( deployment . Spec . Selector )
2016-03-14 15:07:56 -04:00
if err != nil {
return nil , err
}
2017-01-21 22:36:02 -05:00
options := metav1 . ListOptions { LabelSelector : selector . String ( ) }
2017-02-26 18:26:33 -05:00
all , err := getPodList ( namespace , options )
if err != nil {
return all , err
}
// Only include those whose ControllerRef points to a ReplicaSet that is in
// turn owned by this Deployment.
rsMap := make ( map [ types . UID ] bool , len ( rsList ) )
for _ , rs := range rsList {
rsMap [ rs . UID ] = true
}
owned := & v1 . PodList { Items : make ( [ ] v1 . Pod , 0 , len ( all . Items ) ) }
for i := range all . Items {
pod := & all . Items [ i ]
2017-08-02 05:41:33 -04:00
controllerRef := metav1 . GetControllerOf ( pod )
2017-02-26 18:26:33 -05:00
if controllerRef != nil && rsMap [ controllerRef . UID ] {
owned . Items = append ( owned . Items , * pod )
}
}
return owned , nil
2016-03-14 15:07:56 -04:00
}
2016-12-01 04:10:30 -05:00
// EqualIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash]
2018-04-04 20:14:07 -04:00
// We ignore pod-template-hash because:
// 1. The hash result would be different upon podTemplateSpec API changes
// (e.g. the addition of a new field will cause the hash code to change)
// 2. The deployment template won't have hash labels
2017-09-23 16:30:57 -04:00
func EqualIgnoreHash ( template1 , template2 * v1 . PodTemplateSpec ) bool {
2017-08-15 08:14:21 -04:00
t1Copy := template1 . DeepCopy ( )
t2Copy := template2 . DeepCopy ( )
2018-04-04 20:14:07 -04:00
// Remove hash labels from template.Labels before comparing
2018-03-19 19:47:20 -04:00
delete ( t1Copy . Labels , apps . DefaultDeploymentUniqueLabelKey )
delete ( t2Copy . Labels , apps . DefaultDeploymentUniqueLabelKey )
2017-09-23 16:30:57 -04:00
return apiequality . Semantic . DeepEqual ( t1Copy , t2Copy )
2016-06-03 13:53:14 -04:00
}
2016-03-14 15:07:56 -04:00
// FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template).
2018-03-19 19:47:20 -04:00
func FindNewReplicaSet ( deployment * apps . Deployment , rsList [ ] * apps . ReplicaSet ) * apps . ReplicaSet {
2017-02-21 19:00:24 -05:00
sort . Sort ( controller . ReplicaSetsByCreationTimestamp ( rsList ) )
2016-03-14 15:07:56 -04:00
for i := range rsList {
2017-09-23 16:30:57 -04:00
if EqualIgnoreHash ( & rsList [ i ] . Spec . Template , & deployment . Spec . Template ) {
2017-02-21 19:00:24 -05:00
// In rare cases, such as after cluster upgrades, Deployment may end up with
// having more than one new ReplicaSets that have the same template as its template,
// see https://github.com/kubernetes/kubernetes/issues/40415
// We deterministically choose the oldest new ReplicaSet.
2017-09-23 16:30:57 -04:00
return rsList [ i ]
2016-03-14 15:07:56 -04:00
}
}
// new ReplicaSet does not exist.
2017-09-23 16:30:57 -04:00
return nil
2016-03-14 15:07:56 -04:00
}
2017-03-22 05:26:13 -04:00
// FindOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given slice of RSes.
2016-01-19 19:40:18 -05:00
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
2018-03-19 19:47:20 -04:00
func FindOldReplicaSets ( deployment * apps . Deployment , rsList [ ] * apps . ReplicaSet ) ( [ ] * apps . ReplicaSet , [ ] * apps . ReplicaSet ) {
var requiredRSs [ ] * apps . ReplicaSet
var allRSs [ ] * apps . ReplicaSet
2017-09-23 16:30:57 -04:00
newRS := FindNewReplicaSet ( deployment , rsList )
2017-03-22 05:26:13 -04:00
for _ , rs := range rsList {
// Filter out new replica set
if newRS != nil && rs . UID == newRS . UID {
continue
}
allRSs = append ( allRSs , rs )
if * ( rs . Spec . Replicas ) != 0 {
requiredRSs = append ( requiredRSs , rs )
}
2016-01-12 20:52:18 -05:00
}
2017-09-23 16:30:57 -04:00
return requiredRSs , allRSs
2015-09-18 16:35:56 -04:00
}
2016-01-19 19:40:18 -05:00
// SetFromReplicaSetTemplate sets the desired PodTemplateSpec from a replica set template to the given deployment.
2018-03-19 19:47:20 -04:00
func SetFromReplicaSetTemplate ( deployment * apps . Deployment , template v1 . PodTemplateSpec ) * apps . Deployment {
2016-01-14 21:04:05 -05:00
deployment . Spec . Template . ObjectMeta = template . ObjectMeta
deployment . Spec . Template . Spec = template . Spec
deployment . Spec . Template . ObjectMeta . Labels = labelsutil . CloneAndRemoveLabel (
deployment . Spec . Template . ObjectMeta . Labels ,
2018-03-19 19:47:20 -04:00
apps . DefaultDeploymentUniqueLabelKey )
2016-01-14 21:04:05 -05:00
return deployment
}
2016-07-05 03:29:09 -04:00
// GetReplicaCountForReplicaSets returns the sum of Replicas of the given replica sets.
2018-03-19 19:47:20 -04:00
func GetReplicaCountForReplicaSets ( replicaSets [ ] * apps . ReplicaSet ) int32 {
2016-10-11 10:37:39 -04:00
totalReplicas := int32 ( 0 )
2016-01-19 19:40:18 -05:00
for _ , rs := range replicaSets {
2016-02-24 19:09:20 -05:00
if rs != nil {
2016-11-18 15:50:17 -05:00
totalReplicas += * ( rs . Spec . Replicas )
2016-02-24 19:09:20 -05:00
}
2015-09-29 19:55:06 -04:00
}
2016-10-11 10:37:39 -04:00
return totalReplicas
2015-09-29 19:55:06 -04:00
}
2016-02-22 17:28:28 -05:00
// GetActualReplicaCountForReplicaSets returns the sum of actual replicas of the given replica sets.
2018-03-19 19:47:20 -04:00
func GetActualReplicaCountForReplicaSets ( replicaSets [ ] * apps . ReplicaSet ) int32 {
2016-10-11 10:37:39 -04:00
totalActualReplicas := int32 ( 0 )
2016-02-22 17:28:28 -05:00
for _ , rs := range replicaSets {
2016-02-24 19:09:20 -05:00
if rs != nil {
2016-10-11 10:37:39 -04:00
totalActualReplicas += rs . Status . Replicas
2016-02-24 19:09:20 -05:00
}
2016-02-22 17:28:28 -05:00
}
2016-10-11 10:37:39 -04:00
return totalActualReplicas
2016-02-22 17:28:28 -05:00
}
2016-12-02 11:32:34 -05:00
// GetReadyReplicaCountForReplicaSets returns the number of ready pods corresponding to the given replica sets.
2018-03-19 19:47:20 -04:00
func GetReadyReplicaCountForReplicaSets ( replicaSets [ ] * apps . ReplicaSet ) int32 {
2016-12-02 11:32:34 -05:00
totalReadyReplicas := int32 ( 0 )
for _ , rs := range replicaSets {
if rs != nil {
totalReadyReplicas += rs . Status . ReadyReplicas
}
}
return totalReadyReplicas
}
2016-10-11 10:37:39 -04:00
// GetAvailableReplicaCountForReplicaSets returns the number of available pods corresponding to the given replica sets.
2018-03-19 19:47:20 -04:00
func GetAvailableReplicaCountForReplicaSets ( replicaSets [ ] * apps . ReplicaSet ) int32 {
2016-10-11 10:37:39 -04:00
totalAvailableReplicas := int32 ( 0 )
for _ , rs := range replicaSets {
if rs != nil {
totalAvailableReplicas += rs . Status . AvailableReplicas
2015-09-29 19:55:06 -04:00
}
}
2016-10-11 10:37:39 -04:00
return totalAvailableReplicas
2015-09-29 19:55:06 -04:00
}
2016-07-05 03:29:09 -04:00
// IsRollingUpdate returns true if the strategy type is a rolling update.
2018-03-19 19:47:20 -04:00
func IsRollingUpdate ( deployment * apps . Deployment ) bool {
return deployment . Spec . Strategy . Type == apps . RollingUpdateDeploymentStrategyType
2016-02-04 21:05:38 -05:00
}
2017-04-19 08:40:31 -04:00
// DeploymentComplete considers a deployment to be complete once all of its desired replicas
// are updated and available, and no old pods are running.
2018-03-19 19:47:20 -04:00
func DeploymentComplete ( deployment * apps . Deployment , newStatus * apps . DeploymentStatus ) bool {
2016-11-18 15:50:17 -05:00
return newStatus . UpdatedReplicas == * ( deployment . Spec . Replicas ) &&
2017-01-21 16:54:21 -05:00
newStatus . Replicas == * ( deployment . Spec . Replicas ) &&
2017-04-19 08:40:31 -04:00
newStatus . AvailableReplicas == * ( deployment . Spec . Replicas ) &&
2016-11-10 11:59:30 -05:00
newStatus . ObservedGeneration >= deployment . Generation
2016-09-15 11:57:53 -04:00
}
// DeploymentProgressing reports progress for a deployment. Progress is estimated by comparing the
2016-11-10 11:59:30 -05:00
// current with the new status of the deployment that the controller is observing. More specifically,
2017-05-20 15:14:50 -04:00
// when new pods are scaled up or become ready or available, or old pods are scaled down, then we
// consider the deployment is progressing.
2018-03-19 19:47:20 -04:00
func DeploymentProgressing ( deployment * apps . Deployment , newStatus * apps . DeploymentStatus ) bool {
2016-09-15 11:57:53 -04:00
oldStatus := deployment . Status
// Old replicas that need to be scaled down
oldStatusOldReplicas := oldStatus . Replicas - oldStatus . UpdatedReplicas
newStatusOldReplicas := newStatus . Replicas - newStatus . UpdatedReplicas
2016-11-10 11:59:30 -05:00
return ( newStatus . UpdatedReplicas > oldStatus . UpdatedReplicas ) ||
( newStatusOldReplicas < oldStatusOldReplicas ) ||
2017-05-20 15:14:50 -04:00
newStatus . ReadyReplicas > deployment . Status . ReadyReplicas ||
2016-11-10 11:59:30 -05:00
newStatus . AvailableReplicas > deployment . Status . AvailableReplicas
2016-09-15 11:57:53 -04:00
}
// used for unit testing
var nowFn = time.Now
// DeploymentTimedOut considers a deployment to have timed out once its condition that reports progress
// is older than progressDeadlineSeconds or a Progressing condition with a TimedOutReason reason already
// exists.
2018-03-19 19:47:20 -04:00
func DeploymentTimedOut ( deployment * apps . Deployment , newStatus * apps . DeploymentStatus ) bool {
2018-07-24 16:20:39 -04:00
if ! HasProgressDeadline ( deployment ) {
2016-09-15 11:57:53 -04:00
return false
}
// Look for the Progressing condition. If it doesn't exist, we have no base to estimate progress.
// If it's already set with a TimedOutReason reason, we have already timed out, no need to check
// again.
2018-03-19 19:47:20 -04:00
condition := GetDeploymentCondition ( * newStatus , apps . DeploymentProgressing )
2016-09-15 11:57:53 -04:00
if condition == nil {
return false
}
2017-09-07 19:26:48 -04:00
// If the previous condition has been a successful rollout then we shouldn't try to
// estimate any progress. Scenario:
//
// * progressDeadlineSeconds is smaller than the difference between now and the time
// the last rollout finished in the past.
// * the creation of a new ReplicaSet triggers a resync of the Deployment prior to the
// cached copy of the Deployment getting updated with the status.condition that indicates
// the creation of the new ReplicaSet.
//
// The Deployment will be resynced and eventually its Progressing condition will catch
// up with the state of the world.
if condition . Reason == NewRSAvailableReason {
return false
}
2016-09-15 11:57:53 -04:00
if condition . Reason == TimedOutReason {
return true
}
// Look at the difference in seconds between now and the last time we reported any
// progress or tried to create a replica set, or resumed a paused deployment and
// compare against progressDeadlineSeconds.
2016-11-08 05:41:53 -05:00
from := condition . LastUpdateTime
2017-02-10 08:14:32 -05:00
now := nowFn ( )
2016-09-15 11:57:53 -04:00
delta := time . Duration ( * deployment . Spec . ProgressDeadlineSeconds ) * time . Second
2017-02-10 08:14:32 -05:00
timedOut := from . Add ( delta ) . Before ( now )
2018-11-09 13:49:10 -05:00
klog . V ( 4 ) . Infof ( "Deployment %q timed out (%t) [last progress check: %v - now: %v]" , deployment . Name , timedOut , from , now )
2017-02-10 08:14:32 -05:00
return timedOut
2016-09-15 11:57:53 -04:00
}
2016-02-04 21:05:38 -05:00
// NewRSNewReplicas calculates the number of replicas a deployment's new RS should have.
// When one of the followings is true, we're rolling out the deployment; otherwise, we're scaling it.
// 1) The new RS is saturated: newRS's replicas == deployment's replicas
// 2) Max number of pods allowed is reached: deployment's replicas + maxSurge == all RSs' replicas
2018-03-19 19:47:20 -04:00
func NewRSNewReplicas ( deployment * apps . Deployment , allRSs [ ] * apps . ReplicaSet , newRS * apps . ReplicaSet ) ( int32 , error ) {
2016-02-04 21:05:38 -05:00
switch deployment . Spec . Strategy . Type {
2018-03-19 19:47:20 -04:00
case apps . RollingUpdateDeploymentStrategyType :
2016-02-04 21:05:38 -05:00
// Check if we can scale up.
2020-03-22 19:41:45 -04:00
maxSurge , err := intstrutil . GetScaledValueFromIntOrPercent ( deployment . Spec . Strategy . RollingUpdate . MaxSurge , int ( * ( deployment . Spec . Replicas ) ) , true )
2016-02-04 21:05:38 -05:00
if err != nil {
return 0 , err
}
// Find the total number of pods
currentPodCount := GetReplicaCountForReplicaSets ( allRSs )
2016-11-18 15:50:17 -05:00
maxTotalPods := * ( deployment . Spec . Replicas ) + int32 ( maxSurge )
2016-02-04 21:05:38 -05:00
if currentPodCount >= maxTotalPods {
// Cannot scale up.
2016-11-18 15:50:17 -05:00
return * ( newRS . Spec . Replicas ) , nil
2016-02-04 21:05:38 -05:00
}
// Scale up.
scaleUpCount := maxTotalPods - currentPodCount
// Do not exceed the number of desired replicas.
2016-11-18 15:50:17 -05:00
scaleUpCount = int32 ( integer . IntMin ( int ( scaleUpCount ) , int ( * ( deployment . Spec . Replicas ) - * ( newRS . Spec . Replicas ) ) ) )
return * ( newRS . Spec . Replicas ) + scaleUpCount , nil
2018-03-19 19:47:20 -04:00
case apps . RecreateDeploymentStrategyType :
2016-11-18 15:50:17 -05:00
return * ( deployment . Spec . Replicas ) , nil
2016-02-04 21:05:38 -05:00
default :
return 0 , fmt . Errorf ( "deployment type %v isn't supported" , deployment . Spec . Strategy . Type )
}
}
2016-02-23 23:27:24 -05:00
2016-01-28 11:35:14 -05:00
// IsSaturated checks if the new replica set is saturated by comparing its size with its deployment size.
// Both the deployment and the replica set have to believe this replica set can own all of the desired
2017-04-18 11:32:06 -04:00
// replicas in the deployment and the annotation helps in achieving that. All pods of the ReplicaSet
// need to be available.
2018-03-19 19:47:20 -04:00
func IsSaturated ( deployment * apps . Deployment , rs * apps . ReplicaSet ) bool {
2016-01-28 11:35:14 -05:00
if rs == nil {
return false
}
desiredString := rs . Annotations [ DesiredReplicasAnnotation ]
desired , err := strconv . Atoi ( desiredString )
if err != nil {
return false
}
2017-04-18 11:32:06 -04:00
return * ( rs . Spec . Replicas ) == * ( deployment . Spec . Replicas ) &&
int32 ( desired ) == * ( deployment . Spec . Replicas ) &&
rs . Status . AvailableReplicas == * ( deployment . Spec . Replicas )
2016-01-28 11:35:14 -05:00
}
2016-07-05 03:29:09 -04:00
// WaitForObservedDeployment polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration.
2016-02-23 23:27:24 -05:00
// Returns error if polling timesout.
2018-03-19 19:47:20 -04:00
func WaitForObservedDeployment ( getDeploymentFunc func ( ) ( * apps . Deployment , error ) , desiredGeneration int64 , interval , timeout time . Duration ) error {
2016-02-23 23:27:24 -05:00
// TODO: This should take clientset.Interface when all code is updated to use clientset. Keeping it this way allows the function to be used by callers who have client.Interface.
2017-09-06 16:42:26 -04:00
return wait . PollImmediate ( interval , timeout , func ( ) ( bool , error ) {
2016-02-23 23:27:24 -05:00
deployment , err := getDeploymentFunc ( )
if err != nil {
return false , err
}
return deployment . Status . ObservedGeneration >= desiredGeneration , nil
} )
}
2016-03-04 05:29:55 -05:00
// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one
// step. For example:
//
// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1)
// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1)
// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1)
// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1)
2016-04-27 00:35:14 -04:00
func ResolveFenceposts ( maxSurge , maxUnavailable * intstrutil . IntOrString , desired int32 ) ( int32 , int32 , error ) {
2020-03-22 19:41:45 -04:00
surge , err := intstrutil . GetScaledValueFromIntOrPercent ( intstrutil . ValueOrDefault ( maxSurge , intstrutil . FromInt ( 0 ) ) , int ( desired ) , true )
2016-03-04 05:29:55 -05:00
if err != nil {
return 0 , 0 , err
}
2020-03-22 19:41:45 -04:00
unavailable , err := intstrutil . GetScaledValueFromIntOrPercent ( intstrutil . ValueOrDefault ( maxUnavailable , intstrutil . FromInt ( 0 ) ) , int ( desired ) , false )
2016-03-04 05:29:55 -05:00
if err != nil {
return 0 , 0 , err
}
if surge == 0 && unavailable == 0 {
// Validation should never allow the user to explicitly use zero values for both maxSurge
// maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero.
// If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the
// theory that surge might not work due to quota.
unavailable = 1
}
2016-04-27 00:35:14 -04:00
return int32 ( surge ) , int32 ( unavailable ) , nil
2016-03-04 05:29:55 -05:00
}
2018-07-24 16:20:39 -04:00
2019-01-18 18:16:11 -05:00
// HasProgressDeadline checks if the Deployment d is expected to surface the reason
// "ProgressDeadlineExceeded" when the Deployment progress takes longer than expected time.
2018-07-24 16:20:39 -04:00
func HasProgressDeadline ( d * apps . Deployment ) bool {
return d . Spec . ProgressDeadlineSeconds != nil && * d . Spec . ProgressDeadlineSeconds != math . MaxInt32
}
2019-01-18 18:16:11 -05:00
// HasRevisionHistoryLimit checks if the Deployment d is expected to keep a specified number of
// old replicaSets. These replicaSets are mainly kept with the purpose of rollback.
// The RevisionHistoryLimit can start from 0 (no retained replicasSet). When set to math.MaxInt32,
// the Deployment will keep all revisions.
2018-08-09 22:12:43 -04:00
func HasRevisionHistoryLimit ( d * apps . Deployment ) bool {
return d . Spec . RevisionHistoryLimit != nil && * d . Spec . RevisionHistoryLimit != math . MaxInt32
}
2019-12-03 02:00:34 -05:00
// GetDeploymentsForReplicaSet returns a list of Deployments that potentially
// match a ReplicaSet. Only the one specified in the ReplicaSet's ControllerRef
// will actually manage it.
// Returns an error only if no matching Deployments are found.
func GetDeploymentsForReplicaSet(deploymentLister appslisters.DeploymentLister, rs *apps.ReplicaSet) ([]*apps.Deployment, error) {
	// A label-less ReplicaSet can never be selected by any Deployment.
	if len(rs.Labels) == 0 {
		return nil, fmt.Errorf("no deployments found for ReplicaSet %v because it has no labels", rs.Name)
	}

	// TODO: MODIFY THIS METHOD so that it checks for the podTemplateSpecHash label
	candidates, err := deploymentLister.Deployments(rs.Namespace).List(labels.Everything())
	if err != nil {
		return nil, err
	}

	var matching []*apps.Deployment
	for _, d := range candidates {
		selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
		if err != nil {
			return nil, fmt.Errorf("invalid label selector: %v", err)
		}
		// If a deployment with a nil or empty selector creeps in, it should match nothing, not everything.
		if selector.Empty() || !selector.Matches(labels.Set(rs.Labels)) {
			continue
		}
		matching = append(matching, d)
	}

	if len(matching) == 0 {
		return nil, fmt.Errorf("could not find deployments set for ReplicaSet %s in namespace %s with labels: %v", rs.Name, rs.Namespace, rs.Labels)
	}
	return matching, nil
}
2020-12-20 00:37:00 -05:00
// ReplicaSetsByRevision sorts a list of ReplicaSet by revision, using their creation timestamp or name as a tie breaker.
// By using the creation timestamp, this sorts from old to new replica sets.
type ReplicaSetsByRevision []*apps.ReplicaSet

func (o ReplicaSetsByRevision) Len() int      { return len(o) }
func (o ReplicaSetsByRevision) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o ReplicaSetsByRevision) Less(i, j int) bool {
	rev1, err1 := Revision(o[i])
	rev2, err2 := Revision(o[j])
	// When either revision is unreadable, or they tie, fall back to the
	// creation-timestamp (then name) ordering for determinism.
	if err1 != nil || err2 != nil || rev1 == rev2 {
		return controller.ReplicaSetsByCreationTimestamp(o).Less(i, j)
	}
	return rev1 < rev2
}