Mirror of https://github.com/kubernetes/kubernetes.git, synced 2026-03-24 19:36:00 -04:00
TestNodeSyncResync closes opChan after observing the first resync and then waits for the loop to exit. There is still a small window where the 1ms resync timer fires again before the select notices the closed channel. When that happens, ReportResult sends a second notification on the unbuffered reportChan, the loop blocks in the send, and the test waits forever on doneChan.

Allow one queued notification so the loop can drain that race and reach the closed-opChan case. The test still validates that a resync happened; it just stops depending on exact scheduling between two ready events.

Tested: go test -race ./pkg/controller/nodeipam/ipam/sync -run TestNodeSyncResync -count=200
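The race is easiest to see in isolation. Below is a minimal, self-contained Go sketch of the same channel shape; the loop, opChan, reportChan, and done names echo the test, but the types here are hypothetical and not the controller's. A loop re-arms a short timer each iteration and reports after every fire, while the caller closes opChan after the first report. With an unbuffered report channel, a second timer fire in that window can block the loop forever; a one-slot buffer absorbs it.

// race_sketch.go: hypothetical standalone reproduction, not the controller's
// actual Loop/ReportResult implementation.
package main

import (
	"fmt"
	"time"
)

func loop(opChan <-chan struct{}, reportChan chan<- struct{}, done chan<- struct{}) {
	defer close(done)
	timer := time.NewTimer(time.Millisecond)
	defer timer.Stop()
	for {
		select {
		case _, ok := <-opChan:
			if !ok {
				return // opChan closed: the loop exits
			}
		case <-timer.C:
			// With an unbuffered reportChan this send blocks forever once
			// the caller has stopped receiving; capacity 1 lets it complete.
			reportChan <- struct{}{}
		}
		timer.Reset(time.Millisecond) // re-arm, mirroring the resync timer
	}
}

func main() {
	opChan := make(chan struct{})
	reportChan := make(chan struct{}, 1) // one queued notification drains the race
	done := make(chan struct{})
	go loop(opChan, reportChan, done)

	<-reportChan  // observe the first resync notification
	close(opChan) // the 1ms timer may fire once more in this window
	<-done        // hangs with cap 0; exits cleanly with cap 1
	fmt.Println("loop exited")
}

One slot suffices here because the timer is re-armed only after a send completes: once the racing notification is queued, the loop returns to the select long before the next 1ms fire, so the closed opChan case wins and the loop exits.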
307 lines · 7.7 KiB · Go
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sync

import (
	"context"
	"fmt"
	"net"
	"reflect"
	"testing"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"
	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test"
	netutils "k8s.io/utils/net"

	v1 "k8s.io/api/core/v1"
)

var (
	_, clusterCIDRRange, _ = netutils.ParseCIDRSloppy("10.1.0.0/16")
)

type fakeEvent struct {
	nodeName string
	reason   string
}

type fakeAPIs struct {
	aliasRange    *net.IPNet
	aliasErr      error
	addAliasErr   error
	nodeRet       *v1.Node
	nodeErr       error
	updateNodeErr error
	resyncTimeout time.Duration
	reportChan    chan struct{}

	updateNodeNetworkUnavailableErr error

	calls   []string
	events  []fakeEvent
	results []error

	logger klog.Logger
}

func (f *fakeAPIs) Alias(ctx context.Context, node *v1.Node) (*net.IPNet, error) {
	f.calls = append(f.calls, fmt.Sprintf("alias %v", node.Name))
	return f.aliasRange, f.aliasErr
}

func (f *fakeAPIs) AddAlias(ctx context.Context, node *v1.Node, cidrRange *net.IPNet) error {
	f.calls = append(f.calls, fmt.Sprintf("addAlias %v %v", node.Name, cidrRange))
	return f.addAliasErr
}

func (f *fakeAPIs) Node(ctx context.Context, name string) (*v1.Node, error) {
	f.calls = append(f.calls, fmt.Sprintf("node %v", name))
	return f.nodeRet, f.nodeErr
}

func (f *fakeAPIs) UpdateNodePodCIDR(ctx context.Context, node *v1.Node, cidrRange *net.IPNet) error {
	f.calls = append(f.calls, fmt.Sprintf("updateNode %v", node))
	return f.updateNodeErr
}

func (f *fakeAPIs) UpdateNodeNetworkUnavailable(nodeName string, unavailable bool) error {
	f.calls = append(f.calls, fmt.Sprintf("updateNodeNetworkUnavailable %v %v", nodeName, unavailable))
	return f.updateNodeNetworkUnavailableErr
}

func (f *fakeAPIs) EmitNodeWarningEvent(nodeName, reason, fmtStr string, args ...interface{}) {
	f.events = append(f.events, fakeEvent{nodeName, reason})
}

func (f *fakeAPIs) ReportResult(err error) {
	f.logger.V(2).Info("ReportResult", "err", err)
	f.results = append(f.results, err)
	if f.reportChan != nil {
		f.reportChan <- struct{}{}
	}
}

func (f *fakeAPIs) ResyncTimeout() time.Duration {
	if f.resyncTimeout == 0 {
		return time.Second * 10000
	}
	return f.resyncTimeout
}

func (f *fakeAPIs) dumpTrace() {
	for i, x := range f.calls {
		f.logger.Info("trace", "index", i, "call", x)
	}
}

var nodeWithoutCIDRRange = &v1.Node{
	ObjectMeta: metav1.ObjectMeta{Name: "node1"},
}

var nodeWithCIDRRange = &v1.Node{
	ObjectMeta: metav1.ObjectMeta{Name: "node1"},
	Spec:       v1.NodeSpec{PodCIDR: "10.1.1.0/24"},
}

func TestNodeSyncUpdate(t *testing.T) {
	t.Parallel()

	for _, tc := range []struct {
		desc string
		mode NodeSyncMode
		node *v1.Node
		fake fakeAPIs

		events    []fakeEvent
		wantError bool
	}{
		{
			desc: "validate range ==",
			mode: SyncFromCloud,
			node: nodeWithCIDRRange,
			fake: fakeAPIs{
				aliasRange: test.MustParseCIDR(nodeWithCIDRRange.Spec.PodCIDR),
			},
		},
		{
			desc:   "validate range !=",
			mode:   SyncFromCloud,
			node:   nodeWithCIDRRange,
			fake:   fakeAPIs{aliasRange: test.MustParseCIDR("192.168.0.0/24")},
			events: []fakeEvent{{"node1", "CloudCIDRAllocatorMismatch"}},
		},
		{
			desc:      "update alias from node",
			mode:      SyncFromCloud,
			node:      nodeWithCIDRRange,
			events:    []fakeEvent{{"node1", "CloudCIDRAllocatorInvalidMode"}},
			wantError: true,
		},
		{
			desc: "update alias from node",
			mode: SyncFromCluster,
			node: nodeWithCIDRRange,
			// XXX/bowei -- validation
		},
		{
			desc: "update node from alias",
			mode: SyncFromCloud,
			node: nodeWithoutCIDRRange,
			fake: fakeAPIs{aliasRange: test.MustParseCIDR("10.1.2.3/16")},
			// XXX/bowei -- validation
		},
		{
			desc:      "update node from alias",
			mode:      SyncFromCluster,
			node:      nodeWithoutCIDRRange,
			fake:      fakeAPIs{aliasRange: test.MustParseCIDR("10.1.2.3/16")},
			events:    []fakeEvent{{"node1", "CloudCIDRAllocatorInvalidMode"}},
			wantError: true,
		},
		{
			desc:      "allocate range",
			mode:      SyncFromCloud,
			node:      nodeWithoutCIDRRange,
			events:    []fakeEvent{{"node1", "CloudCIDRAllocatorInvalidMode"}},
			wantError: true,
		},
		{
			desc: "allocate range",
			mode: SyncFromCluster,
			node: nodeWithoutCIDRRange,
		},
		{
			desc: "update with node==nil",
			mode: SyncFromCluster,
			node: nil,
			fake: fakeAPIs{
				nodeRet: nodeWithCIDRRange,
			},
			wantError: false,
		},
	} {
		logger, _ := ktesting.NewTestContext(t)
		cidr, _ := cidrset.NewCIDRSet(clusterCIDRRange, 24)
		tc.fake.logger = logger
		sync := New(&tc.fake, &tc.fake, &tc.fake, tc.mode, "node1", cidr)
		doneChan := make(chan struct{})

		// Do a single step of the loop.
		go sync.Loop(logger, doneChan)
		sync.Update(tc.node)
		close(sync.opChan)
		<-doneChan
		tc.fake.dumpTrace()

		if !reflect.DeepEqual(tc.fake.events, tc.events) {
			t.Errorf("%v, %v; fake.events = %#v, want %#v", tc.desc, tc.mode, tc.fake.events, tc.events)
		}

		var hasError bool
		for _, r := range tc.fake.results {
			hasError = hasError || (r != nil)
		}
		if hasError != tc.wantError {
			t.Errorf("%v, %v; hasError = %t, errors = %v, want %t",
				tc.desc, tc.mode, hasError, tc.fake.events, tc.wantError)
		}
	}
}

func TestNodeSyncResync(t *testing.T) {
	logger, _ := ktesting.NewTestContext(t)
	fake := &fakeAPIs{
		nodeRet:       nodeWithCIDRRange,
		resyncTimeout: time.Millisecond,
		// Allow one extra resync notification to land while the test is
		// closing the loop down.
		reportChan: make(chan struct{}, 1),
		logger:     logger,
	}
	cidr, _ := cidrset.NewCIDRSet(clusterCIDRRange, 24)
	sync := New(fake, fake, fake, SyncFromCluster, "node1", cidr)
	doneChan := make(chan struct{})
	go sync.Loop(logger, doneChan)
	// Wait for the first resync to complete.
	<-fake.reportChan
	// Close the operation channel to stop the loop.
	close(sync.opChan)
	// Wait for the loop to complete.
	<-doneChan
	fake.dumpTrace()
}

func TestNodeSyncDelete(t *testing.T) {
	t.Parallel()

	for _, tc := range []struct {
		desc string
		mode NodeSyncMode
		node *v1.Node
		fake fakeAPIs
	}{
		{
			desc: "delete",
			mode: SyncFromCluster,
			node: nodeWithCIDRRange,
		},
		{
			desc: "delete without CIDR range",
			mode: SyncFromCluster,
			node: nodeWithoutCIDRRange,
		},
		{
			desc: "delete with invalid CIDR range",
			mode: SyncFromCluster,
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "node1"},
				Spec:       v1.NodeSpec{PodCIDR: "invalid"},
			},
		},
	} {
		logger, _ := ktesting.NewTestContext(t)
		cidr, _ := cidrset.NewCIDRSet(clusterCIDRRange, 24)
		tc.fake.logger = logger
		sync := New(&tc.fake, &tc.fake, &tc.fake, tc.mode, "node1", cidr)
		doneChan := make(chan struct{})

		// Do a single step of the loop.
		go sync.Loop(logger, doneChan)
		sync.Delete(tc.node)
		<-doneChan
		tc.fake.dumpTrace()

		/*
			if !reflect.DeepEqual(tc.fake.events, tc.events) {
				t.Errorf("%v, %v; fake.events = %#v, want %#v", tc.desc, tc.mode, tc.fake.events, tc.events)
			}

			var hasError bool
			for _, r := range tc.fake.results {
				hasError = hasError || (r != nil)
			}
			if hasError != tc.wantError {
				t.Errorf("%v, %v; hasError = %t, errors = %v, want %t",
					tc.desc, tc.mode, hasError, tc.fake.events, tc.wantError)
			}
		*/
	}
}