Merge 782203c96c into 44ff6085cd
This commit is contained in: commit 8853bbd2b9
3 changed files with 346 additions and 385 deletions
@@ -162,6 +162,7 @@ type nftablesTracer struct {
// additional info about rules we've hit
markMasq bool
masquerade bool
dnat bool
}

// newNFTablesTracer creates an nftablesTracer. nodeIPs are the IP to treat as local node
@@ -269,16 +270,21 @@ var sourceAddrRegexp = regexp.MustCompile(`^ip6* saddr (!= )?(\S+)`)
var sourceAddrLookupRegexp = regexp.MustCompile(`^ip6* saddr (!= )?\{([^}]*)\}`)
var sourceAddrLocalRegexp = regexp.MustCompile(`^fib saddr type local`)

var endpointVMAPRegexp = regexp.MustCompile(`^numgen random mod \d+ vmap \{(.*)\}$`)
var endpointVMapRegexp = regexp.MustCompile(`^numgen random mod \d+ vmap \{(.*)\}$`)
var endpointVMapEntryRegexp = regexp.MustCompile(`\d+ : goto (\S+)`)
var endpointDNATRegexp = regexp.MustCompile(`^dnat ip6* addr \. port to numgen random mod \d+ map \{(.*)\}$`)
var endpointDNATEntryRegexp = regexp.MustCompile(`\d+ : (\S+) \. (\d+)`)

var masqMarkRegexp = regexp.MustCompile(`^mark set mark or 0x[[:xdigit:]]+$`)
var masqCheckRegexp = regexp.MustCompile(`^mark and 0x[[:xdigit:]]+ != 0 mark set mark xor 0x[[:xdigit:]]+`)
var masqueradeRegexp = regexp.MustCompile(`^masquerade fully-random$`)
var dnatCheckRegexp = regexp.MustCompile(`^ct status dnat`)
var hairpinCheckRegexp = regexp.MustCompile(`^ip6* saddr \. ip6* daddr \. meta l4proto \. th dport @hairpin-connections`)
var jumpRegexp = regexp.MustCompile(`^(jump|goto) (\S+)$`)
var returnRegexp = regexp.MustCompile(`^return$`)
var verdictRegexp = regexp.MustCompile(`^(drop|reject)$`)
var dnatRegexp = regexp.MustCompile(`^meta l4proto (tcp|udp|sctp) dnat to (\S+)$`)
var l4protoRegexp = regexp.MustCompile(`^meta l4proto (tcp|udp|sctp)`)
var dnatRegexp = regexp.MustCompile(`^dnat to (\S+)$`)

var ignoredRegexp = regexp.MustCompile(strings.Join(
[]string{
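For reference, a minimal standalone sketch (not part of the change) of how the new endpointDNATRegexp / endpointDNATEntryRegexp pair decomposes one of the generated DNAT map rules; the sample rule string is an assumption shaped like the rules asserted in the proxier test expectations further down.

```go
package main

import (
	"fmt"
	"net"
	"regexp"
)

// Regexps copied verbatim from the tracer above.
var (
	endpointDNATRegexp      = regexp.MustCompile(`^dnat ip6* addr \. port to numgen random mod \d+ map \{(.*)\}$`)
	endpointDNATEntryRegexp = regexp.MustCompile(`\d+ : (\S+) \. (\d+)`)
)

func main() {
	// Sample rule fragment for a two-endpoint service; by this point in
	// runChain the leading "meta l4proto tcp" has already been consumed by
	// l4protoRegexp.
	rule := "dnat ip addr . port to numgen random mod 2 map { 0 : 10.180.0.4 . 80 , 1 : 10.180.0.5 . 80 }"

	match := endpointDNATRegexp.FindStringSubmatch(rule)
	if match == nil {
		fmt.Println("no match")
		return
	}
	// match[1] holds the map elements; each "index : IP . port" entry becomes
	// one traced output, which is how the tracer "DNATs to *all* endpoints"
	// instead of picking one at random.
	for _, entry := range endpointDNATEntryRegexp.FindAllStringSubmatch(match[1], -1) {
		fmt.Println(net.JoinHostPort(entry[1], entry[2])) // 10.180.0.4:80, then 10.180.0.5:80
	}
}
```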
@@ -310,6 +316,28 @@ func (tracer *nftablesTracer) runChain(chname, sourceIP, protocol, destIP, destP
// some of the regexes might match parts of others.

switch {
case dnatCheckRegexp.MatchString(rule):
// `^ct status dnat`
// Part of the hairpin check
match := dnatCheckRegexp.FindStringSubmatch(rule)
rule = strings.TrimPrefix(rule, match[0])
if !tracer.dnat {
rule = ""
break
}

case hairpinCheckRegexp.MatchString(rule):
// `^ip6* saddr \. ip6* daddr \. meta l4proto \. th dport @hairpin-connections`
// Tests whether the packet is hairpinning back to its client.
match := hairpinCheckRegexp.FindStringSubmatch(rule)
rule = strings.TrimPrefix(rule, match[0])
if sourceIP != destIP {
rule = ""
break
}
// HACK: we don't actually bother doing the full lookup; we
// assume that if src and dst matched, it's hairpin.

case destIPOnlyLookupRegexp.MatchString(rule):
// `^ip6* daddr @(\S+)`
// Tests whether destIP is a member of the indicated set.
@@ -424,6 +452,15 @@ func (tracer *nftablesTracer) runChain(chname, sourceIP, protocol, destIP, destP
break
}

case l4protoRegexp.MatchString(rule):
// `meta l4proto (tcp|udp|sctp)`
match := l4protoRegexp.FindStringSubmatch(rule)
rule = strings.TrimPrefix(rule, match[0])
if match[1] != protocol {
rule = ""
break
}

case masqMarkRegexp.MatchString(rule):
// `^mark set mark or 0x[[:xdigit:]]+$`
// Mark for masquerade.
@@ -490,20 +527,20 @@ func (tracer *nftablesTracer) runChain(chname, sourceIP, protocol, destIP, destP
return false

case dnatRegexp.MatchString(rule):
// `meta l4proto (tcp|udp|sctp) dnat to (\S+)`
// `^dnat to (\S+)$`
// DNAT to an endpoint IP and terminate processing.
match := dnatRegexp.FindStringSubmatch(rule)
destEndpoint := match[2]
destEndpoint := match[1]

tracer.matches = append(tracer.matches, ruleObj.Rule)
tracer.outputs = append(tracer.outputs, destEndpoint)
return true

case endpointVMAPRegexp.MatchString(rule):
case endpointVMapRegexp.MatchString(rule):
// `^numgen random mod \d+ vmap \{(.*)\}$`
// Selects a random endpoint and jumps to it. For tracePacket's
// purposes, we jump to *all* of the endpoints.
match := endpointVMAPRegexp.FindStringSubmatch(rule)
match := endpointVMapRegexp.FindStringSubmatch(rule)
elements := match[1]

for _, match = range endpointVMapEntryRegexp.FindAllStringSubmatch(elements, -1) {
@@ -518,6 +555,22 @@ func (tracer *nftablesTracer) runChain(chname, sourceIP, protocol, destIP, destP
}
return true

case endpointDNATRegexp.MatchString(rule):
// `^dnat ip6* addr \. port to numgen random mod \d+ map \{(.*)\}$`
// Selects a random endpoint and DNATs to it. For tracePacket's
// purposes, we DNAT to *all* of the endpoints.
match := endpointDNATRegexp.FindStringSubmatch(rule)
elements := match[1]

for _, match = range endpointDNATEntryRegexp.FindAllStringSubmatch(elements, -1) {
// `\d+ : (\S+) \. (\d+)`
endpointIP, endpointPort := match[1], match[2]

tracer.matches = append(tracer.matches, ruleObj.Rule)
tracer.outputs = append(tracer.outputs, net.JoinHostPort(endpointIP, endpointPort))
}
return true

default:
tracer.t.Errorf("unmatched rule: %s", ruleObj.Rule)
rule = ""
@@ -552,6 +605,7 @@ func tracePacket(t *testing.T, nft *knftables.Fake, sourceIP, protocol, destIP,
if err != nil {
t.Errorf("failed to parse host port '%s': %s", tracer.outputs[0], err.Error())
}
tracer.dnat = true
}

// Run filter-forward, return if packet is terminated.

@@ -93,7 +93,8 @@ const (
firewallCheckChain = "firewall-check"

// masquerading
masqueradingChain = "masquerading"
masqueradingChain = "masquerading"
hairpinConnectionsSet = "hairpin-connections"
)

// NewDualStackProxier creates a MetaProxier instance, with IPv4 and IPv6 proxies.
@@ -154,7 +155,6 @@ type Proxier struct {
// updating nftables with some partial data after kube-proxy restart.
endpointSlicesSynced bool
servicesSynced bool
syncedOnce bool
lastFullSync time.Time
needFullSync bool
initialized int32
@@ -197,6 +197,7 @@ type Proxier struct {
noEndpointServices *nftElementStorage
noEndpointNodePorts *nftElementStorage
serviceNodePorts *nftElementStorage
hairpinConnections *nftElementStorage
}

// Proxier implements proxy.Provider
@@ -267,6 +268,7 @@ func NewProxier(ctx context.Context,
noEndpointServices: newNFTElementStorage("map", noEndpointServicesMap),
noEndpointNodePorts: newNFTElementStorage("map", noEndpointNodePortsMap),
serviceNodePorts: newNFTElementStorage("map", serviceNodePortsMap),
hairpinConnections: newNFTElementStorage("set", hairpinConnectionsSet),
}

logger.V(2).Info("NFTables sync params", "minSyncPeriod", minSyncPeriod, "syncPeriod", syncPeriod, "maxSyncPeriod", proxyutil.FullSyncPeriod)
@@ -463,6 +465,13 @@ func (proxier *Proxier) setupNFTables(tx *knftables.Transaction) {
ensureChain(chain, tx, createdChains, false)
}

// add hairpin-connections set
tx.Add(&knftables.Set{
Name: hairpinConnectionsSet,
Type: ipvX_addr + " . " + ipvX_addr + " . inet_proto . inet_service",
Comment: ptr.To("service hairpin connections"),
})

// Add the rules in the masquerading chain
tx.Add(&knftables.Rule{
Chain: masqueradingChain,
@@ -472,6 +481,14 @@ func (proxier *Proxier) setupNFTables(tx *knftables.Transaction) {
"masquerade fully-random",
),
})
tx.Add(&knftables.Rule{
Chain: masqueradingChain,
Rule: knftables.Concat(
"ct status dnat",
ipX, "saddr", ".", ipX, "daddr", ".", "meta l4proto", ".", "th dport", "@", hairpinConnectionsSet,
"masquerade fully-random",
),
})

// add cluster-ips set.
tx.Add(&knftables.Set{
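As a quick cross-check of the rule text that second tx.Add produces, a hedged standalone sketch (assuming the sigs.k8s.io/knftables module is available); the expected output is the new masquerading rule that shows up in the updated baseRules in the test file, IPv4 case.

```go
package main

import (
	"fmt"

	"sigs.k8s.io/knftables"
)

func main() {
	// IPv4 values for the identifiers used in the transaction above.
	ipX := "ip"
	hairpinConnectionsSet := "hairpin-connections"

	rule := knftables.Concat(
		"ct status dnat",
		ipX, "saddr", ".", ipX, "daddr", ".", "meta l4proto", ".", "th dport", "@", hairpinConnectionsSet,
		"masquerade fully-random",
	)
	// Per the updated baseRules below, this should print:
	// ct status dnat ip saddr . ip daddr . meta l4proto . th dport @hairpin-connections masquerade fully-random
	fmt.Println(rule)
}
```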
@@ -614,6 +631,33 @@ func (proxier *Proxier) setupNFTables(tx *knftables.Transaction) {
Type: "inet_proto . inet_service : verdict",
Comment: ptr.To("NodePort traffic"),
})

if proxier.masqueradeAll {
tx.Add(&knftables.Rule{
Chain: servicesChain,
Rule: knftables.Concat(
ipX, "daddr", "@", clusterIPsSet,
proxier.masqueradeRule,
),
Comment: ptr.To("masquerade all clusterIP traffic"),
})
} else if proxier.localDetector.IsImplemented() {
// This masquerades off-cluster traffic to a service VIP. The
// idea is that you can establish a static route for your
// Service range, routing to any node, and that node will
// bridge into the Service for you. Since that might bounce
// off-node, we masquerade here.
tx.Add(&knftables.Rule{
Chain: servicesChain,
Rule: knftables.Concat(
ipX, "daddr", "@", clusterIPsSet,
proxier.localDetector.IfNotLocalNFT(),
proxier.masqueradeRule,
),
Comment: ptr.To("masquerade clusterIP traffic from outside cluster"),
})
}

tx.Add(&knftables.Rule{
Chain: servicesChain,
Rule: knftables.Concat(
@@ -649,6 +693,7 @@ func (proxier *Proxier) setupNFTables(tx *knftables.Transaction) {
proxier.noEndpointServices.readOrReset(tx, proxier.nftables, proxier.logger)
proxier.noEndpointNodePorts.readOrReset(tx, proxier.nftables, proxier.logger)
proxier.serviceNodePorts.readOrReset(tx, proxier.nftables, proxier.logger)
proxier.hairpinConnections.readOrReset(tx, proxier.nftables, proxier.logger)
}

// Sync is called to synchronize the proxier state to nftables as soon as possible.
@@ -1034,7 +1079,6 @@ func (proxier *Proxier) syncProxyRules() (retryError error) {
doFullSync := proxier.needFullSync || (time.Since(proxier.lastFullSync) > proxyutil.FullSyncPeriod)

defer func() {
proxier.syncedOnce = true
metrics.SyncProxyRulesLatency.WithLabelValues(string(proxier.ipFamily)).Observe(metrics.SinceInSeconds(start))
if !doFullSync {
metrics.SyncPartialProxyRulesLatency.WithLabelValues(string(proxier.ipFamily)).Observe(metrics.SinceInSeconds(start))
@@ -1169,16 +1213,33 @@ func (proxier *Proxier) syncProxyRules() (retryError error) {
!serviceUpdateResult.UpdatedServices.Has(svcName.NamespacedName) &&
!endpointUpdateResult.UpdatedServices.Has(svcName.NamespacedName)

// Note the endpoint chains that will be used
// We only use separate endpoint chains when adding affinity rules
serviceUsesAffinity := svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP

// Note the endpoints that will be used
for _, ep := range allLocallyReachableEndpoints {
if epInfo, ok := ep.(*endpointInfo); ok {
ensureChain(epInfo.chainName, tx, activeChains, skipServiceUpdate ||
proxier.epChainSkipUpdate(existingChains, existingAffinitySets, svcInfo, epInfo))
// Note the affinity sets that will be used
if svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP {
activeAffinitySets.Insert(epInfo.affinitySetName)
}
epInfo, ok := ep.(*endpointInfo)
if !ok {
continue
}

// If using affinity, note the endpoint chain and affinity set
// names, and ensure that the chain exists.
if serviceUsesAffinity {
ensureChain(epInfo.chainName, tx, activeChains, skipServiceUpdate)
activeAffinitySets.Insert(epInfo.affinitySetName)
}

// Add endpoint to the hairpin set
proxier.hairpinConnections.ensureElem(tx, &knftables.Element{
Set: hairpinConnectionsSet,
Key: []string{
epInfo.IP(),
epInfo.IP(),
protocol,
strconv.Itoa(epInfo.Port()),
},
})
}

// clusterPolicyChain contains the endpoints used with "Cluster" traffic policy
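To make the hairpin set element concrete, a small sketch (the endpoint values are hypothetical, chosen to match the svc1 endpoint in TestOverallNFTablesRules) of the element this loop registers. The key is saddr . daddr . l4proto . dport, with source and destination both set to the endpoint IP, because a hairpin connection is an endpoint reaching itself through its own service.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"

	"sigs.k8s.io/knftables"
)

func main() {
	endpointIP := "10.180.0.1" // hypothetical endpoint, as in the svc1 test case
	protocol := "tcp"
	port := 80

	elem := &knftables.Element{
		Set: "hairpin-connections",
		Key: []string{endpointIP, endpointIP, protocol, strconv.Itoa(port)},
	}
	// knftables.Fake dumps this as:
	// add element ip kube-proxy hairpin-connections { 10.180.0.1 . 10.180.0.1 . tcp . 80 }
	fmt.Printf("{ %s }\n", strings.Join(elem.Key, " . "))
}
```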
@@ -1430,35 +1491,6 @@ func (proxier *Proxier) syncProxyRules() (retryError error) {
continue
}

// Set up internal traffic handling.
if hasInternalEndpoints {
if proxier.masqueradeAll {
tx.Add(&knftables.Rule{
Chain: internalTrafficChain,
Rule: knftables.Concat(
ipX, "daddr", svcInfo.ClusterIP(),
proxier.masqueradeRule,
),
Comment: ptr.To("masquerade all service traffic"),
})
} else if proxier.localDetector.IsImplemented() {
// This masquerades off-cluster traffic to a service VIP. The
// idea is that you can establish a static route for your
// Service range, routing to any node, and that node will
// bridge into the Service for you. Since that might bounce
// off-node, we masquerade here.
tx.Add(&knftables.Rule{
Chain: internalTrafficChain,
Rule: knftables.Concat(
ipX, "daddr", svcInfo.ClusterIP(),
proxier.localDetector.IfNotLocalNFT(),
proxier.masqueradeRule,
),
Comment: ptr.To("masquerade traffic from outside cluster"),
})
}
}

// Set up external traffic handling (if any "external" destinations are
// enabled). All captured traffic for all external destinations should
// jump to externalTrafficChain, which will handle some special cases and
@@ -1596,44 +1628,32 @@ func (proxier *Proxier) syncProxyRules() (retryError error) {
}
}

// If Cluster policy is in use, create the chain and create rules jumping
// from clusterPolicyChain to the clusterEndpoints
if usesClusterPolicyChain {
proxier.writeServiceToEndpointRules(tx, svcInfo, clusterPolicyChain, clusterEndpoints)
}

// If Local policy is in use, create rules jumping from localPolicyChain
// to the localEndpoints
if usesLocalPolicyChain {
proxier.writeServiceToEndpointRules(tx, svcInfo, localPolicyChain, localEndpoints)
}

// Generate the per-endpoint chains
for _, ep := range allLocallyReachableEndpoints {
epInfo, ok := ep.(*endpointInfo)
if !ok {
proxier.logger.Error(nil, "Failed to cast endpointInfo", "endpointInfo", ep)
continue
// Write the endpoint rules and/or chains
if !serviceUsesAffinity {
if usesClusterPolicyChain {
proxier.writeServiceToEndpointDNATs(tx, svcInfo, clusterPolicyChain, clusterEndpoints)
}
if usesLocalPolicyChain {
proxier.writeServiceToEndpointDNATs(tx, svcInfo, localPolicyChain, localEndpoints)
}
} else {
if usesClusterPolicyChain {
proxier.writeServiceToEndpointJumps(tx, svcInfo, clusterPolicyChain, clusterEndpoints)
}
if usesLocalPolicyChain {
proxier.writeServiceToEndpointJumps(tx, svcInfo, localPolicyChain, localEndpoints)
}

if proxier.epChainSkipUpdate(existingChains, existingAffinitySets, svcInfo, epInfo) {
// If the EP chain is already updated, we can skip it.
continue
}
endpointChain := epInfo.chainName
// And generate the per-endpoint chains and affinity sets
for _, ep := range allLocallyReachableEndpoints {
epInfo, ok := ep.(*endpointInfo)
if !ok {
continue
}

// Handle traffic that loops back to the originator with SNAT.
tx.Add(&knftables.Rule{
Chain: endpointChain,
Rule: knftables.Concat(
ipX, "saddr", epInfo.IP(),
proxier.masqueradeRule,
),
Comment: ptr.To("masquerade hairpin traffic"),
})
endpointChain := epInfo.chainName

// Handle session affinity
if svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP {
// Handle session affinity
tx.Add(&knftables.Rule{
Chain: endpointChain,
Rule: knftables.Concat(
@@ -1641,16 +1661,16 @@ func (proxier *Proxier) syncProxyRules() (retryError error) {
"{", ipX, "saddr", "}",
),
})
}

// DNAT to final destination.
tx.Add(&knftables.Rule{
Chain: endpointChain,
Rule: knftables.Concat(
"meta l4proto", protocol,
"dnat to", epInfo.String(),
),
})
// DNAT to final destination.
tx.Add(&knftables.Rule{
Chain: endpointChain,
Rule: knftables.Concat(
"meta l4proto", protocol,
"dnat to", epInfo.String(),
),
})
}
}
}

@@ -1688,6 +1708,7 @@ func (proxier *Proxier) syncProxyRules() (retryError error) {
proxier.noEndpointServices.cleanupLeftoverKeys(tx)
proxier.noEndpointNodePorts.cleanupLeftoverKeys(tx)
proxier.serviceNodePorts.cleanupLeftoverKeys(tx)
proxier.hairpinConnections.cleanupLeftoverKeys(tx)

// Sync rules.
proxier.logger.V(2).Info("Reloading service nftables data",
@@ -1746,55 +1767,12 @@ func (proxier *Proxier) syncProxyRules() (retryError error) {
return
}

// epChainSkipUpdate returns true if the EP chain doesn't need to be updated.
func (proxier *Proxier) epChainSkipUpdate(existingChains, existingAffinitySets sets.Set[string], svcInfo *servicePortInfo, epInfo *endpointInfo) bool {
if proxier.syncedOnce {
// We only skip updating EP chains during the first sync to speed up kube-proxy restart, otherwise return false.
return false
}
if existingChains == nil || existingAffinitySets == nil {
// listing existing objects failed, can't skip updating
return false
}
// EP chain can have up to 3 rules:
// - loopback masquerade rule
// - includes the endpoint IP
// - affinity rule when session affinity is set to ClusterIP
// - includes the affinity set name
// - DNAT rule
// - includes the endpoint IP + port
// EP chain name includes the endpoint IP + port => loopback and DNAT rules are pre-defined by the chain name.
// When session affinity is set to ClusterIP, the affinity set is created for local endpoints.
// Therefore, we can check that sessions affinity hasn't changed by checking if the affinity set exists.
wantAffinitySet := svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP
return existingChains.Has(epInfo.chainName) && wantAffinitySet == existingAffinitySets.Has(epInfo.affinitySetName)
}

func (proxier *Proxier) writeServiceToEndpointRules(tx *knftables.Transaction, svcInfo *servicePortInfo, svcChain string, endpoints []proxy.Endpoint) {
// First write session affinity rules, if applicable.
if svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP {
ipX := "ip"
if proxier.ipFamily == v1.IPv6Protocol {
ipX = "ip6"
}

for _, ep := range endpoints {
epInfo, ok := ep.(*endpointInfo)
if !ok {
continue
}

tx.Add(&knftables.Rule{
Chain: svcChain,
Rule: knftables.Concat(
ipX, "saddr", "@", epInfo.affinitySetName,
"goto", epInfo.chainName,
),
})
}
func (proxier *Proxier) writeServiceToEndpointDNATs(tx *knftables.Transaction, svcInfo *servicePortInfo, svcChain string, endpoints []proxy.Endpoint) {
ipX := "ip"
if proxier.ipFamily == v1.IPv6Protocol {
ipX = "ip6"
}

// Now write loadbalancing rule
var elements []string
for i, ep := range endpoints {
epInfo, ok := ep.(*endpointInfo)
@@ -1803,12 +1781,53 @@ func (proxier *Proxier) writeServiceToEndpointRules(tx *knftables.Transaction, s
}

elements = append(elements,
strconv.Itoa(i), ":", "goto", epInfo.chainName,
strconv.Itoa(i), ":", epInfo.IP(), ".", strconv.Itoa(epInfo.Port()),
)
if i != len(endpoints)-1 {
elements = append(elements, ",")
}
}
tx.Add(&knftables.Rule{
Chain: svcChain,
Rule: knftables.Concat(
"meta l4proto", strings.ToLower(string(svcInfo.Protocol())),
"dnat", ipX, "addr . port to",
"numgen random mod", len(endpoints), "map",
"{", elements, "}",
),
})
}
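For a concrete picture of the single rule writeServiceToEndpointDNATs now emits, a hedged standalone sketch (assuming sigs.k8s.io/knftables) that rebuilds it for a hypothetical two-endpoint TCP service; the expected string matches the svc4 expectation in TestOverallNFTablesRules.

```go
package main

import (
	"fmt"
	"strconv"

	"sigs.k8s.io/knftables"
)

func main() {
	// Hypothetical endpoints, matching the svc4 test case below.
	endpoints := [][2]string{{"10.180.0.4", "80"}, {"10.180.0.5", "80"}}

	var elements []string
	for i, ep := range endpoints {
		elements = append(elements, strconv.Itoa(i), ":", ep[0], ".", ep[1])
		if i != len(endpoints)-1 {
			elements = append(elements, ",")
		}
	}

	rule := knftables.Concat(
		"meta l4proto", "tcp",
		"dnat", "ip", "addr . port to",
		"numgen random mod", len(endpoints), "map",
		"{", elements, "}",
	)
	// Per the test expectations below, this should print:
	// meta l4proto tcp dnat ip addr . port to numgen random mod 2 map { 0 : 10.180.0.4 . 80 , 1 : 10.180.0.5 . 80 }
	fmt.Println(rule)
}
```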

func (proxier *Proxier) writeServiceToEndpointJumps(tx *knftables.Transaction, svcInfo *servicePortInfo, svcChain string, endpoints []proxy.Endpoint) {
ipX := "ip"
if proxier.ipFamily == v1.IPv6Protocol {
ipX = "ip6"
}

var elements []string
// Write the affinity rules, construct the vmap elements
for i, ep := range endpoints {
epInfo, ok := ep.(*endpointInfo)
if !ok {
continue
}

tx.Add(&knftables.Rule{
Chain: svcChain,
Rule: knftables.Concat(
ipX, "saddr", "@", epInfo.affinitySetName,
"goto", epInfo.chainName,
),
})

elements = append(elements,
strconv.Itoa(i), ":", "goto", epInfo.chainName,
)
if i != len(endpoints)-1 {
elements = append(elements, ",")
}
}
// Now write the vmap, for the case where no affinity rule matched
tx.Add(&knftables.Rule{
Chain: svcChain,
Rule: knftables.Concat(
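writeServiceToEndpointJumps is the affinity-only counterpart: per-endpoint chains are kept so that the affinity rules have a goto target, followed by a vmap for clients that have no affinity entry yet. A minimal sketch of its output for a one-endpoint ClientIP-affinity service (the names are hypothetical, shaped like the svc5 objects in TestOverallNFTablesRules):

```go
package main

import (
	"fmt"
	"strconv"

	"sigs.k8s.io/knftables"
)

type endpoint struct{ affinitySetName, chainName string }

func main() {
	endpoints := []endpoint{{
		affinitySetName: "affinity-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80",
		chainName:       "endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80",
	}}

	var elements []string
	for i, ep := range endpoints {
		// One "ip saddr @<affinity set> goto <endpoint chain>" rule per endpoint...
		fmt.Println(knftables.Concat("ip", "saddr", "@", ep.affinitySetName, "goto", ep.chainName))
		// ...plus one vmap entry for clients with no affinity entry yet.
		elements = append(elements, strconv.Itoa(i), ":", "goto", ep.chainName)
		if i != len(endpoints)-1 {
			elements = append(elements, ",")
		}
	}
	// numgen random mod 1 vmap { 0 : goto endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 }
	fmt.Println(knftables.Concat("numgen random mod", len(endpoints), "vmap", "{", elements, "}"))
}
```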
@@ -136,6 +136,7 @@ func NewFakeProxier(ipFamily v1.IPFamily) (*knftables.Fake, *Proxier) {
noEndpointServices: newNFTElementStorage("map", noEndpointServicesMap),
noEndpointNodePorts: newNFTElementStorage("map", noEndpointNodePortsMap),
serviceNodePorts: newNFTElementStorage("map", serviceNodePortsMap),
hairpinConnections: newNFTElementStorage("set", hairpinConnectionsSet),
}
p.setInitialized(true)
p.syncRunner = runner.NewBoundedFrequencyRunner("test-sync-runner", p.syncProxyRules, 0, 30*time.Second, time.Minute)
@@ -148,6 +149,7 @@ var baseRules = dedent.Dedent(`

add set ip kube-proxy cluster-ips { type ipv4_addr ; comment "Active ClusterIPs" ; }
add set ip kube-proxy nodeport-ips { type ipv4_addr ; comment "IPs that accept NodePort traffic" ; }
add set ip kube-proxy hairpin-connections { type ipv4_addr . ipv4_addr . inet_proto . inet_service ; comment "service hairpin connections" ; }

add map ip kube-proxy firewall-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "destinations that are subject to LoadBalancerSourceRanges" ; }
add map ip kube-proxy no-endpoint-nodeports { type inet_proto . inet_service : verdict ; comment "vmap to drop or reject packets to service nodeports with no endpoints" ; }
@@ -183,11 +185,13 @@ var baseRules = dedent.Dedent(`
add rule ip kube-proxy filter-output ct state new jump cluster-ips-check
add rule ip kube-proxy firewall-check ip daddr . meta l4proto . th dport vmap @firewall-ips
add rule ip kube-proxy masquerading mark and 0x4000 != 0 mark set mark xor 0x4000 masquerade fully-random
add rule ip kube-proxy masquerading ct status dnat ip saddr . ip daddr . meta l4proto . th dport @hairpin-connections masquerade fully-random
add rule ip kube-proxy nat-output jump services
add rule ip kube-proxy nat-postrouting jump masquerading
add rule ip kube-proxy nat-prerouting jump services
add rule ip kube-proxy nodeport-endpoints-check ip daddr @nodeport-ips meta l4proto . th dport vmap @no-endpoint-nodeports
add rule ip kube-proxy reject-chain reject
add rule ip kube-proxy services ip daddr @cluster-ips ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade clusterIP traffic from outside cluster"
add rule ip kube-proxy services ip daddr . meta l4proto . th dport vmap @service-ips
add rule ip kube-proxy services ip daddr @nodeport-ips meta l4proto . th dport vmap @service-nodeports

@ -362,33 +366,26 @@ func TestOverallNFTablesRules(t *testing.T) {
|
|||
expected := baseRules + dedent.Dedent(`
|
||||
# svc1
|
||||
add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
|
||||
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
|
||||
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80 }
|
||||
|
||||
add chain ip kube-proxy endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80
|
||||
add rule ip kube-proxy endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80 ip saddr 10.180.0.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
|
||||
add rule ip kube-proxy endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80 meta l4proto tcp dnat to 10.180.0.1:80
|
||||
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.180.0.1 . 80 }
|
||||
|
||||
add element ip kube-proxy cluster-ips { 172.30.0.41 }
|
||||
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
|
||||
add element ip kube-proxy hairpin-connections { 10.180.0.1 . 10.180.0.1 . tcp . 80 }
|
||||
|
||||
# svc2
|
||||
add chain ip kube-proxy service-42NFTM6N-ns2/svc2/tcp/p80
|
||||
add rule ip kube-proxy service-42NFTM6N-ns2/svc2/tcp/p80 ip daddr 172.30.0.42 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
|
||||
add rule ip kube-proxy service-42NFTM6N-ns2/svc2/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-SGOXE6O3-ns2/svc2/tcp/p80__10.180.0.2/80 }
|
||||
add rule ip kube-proxy service-42NFTM6N-ns2/svc2/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.180.0.2 . 80 }
|
||||
add chain ip kube-proxy external-42NFTM6N-ns2/svc2/tcp/p80
|
||||
add rule ip kube-proxy external-42NFTM6N-ns2/svc2/tcp/p80 ip saddr 10.0.0.0/8 goto service-42NFTM6N-ns2/svc2/tcp/p80 comment "short-circuit pod traffic"
|
||||
add rule ip kube-proxy external-42NFTM6N-ns2/svc2/tcp/p80 fib saddr type local mark set mark or 0x4000 comment "masquerade local traffic"
|
||||
add rule ip kube-proxy external-42NFTM6N-ns2/svc2/tcp/p80 fib saddr type local goto service-42NFTM6N-ns2/svc2/tcp/p80 comment "short-circuit local traffic"
|
||||
add chain ip kube-proxy endpoint-SGOXE6O3-ns2/svc2/tcp/p80__10.180.0.2/80
|
||||
add rule ip kube-proxy endpoint-SGOXE6O3-ns2/svc2/tcp/p80__10.180.0.2/80 ip saddr 10.180.0.2 mark set mark or 0x4000 comment "masquerade hairpin traffic"
|
||||
add rule ip kube-proxy endpoint-SGOXE6O3-ns2/svc2/tcp/p80__10.180.0.2/80 meta l4proto tcp dnat to 10.180.0.2:80
|
||||
|
||||
add element ip kube-proxy cluster-ips { 172.30.0.42 }
|
||||
add element ip kube-proxy service-ips { 172.30.0.42 . tcp . 80 : goto service-42NFTM6N-ns2/svc2/tcp/p80 }
|
||||
add element ip kube-proxy service-ips { 192.168.99.22 . tcp . 80 : goto external-42NFTM6N-ns2/svc2/tcp/p80 }
|
||||
add element ip kube-proxy service-ips { 1.2.3.4 . tcp . 80 : goto external-42NFTM6N-ns2/svc2/tcp/p80 }
|
||||
add element ip kube-proxy service-nodeports { tcp . 3001 : goto external-42NFTM6N-ns2/svc2/tcp/p80 }
|
||||
add element ip kube-proxy hairpin-connections { 10.180.0.2 . 10.180.0.2 . tcp . 80 }
|
||||
|
||||
add element ip kube-proxy no-endpoint-nodeports { tcp . 3001 comment "ns2/svc2:p80" : drop }
|
||||
add element ip kube-proxy no-endpoint-services { 1.2.3.4 . tcp . 80 comment "ns2/svc2:p80" : drop }
|
||||
|
|
@ -396,41 +393,32 @@ func TestOverallNFTablesRules(t *testing.T) {
|
|||
|
||||
# svc3
|
||||
add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
|
||||
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
|
||||
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-UEIP74TE-ns3/svc3/tcp/p80__10.180.0.3/80 }
|
||||
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.180.0.3 . 80 }
|
||||
add chain ip kube-proxy external-4AT6LBPK-ns3/svc3/tcp/p80
|
||||
add rule ip kube-proxy external-4AT6LBPK-ns3/svc3/tcp/p80 mark set mark or 0x4000 comment "masquerade"
|
||||
add rule ip kube-proxy external-4AT6LBPK-ns3/svc3/tcp/p80 goto service-4AT6LBPK-ns3/svc3/tcp/p80
|
||||
add chain ip kube-proxy endpoint-UEIP74TE-ns3/svc3/tcp/p80__10.180.0.3/80
|
||||
add rule ip kube-proxy endpoint-UEIP74TE-ns3/svc3/tcp/p80__10.180.0.3/80 ip saddr 10.180.0.3 mark set mark or 0x4000 comment "masquerade hairpin traffic"
|
||||
add rule ip kube-proxy endpoint-UEIP74TE-ns3/svc3/tcp/p80__10.180.0.3/80 meta l4proto tcp dnat to 10.180.0.3:80
|
||||
|
||||
add element ip kube-proxy cluster-ips { 172.30.0.43 }
|
||||
add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
|
||||
add element ip kube-proxy service-nodeports { tcp . 3003 : goto external-4AT6LBPK-ns3/svc3/tcp/p80 }
|
||||
add element ip kube-proxy hairpin-connections { 10.180.0.3 . 10.180.0.3 . tcp . 80 }
|
||||
|
||||
# svc4
|
||||
add chain ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80
|
||||
add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 ip daddr 172.30.0.44 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
|
||||
add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 numgen random mod 2 vmap { 0 : goto endpoint-UNZV3OEC-ns4/svc4/tcp/p80__10.180.0.4/80 , 1 : goto endpoint-5RFCDDV7-ns4/svc4/tcp/p80__10.180.0.5/80 }
|
||||
add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 2 map { 0 : 10.180.0.4 . 80 , 1 : 10.180.0.5 . 80 }
|
||||
add chain ip kube-proxy external-LAUZTJTB-ns4/svc4/tcp/p80
|
||||
add rule ip kube-proxy external-LAUZTJTB-ns4/svc4/tcp/p80 mark set mark or 0x4000 comment "masquerade"
|
||||
add rule ip kube-proxy external-LAUZTJTB-ns4/svc4/tcp/p80 goto service-LAUZTJTB-ns4/svc4/tcp/p80
|
||||
add chain ip kube-proxy endpoint-5RFCDDV7-ns4/svc4/tcp/p80__10.180.0.5/80
|
||||
add rule ip kube-proxy endpoint-5RFCDDV7-ns4/svc4/tcp/p80__10.180.0.5/80 ip saddr 10.180.0.5 mark set mark or 0x4000 comment "masquerade hairpin traffic"
|
||||
add rule ip kube-proxy endpoint-5RFCDDV7-ns4/svc4/tcp/p80__10.180.0.5/80 meta l4proto tcp dnat to 10.180.0.5:80
|
||||
add chain ip kube-proxy endpoint-UNZV3OEC-ns4/svc4/tcp/p80__10.180.0.4/80
|
||||
add rule ip kube-proxy endpoint-UNZV3OEC-ns4/svc4/tcp/p80__10.180.0.4/80 ip saddr 10.180.0.4 mark set mark or 0x4000 comment "masquerade hairpin traffic"
|
||||
add rule ip kube-proxy endpoint-UNZV3OEC-ns4/svc4/tcp/p80__10.180.0.4/80 meta l4proto tcp dnat to 10.180.0.4:80
|
||||
|
||||
add element ip kube-proxy cluster-ips { 172.30.0.44 }
|
||||
add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
|
||||
add element ip kube-proxy service-ips { 192.168.99.33 . tcp . 80 : goto external-LAUZTJTB-ns4/svc4/tcp/p80 }
|
||||
add element ip kube-proxy hairpin-connections { 10.180.0.4 . 10.180.0.4 . tcp . 80 }
|
||||
add element ip kube-proxy hairpin-connections { 10.180.0.5 . 10.180.0.5 . tcp . 80 }
|
||||
|
||||
# svc5
|
||||
add set ip kube-proxy affinity-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 { type ipv4_addr ; flags dynamic,timeout ; timeout 10800s ; }
|
||||
add chain ip kube-proxy service-HVFWP5L3-ns5/svc5/tcp/p80
|
||||
add rule ip kube-proxy service-HVFWP5L3-ns5/svc5/tcp/p80 ip daddr 172.30.0.45 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
|
||||
add rule ip kube-proxy service-HVFWP5L3-ns5/svc5/tcp/p80 ip saddr @affinity-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 goto endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80
|
||||
add rule ip kube-proxy service-HVFWP5L3-ns5/svc5/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 }
|
||||
add chain ip kube-proxy external-HVFWP5L3-ns5/svc5/tcp/p80
|
||||
|
|
@ -438,7 +426,6 @@ func TestOverallNFTablesRules(t *testing.T) {
|
|||
add rule ip kube-proxy external-HVFWP5L3-ns5/svc5/tcp/p80 goto service-HVFWP5L3-ns5/svc5/tcp/p80
|
||||
|
||||
add chain ip kube-proxy endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80
|
||||
add rule ip kube-proxy endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 ip saddr 10.180.0.3 mark set mark or 0x4000 comment "masquerade hairpin traffic"
|
||||
add rule ip kube-proxy endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 update @affinity-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 { ip saddr }
|
||||
add rule ip kube-proxy endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 meta l4proto tcp dnat to 10.180.0.3:80
|
||||
|
||||
|
|
@ -450,6 +437,7 @@ func TestOverallNFTablesRules(t *testing.T) {
|
|||
add element ip kube-proxy service-ips { 5.6.7.8 . tcp . 80 : goto external-HVFWP5L3-ns5/svc5/tcp/p80 }
|
||||
add element ip kube-proxy service-nodeports { tcp . 3002 : goto external-HVFWP5L3-ns5/svc5/tcp/p80 }
|
||||
add element ip kube-proxy firewall-ips { 5.6.7.8 . tcp . 80 : goto firewall-HVFWP5L3-ns5/svc5/tcp/p80 }
|
||||
# 10.180.0.3:80 is already in hairpin-connections, so we don't want another "add element" line...
|
||||
|
||||
# svc6
|
||||
add element ip kube-proxy cluster-ips { 172.30.0.46 }
|
||||
|
|
@@ -3808,6 +3796,20 @@ func TestInternalExternalMasquerade(t *testing.T) {
}
}

// assertNumOperations asserts that the last transaction on nft had a number of operations
// equal to the sum of the values in ops[]. (ops is multiple values just for ease of
// explaining the math; see the callers below.)
func assertNumOperations(t *testing.T, nft *knftables.Fake, ops ...int) {
t.Helper()
expectedOps := 0
for _, n := range ops {
expectedOps += n
}
if nft.LastTransaction.NumOperations() != expectedOps {
t.Errorf("Expected transaction with %d operations, got %d", expectedOps, nft.LastTransaction.NumOperations())
}
}

// Test calling syncProxyRules() multiple times with various changes
func TestSyncProxyRulesRepeated(t *testing.T) {
nft, fp := NewFakeProxier(v1.IPv4Protocol)
@ -3877,20 +3879,14 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
|
|||
add element ip kube-proxy cluster-ips { 172.30.0.42 }
|
||||
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
|
||||
add element ip kube-proxy service-ips { 172.30.0.42 . tcp . 8080 : goto service-MHHHYRWA-ns2/svc2/tcp/p8080 }
|
||||
add element ip kube-proxy hairpin-connections { 10.0.1.1 . 10.0.1.1 . tcp . 80 }
|
||||
add element ip kube-proxy hairpin-connections { 10.0.2.1 . 10.0.2.1 . tcp . 8080 }
|
||||
|
||||
add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
|
||||
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
|
||||
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
|
||||
add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
|
||||
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
|
||||
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
|
||||
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.1.1 . 80 }
|
||||
|
||||
add chain ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080
|
||||
add rule ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080 ip daddr 172.30.0.42 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
|
||||
add rule ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080 numgen random mod 1 vmap { 0 : goto endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 }
|
||||
add chain ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080
|
||||
add rule ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 ip saddr 10.0.2.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
|
||||
add rule ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 meta l4proto tcp dnat to 10.0.2.1:8080
|
||||
add rule ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.2.1 . 8080 }
|
||||
`)
|
||||
assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
|
||||
|
||||
|
|
@ -3930,35 +3926,24 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
|
|||
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
|
||||
add element ip kube-proxy service-ips { 172.30.0.42 . tcp . 8080 : goto service-MHHHYRWA-ns2/svc2/tcp/p8080 }
|
||||
add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
|
||||
add element ip kube-proxy hairpin-connections { 10.0.1.1 . 10.0.1.1 . tcp . 80 }
|
||||
add element ip kube-proxy hairpin-connections { 10.0.2.1 . 10.0.2.1 . tcp . 8080 }
|
||||
add element ip kube-proxy hairpin-connections { 10.0.3.1 . 10.0.3.1 . tcp . 80 }
|
||||
|
||||
add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
|
||||
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
|
||||
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
|
||||
add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
|
||||
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
|
||||
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
|
||||
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.1.1 . 80 }
|
||||
|
||||
add chain ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080
|
||||
add rule ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080 ip daddr 172.30.0.42 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
|
||||
add rule ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080 numgen random mod 1 vmap { 0 : goto endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 }
|
||||
add chain ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080
|
||||
add rule ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 ip saddr 10.0.2.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
|
||||
add rule ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 meta l4proto tcp dnat to 10.0.2.1:8080
|
||||
add rule ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.2.1 . 8080 }
|
||||
|
||||
add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
|
||||
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
|
||||
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 }
|
||||
add chain ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80
|
||||
add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 ip saddr 10.0.3.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
|
||||
add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 meta l4proto tcp dnat to 10.0.3.1:80
|
||||
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.3.1 . 80 }
|
||||
`)
|
||||
assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
|
||||
// add 1 element to cluster-ips and service-ips = 2 operations
|
||||
// add+flush 2 chains for service and endpoint, add 2 rules in each = 8 operations
|
||||
// 10 operations total.
|
||||
if nft.LastTransaction.NumOperations() != 10 {
|
||||
t.Errorf("Expected 10 trasaction operations, got %d", nft.LastTransaction.NumOperations())
|
||||
}
|
||||
assertNumOperations(t, nft,
|
||||
3, // add 1 element each to cluster-ips, service-ips, and hairpin-connections
|
||||
3, // add+flush service chain, add 1 rule
|
||||
)
|
||||
|
||||
// Delete a service; its chains will be flushed, but not immediately deleted.
|
||||
fp.OnServiceDelete(svc2)
|
||||
|
|
@ -3968,31 +3953,22 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
|
|||
add element ip kube-proxy cluster-ips { 172.30.0.43 }
|
||||
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
|
||||
add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
|
||||
add element ip kube-proxy hairpin-connections { 10.0.1.1 . 10.0.1.1 . tcp . 80 }
|
||||
add element ip kube-proxy hairpin-connections { 10.0.3.1 . 10.0.3.1 . tcp . 80 }
|
||||
|
||||
add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
|
||||
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
|
||||
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
|
||||
add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
|
||||
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
|
||||
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
|
||||
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.1.1 . 80 }
|
||||
|
||||
add chain ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080
|
||||
add chain ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080
|
||||
|
||||
add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
|
||||
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
|
||||
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 }
|
||||
add chain ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80
|
||||
add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 ip saddr 10.0.3.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
|
||||
add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 meta l4proto tcp dnat to 10.0.3.1:80
|
||||
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.3.1 . 80 }
|
||||
`)
|
||||
assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
|
||||
// delete 1 element from cluster-ips and service-ips = 2 operations
|
||||
// flush 2 chains for service and endpoint = 2 operations
|
||||
// 4 operations total.
|
||||
if nft.LastTransaction.NumOperations() != 4 {
|
||||
t.Errorf("Expected 4 trasaction operations, got %d", nft.LastTransaction.NumOperations())
|
||||
}
|
||||
assertNumOperations(t, nft,
|
||||
3, // delete 1 element each from cluster-ips, service-ips, hairpin-connections
|
||||
1, // flush service chain
|
||||
)
|
||||
|
||||
// Fake the passage of time and confirm that the stale chains get deleted.
|
||||
ageStaleChains()
|
||||
|
|
@ -4002,26 +3978,18 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
|
|||
add element ip kube-proxy cluster-ips { 172.30.0.43 }
|
||||
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
|
||||
add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
|
||||
add element ip kube-proxy hairpin-connections { 10.0.1.1 . 10.0.1.1 . tcp . 80 }
|
||||
add element ip kube-proxy hairpin-connections { 10.0.3.1 . 10.0.3.1 . tcp . 80 }
|
||||
|
||||
add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
|
||||
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
|
||||
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
|
||||
add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
|
||||
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
|
||||
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
|
||||
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.1.1 . 80 }
|
||||
|
||||
add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
|
||||
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
|
||||
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 }
|
||||
add chain ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80
|
||||
add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 ip saddr 10.0.3.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
|
||||
add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 meta l4proto tcp dnat to 10.0.3.1:80
|
||||
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.3.1 . 80 }
|
||||
`)
|
||||
assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
|
||||
// delete stale chains happens in a separate transaction, nothing else changed => last transaction will have 0 operations.
|
||||
if nft.LastTransaction.NumOperations() != 0 {
|
||||
t.Errorf("Expected 0 trasaction operations, got %d", nft.LastTransaction.NumOperations())
|
||||
}
|
||||
// delete stale chains happens in a separate transaction, nothing else changed
|
||||
assertNumOperations(t, nft, 0)
|
||||
|
||||
// Add a service, sync, then add its endpoints.
|
||||
makeServiceMap(fp,
|
||||
|
|
@ -4042,28 +4010,21 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
|
|||
add element ip kube-proxy cluster-ips { 172.30.0.44 }
|
||||
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
|
||||
add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
|
||||
add element ip kube-proxy hairpin-connections { 10.0.1.1 . 10.0.1.1 . tcp . 80 }
|
||||
add element ip kube-proxy hairpin-connections { 10.0.3.1 . 10.0.3.1 . tcp . 80 }
|
||||
|
||||
add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
|
||||
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
|
||||
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
|
||||
add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
|
||||
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
|
||||
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
|
||||
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.1.1 . 80 }
|
||||
|
||||
add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
|
||||
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
|
||||
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 }
|
||||
add chain ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80
|
||||
add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 ip saddr 10.0.3.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
|
||||
add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 meta l4proto tcp dnat to 10.0.3.1:80
|
||||
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.3.1 . 80 }
|
||||
|
||||
add element ip kube-proxy no-endpoint-services { 172.30.0.44 . tcp . 80 comment "ns4/svc4:p80" : goto reject-chain }
|
||||
`)
|
||||
assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
|
||||
// add 1 element to cluster-ips and no-endpoint-services = 2 operations
|
||||
if nft.LastTransaction.NumOperations() != 2 {
|
||||
t.Errorf("Expected 2 trasaction operations, got %d", nft.LastTransaction.NumOperations())
|
||||
}
|
||||
assertNumOperations(t, nft,
|
||||
2, // add 1 element each to cluster-ips and no-endpoint-services
|
||||
)
|
||||
|
||||
populateEndpointSlices(fp,
|
||||
makeTestEndpointSlice("ns4", "svc4", 1, func(eps *discovery.EndpointSlice) {
|
||||
|
|
@ -4086,34 +4047,25 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
|
|||
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
|
||||
add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
|
||||
add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
|
||||
add element ip kube-proxy hairpin-connections { 10.0.1.1 . 10.0.1.1 . tcp . 80 }
|
||||
add element ip kube-proxy hairpin-connections { 10.0.3.1 . 10.0.3.1 . tcp . 80 }
|
||||
add element ip kube-proxy hairpin-connections { 10.0.4.1 . 10.0.4.1 . tcp . 80 }
|
||||
|
||||
add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
|
||||
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
|
||||
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
|
||||
add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
|
||||
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
|
||||
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
|
||||
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.1.1 . 80 }
|
||||
|
||||
add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
|
||||
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
|
||||
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 }
|
||||
add chain ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80
|
||||
add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 ip saddr 10.0.3.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
|
||||
add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 meta l4proto tcp dnat to 10.0.3.1:80
|
||||
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.3.1 . 80 }
|
||||
|
||||
add chain ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80
|
||||
add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 ip daddr 172.30.0.44 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
|
||||
add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 }
|
||||
add chain ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80
|
||||
add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 ip saddr 10.0.4.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
|
||||
add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 meta l4proto tcp dnat to 10.0.4.1:80
|
||||
add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.4.1 . 80 }
|
||||
`)
|
||||
assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
|
||||
// add 1 element to service-ips, remove 1 element from no-endpoint-services = 2 operations
|
||||
// add+flush 2 chains for service and endpoint, add 2 rules in each = 8 operations
|
||||
if nft.LastTransaction.NumOperations() != 10 {
|
||||
t.Errorf("Expected 10 trasaction operations, got %d", nft.LastTransaction.NumOperations())
|
||||
}
|
||||
assertNumOperations(t, nft,
|
||||
2, // add 1 element to service-ips, remove 1 element from no-endpoint-services
|
||||
1, // add 1 element to hairpin-connections
|
||||
3, // add+flush service chain, add 1 rule
|
||||
)

// Change an endpoint of an existing service.
eps3update := eps3.DeepCopy()

@ -4129,35 +4081,24 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
add element ip kube-proxy hairpin-connections { 10.0.1.1 . 10.0.1.1 . tcp . 80 }
add element ip kube-proxy hairpin-connections { 10.0.3.2 . 10.0.3.2 . tcp . 80 }
add element ip kube-proxy hairpin-connections { 10.0.4.1 . 10.0.4.1 . tcp . 80 }

add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.1.1 . 80 }

add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 }
add chain ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80
add chain ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80
add rule ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 ip saddr 10.0.3.2 mark set mark or 0x4000 comment "masquerade hairpin traffic"
add rule ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 meta l4proto tcp dnat to 10.0.3.2:80
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.3.2 . 80 }

add chain ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80
add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 ip daddr 172.30.0.44 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 }
add chain ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80
add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 ip saddr 10.0.4.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 meta l4proto tcp dnat to 10.0.4.1:80
add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.4.1 . 80 }
`)
assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
// add+flush 2 chains for service and endpoint, add 2 rules in each = 8 operations
// flush old endpoint chain = 1 operation
if nft.LastTransaction.NumOperations() != 9 {
t.Errorf("Expected 9 trasaction operations, got %d", nft.LastTransaction.NumOperations())
}
assertNumOperations(t, nft,
2, // remove 1 old element from hairpin-connections, add 1 new one
3, // add+flush service chain, add 1 rule
)

// (Ensure the old svc3 chain gets deleted in the next sync.)
ageStaleChains()

@ -4175,36 +4116,26 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
add element ip kube-proxy hairpin-connections { 10.0.1.1 . 10.0.1.1 . tcp . 80 }
add element ip kube-proxy hairpin-connections { 10.0.3.2 . 10.0.3.2 . tcp . 80 }
add element ip kube-proxy hairpin-connections { 10.0.3.3 . 10.0.3.3 . tcp . 80 }
add element ip kube-proxy hairpin-connections { 10.0.4.1 . 10.0.4.1 . tcp . 80 }

add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.1.1 . 80 }

add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 2 vmap { 0 : goto endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 , 1 : goto endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80 }
add chain ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80
add rule ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 ip saddr 10.0.3.2 mark set mark or 0x4000 comment "masquerade hairpin traffic"
add rule ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 meta l4proto tcp dnat to 10.0.3.2:80
add chain ip kube-proxy endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80
add rule ip kube-proxy endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80 ip saddr 10.0.3.3 mark set mark or 0x4000 comment "masquerade hairpin traffic"
add rule ip kube-proxy endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80 meta l4proto tcp dnat to 10.0.3.3:80
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 2 map { 0 : 10.0.3.2 . 80 , 1 : 10.0.3.3 . 80 }

add chain ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80
add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 ip daddr 172.30.0.44 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 }
add chain ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80
add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 ip saddr 10.0.4.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 meta l4proto tcp dnat to 10.0.4.1:80
add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.4.1 . 80 }
`)
assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
// add+flush 3 chains for 1 service and 2 endpoints, add 2 rules in each = 12 operations
if nft.LastTransaction.NumOperations() != 12 {
t.Errorf("Expected 12 trasaction operations, got %d", nft.LastTransaction.NumOperations())
}
// (The first endpoint chain is unchanged, but the code recreates it anyway.)
assertNumOperations(t, nft,
1, // add 1 element to hairpin-connections
3, // add+flush service chain, add 1 rule
)

// Empty a service's endpoints; its chains will be flushed, but not immediately deleted.
eps3update3 := eps3update2.DeepCopy()

@ -4218,33 +4149,25 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
add element ip kube-proxy no-endpoint-services { 172.30.0.43 . tcp . 80 comment "ns3/svc3:p80" : goto reject-chain }
add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
add element ip kube-proxy hairpin-connections { 10.0.1.1 . 10.0.1.1 . tcp . 80 }
add element ip kube-proxy hairpin-connections { 10.0.4.1 . 10.0.4.1 . tcp . 80 }

add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.1.1 . 80 }

add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
add chain ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80
add chain ip kube-proxy endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80

add chain ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80
add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 ip daddr 172.30.0.44 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 }
add chain ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80
add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 ip saddr 10.0.4.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 meta l4proto tcp dnat to 10.0.4.1:80
add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.4.1 . 80 }
`)
assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
// remove 1 element from service-ips, add 1 element to no-endpoint-services = 2 operations
// flush 3 chains = 3 operations
if nft.LastTransaction.NumOperations() != 5 {
t.Errorf("Expected 5 trasaction operations, got %d", nft.LastTransaction.NumOperations())
}
assertNumOperations(t, nft,
2, // remove 1 element from service-ips, add 1 element to no-endpoint-services
2, // remove 2 elements from hairpin-connections
1, // flush service chain
)

expectedStaleChains := sets.NewString("service-4AT6LBPK-ns3/svc3/tcp/p80", "endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80", "endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80")
expectedStaleChains := sets.NewString("service-4AT6LBPK-ns3/svc3/tcp/p80")
gotStaleChains := sets.StringKeySet(fp.staleChains)
if !expectedStaleChains.Equal(gotStaleChains) {
t.Errorf("expected stale chains %v, got %v", expectedStaleChains, gotStaleChains)

@ -4259,37 +4182,26 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
add element ip kube-proxy hairpin-connections { 10.0.1.1 . 10.0.1.1 . tcp . 80 }
add element ip kube-proxy hairpin-connections { 10.0.3.2 . 10.0.3.2 . tcp . 80 }
add element ip kube-proxy hairpin-connections { 10.0.3.3 . 10.0.3.3 . tcp . 80 }
add element ip kube-proxy hairpin-connections { 10.0.4.1 . 10.0.4.1 . tcp . 80 }

add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.1.1 . 80 }

add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 2 vmap { 0 : goto endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 , 1 : goto endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80 }
add chain ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80
add rule ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 ip saddr 10.0.3.2 mark set mark or 0x4000 comment "masquerade hairpin traffic"
add rule ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 meta l4proto tcp dnat to 10.0.3.2:80
add chain ip kube-proxy endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80
add rule ip kube-proxy endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80 ip saddr 10.0.3.3 mark set mark or 0x4000 comment "masquerade hairpin traffic"
add rule ip kube-proxy endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80 meta l4proto tcp dnat to 10.0.3.3:80
add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 2 map { 0 : 10.0.3.2 . 80 , 1 : 10.0.3.3 . 80 }

add chain ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80
add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 ip daddr 172.30.0.44 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 }
add chain ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80
add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 ip saddr 10.0.4.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 meta l4proto tcp dnat to 10.0.4.1:80
add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.4.1 . 80 }
`)
assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
// remove 1 element from no-endpoint-services, add 1 element to service-ips = 2 operations
// add+flush 3 chains for 1 service and 2 endpoints, add 2 rules in each = 12 operations
if nft.LastTransaction.NumOperations() != 14 {
t.Errorf("Expected 14 trasaction operations, got %d", nft.LastTransaction.NumOperations())
}
assertNumOperations(t, nft,
2, // remove 1 element from no-endpoint-services, add 1 element to service-ips
2, // add 2 elements to hairpin-connections
3, // add+flush service chain, add 1 rule
)

if len(fp.staleChains) != 0 {
t.Errorf("unexpected stale chains: %v", fp.staleChains)

@ -4309,9 +4221,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
// Sync with no new changes, so same expected rules as last time
fp.syncProxyRules()
assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
if nft.LastTransaction.NumOperations() != 0 {
t.Errorf("Expected 0 trasaction operations, got %d", nft.LastTransaction.NumOperations())
}
assertNumOperations(t, nft, 0)
}

func TestSyncProxyRulesStartup(t *testing.T) {

@ -4326,24 +4236,18 @@ func TestSyncProxyRulesStartup(t *testing.T) {
// put a part of desired state to nftables
err := nft.ParseDump(baseRules + dedent.Dedent(`
add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
add chain ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080
add chain ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080

add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.1.1 . 80 }

add rule ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080 ip daddr 172.30.0.42 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
add rule ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080 numgen random mod 1 vmap { 0 : goto endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 }
add rule ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 ip saddr 10.0.2.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
add rule ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 meta l4proto tcp dnat to 10.0.2.1:8080
add rule ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.2.1 . 8080 }

add element ip kube-proxy cluster-ips { 172.30.0.41 }
add element ip kube-proxy cluster-ips { 172.30.0.42 }
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
add element ip kube-proxy service-ips { 172.30.0.42 . tcp . 8080 : goto service-MHHHYRWA-ns2/svc2/tcp/p8080 }
add element ip kube-proxy hairpin-connections { 10.0.1.1 . 10.0.1.1 . tcp . 80 }
add element ip kube-proxy hairpin-connections { 10.0.2.1 . 10.0.2.1 . tcp . 8080 }
`))

if err != nil {

@ -4418,36 +4322,23 @@ func TestSyncProxyRulesStartup(t *testing.T) {
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
add element ip kube-proxy service-ips { 172.30.0.42 . tcp . 8080 : goto service-MHHHYRWA-ns2/svc2/tcp/p8080 }
add element ip kube-proxy no-endpoint-services { 172.30.0.43 . tcp . 80 comment "ns3/svc3:p80" : goto reject-chain }
add element ip kube-proxy hairpin-connections { 10.0.1.1 . 10.0.1.1 . tcp . 80 }
add element ip kube-proxy hairpin-connections { 10.0.1.2 . 10.0.1.2 . tcp . 80 }
add element ip kube-proxy hairpin-connections { 10.0.2.1 . 10.0.2.1 . tcp . 8080 }

add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 2 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 , 1 : goto endpoint-ZCZBVNAZ-ns1/svc1/tcp/p80__10.0.1.2/80 }
add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
add chain ip kube-proxy endpoint-ZCZBVNAZ-ns1/svc1/tcp/p80__10.0.1.2/80
add rule ip kube-proxy endpoint-ZCZBVNAZ-ns1/svc1/tcp/p80__10.0.1.2/80 ip saddr 10.0.1.2 mark set mark or 0x4000 comment "masquerade hairpin traffic"
add rule ip kube-proxy endpoint-ZCZBVNAZ-ns1/svc1/tcp/p80__10.0.1.2/80 meta l4proto tcp dnat to 10.0.1.2:80
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 2 map { 0 : 10.0.1.1 . 80 , 1 : 10.0.1.2 . 80 }

add chain ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080
add rule ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080 ip daddr 172.30.0.42 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
add rule ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080 numgen random mod 1 vmap { 0 : goto endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 }
add chain ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080
add rule ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 ip saddr 10.0.2.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
add rule ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 meta l4proto tcp dnat to 10.0.2.1:8080
add rule ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.0.2.1 . 8080 }
`)
assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
// initial transaction consists of:
// 1. nft setup, total ops = setupOps
// 2. services setup (should skip adding existing set/map elements and endpoint chains+rules)
// - add svc3 IP to the cluster-ips, and to the no-endpoint-services set = 2 ops
// - add+flush 2 service chains + 2 rules each = 8 ops
// - add+flush svc1 endpoint chain + 2 rules = 4 ops
// total: 14 ops
if nft.LastTransaction.NumOperations() != setupOps+14 {
fmt.Println(nft.LastTransaction)
t.Errorf("Expected %v trasaction operations, got %d", setupOps+14, nft.LastTransaction.NumOperations())
}
assertNumOperations(t, nft,
setupOps, // nft setup
1, // add new svc1 endpoint to hairpin-connections
2, // add svc3 IP to the cluster-ips, and to the no-endpoint-services set
6, // add+flush 2 service chains + 1 rule each
)
}

func TestNoEndpointsMetric(t *testing.T) {

@ -4903,16 +4794,12 @@ func TestBadIPs(t *testing.T) {
expected := baseRules + dedent.Dedent(`
# svc1
add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 ip saddr != 10.0.0.0/8 mark set mark or 0x4000 comment "masquerade traffic from outside cluster"
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80 }
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 meta l4proto tcp dnat ip addr . port to numgen random mod 1 map { 0 : 10.180.0.1 . 80 }

add chain ip kube-proxy external-ULMVA6XW-ns1/svc1/tcp/p80
add rule ip kube-proxy external-ULMVA6XW-ns1/svc1/tcp/p80 mark set mark or 0x4000 comment "masquerade"
add rule ip kube-proxy external-ULMVA6XW-ns1/svc1/tcp/p80 goto service-ULMVA6XW-ns1/svc1/tcp/p80

add chain ip kube-proxy endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80
add rule ip kube-proxy endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80 ip saddr 10.180.0.1 mark set mark or 0x4000 comment "masquerade hairpin traffic"
add rule ip kube-proxy endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80 meta l4proto tcp dnat to 10.180.0.1:80

add chain ip kube-proxy firewall-ULMVA6XW-ns1/svc1/tcp/p80
add rule ip kube-proxy firewall-ULMVA6XW-ns1/svc1/tcp/p80 ip saddr != { 203.0.113.0/25 } drop

@ -4923,6 +4810,7 @@ func TestBadIPs(t *testing.T) {
add element ip kube-proxy service-ips { 1.2.3.4 . tcp . 80 : goto external-ULMVA6XW-ns1/svc1/tcp/p80 }
add element ip kube-proxy service-nodeports { tcp . 3001 : goto external-ULMVA6XW-ns1/svc1/tcp/p80 }
add element ip kube-proxy firewall-ips { 1.2.3.4 . tcp . 80 : goto firewall-ULMVA6XW-ns1/svc1/tcp/p80 }
add element ip kube-proxy hairpin-connections { 10.180.0.1 . 10.180.0.1 . tcp . 80 }
`)

assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
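// assertNFTablesTransactionEqual itself is not shown in these hunks. A
// simplified, hypothetical stand-in that just normalizes the expected and
// actual dumps and compares them line by line (the real helper is presumably
// more careful about transaction semantics, and getLine() is assumed to return
// the caller's source line; "strings" and "reflect" would be needed) could
// look like:
//
//	func assertNFTablesTransactionEqual(t *testing.T, line int, expected, actual string) {
//		t.Helper()
//		normalize := func(s string) []string {
//			var out []string
//			for _, l := range strings.Split(s, "\n") {
//				if l = strings.TrimSpace(l); l != "" {
//					out = append(out, l)
//				}
//			}
//			return out
//		}
//		want, got := normalize(expected), normalize(actual)
//		if !reflect.DeepEqual(want, got) {
//			t.Errorf("(from line %d) expected nftables state:\n%s\n\ngot:\n%s",
//				line, strings.Join(want, "\n"), strings.Join(got, "\n"))
//		}
//	}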