Mirror of https://github.com/k3s-io/k3s.git (synced 2026-02-03 20:39:49 -05:00)
lint: duplicated-imports
Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
This commit is contained in:
parent 23093122b0
commit 100cb633a3
4 changed files with 15 additions and 19 deletions
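For context, the duplicated-imports lint flags a file that imports the same package more than once under different aliases; the fix, which is all this commit does, is to keep a single alias per package and rewrite every reference to it. A minimal illustrative sketch (a hypothetical file, not taken from this commit) of the pattern and its resolution:

    // Hypothetical example: "k8s.io/api/core/v1" was previously imported twice,
    // as both v1 and v1core, splitting references across two aliases.
    // The cleanup keeps one alias and rewrites all uses to it.
    package main

    import (
    	"fmt"

    	v1 "k8s.io/api/core/v1" // duplicate alias v1core "k8s.io/api/core/v1" removed
    )

    func main() {
    	// Before the cleanup this key type would have been written as v1core.IPFamily.
    	handlers := make(map[v1.IPFamily]string, 2)
    	handlers[v1.IPv4Protocol] = "iptables"
    	fmt.Println(handlers)
    }

The commit title matches the name of revive's duplicated-imports rule, so the findings presumably come from that linter, though the diff itself does not say.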
File 1 of 4 (filename not shown in this view):

@@ -29,7 +29,6 @@ import (
 	pkgerrors "github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	v1 "k8s.io/api/core/v1"
-	v1core "k8s.io/api/core/v1"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
 )
@@ -125,21 +124,21 @@ func Run(ctx context.Context, wg *sync.WaitGroup, nodeConfig *config.Node) error
 	informerFactory.Start(stopCh)
 	informerFactory.WaitForCacheSync(stopCh)
 
-	iptablesCmdHandlers := make(map[v1core.IPFamily]utils.IPTablesHandler, 2)
-	ipSetHandlers := make(map[v1core.IPFamily]utils.IPSetHandler, 2)
+	iptablesCmdHandlers := make(map[v1.IPFamily]utils.IPTablesHandler, 2)
+	ipSetHandlers := make(map[v1.IPFamily]utils.IPSetHandler, 2)
 
 	if nodeConfig.AgentConfig.EnableIPv4 {
 		iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)
 		if err != nil {
 			return pkgerrors.WithMessage(err, "failed to create iptables handler")
 		}
-		iptablesCmdHandlers[v1core.IPv4Protocol] = iptHandler
+		iptablesCmdHandlers[v1.IPv4Protocol] = iptHandler
 
 		ipset, err := utils.NewIPSet(false)
 		if err != nil {
 			return pkgerrors.WithMessage(err, "failed to create ipset handler")
 		}
-		ipSetHandlers[v1core.IPv4Protocol] = ipset
+		ipSetHandlers[v1.IPv4Protocol] = ipset
 	}
 
 	if nodeConfig.AgentConfig.EnableIPv6 {
@@ -147,13 +146,13 @@ func Run(ctx context.Context, wg *sync.WaitGroup, nodeConfig *config.Node) error
 		if err != nil {
 			return pkgerrors.WithMessage(err, "failed to create iptables handler")
 		}
-		iptablesCmdHandlers[v1core.IPv6Protocol] = ipt6Handler
+		iptablesCmdHandlers[v1.IPv6Protocol] = ipt6Handler
 
 		ipset, err := utils.NewIPSet(true)
 		if err != nil {
 			return pkgerrors.WithMessage(err, "failed to create ipset handler")
 		}
-		ipSetHandlers[v1core.IPv6Protocol] = ipset
+		ipSetHandlers[v1.IPv6Protocol] = ipset
 	}
 
 	// Start kube-router healthcheck controller; netpol requires it
File 2 of 4 (filename not shown in this view):

@@ -3,7 +3,6 @@ package proxy
 import (
 	"context"
 	"net"
-	sysnet "net"
 	"net/url"
 	"strconv"
@@ -117,12 +116,12 @@ func (p *proxy) SetHealthCheck(address string, healthCheck loadbalancer.HealthCh
 func (p *proxy) setSupervisorPort(addresses []string) []string {
 	var newAddresses []string
 	for _, address := range addresses {
-		h, _, err := sysnet.SplitHostPort(address)
+		h, _, err := net.SplitHostPort(address)
 		if err != nil {
 			logrus.Errorf("Failed to parse address %s, dropping: %v", address, err)
 			continue
 		}
-		newAddresses = append(newAddresses, sysnet.JoinHostPort(h, p.supervisorPort))
+		newAddresses = append(newAddresses, net.JoinHostPort(h, p.supervisorPort))
 	}
 	return newAddresses
 }
@@ -143,7 +142,7 @@ func (p *proxy) SetAPIServerPort(port int, isIPv6 bool) error {
 		return pkgerrors.WithMessagef(err, "failed to parse server URL %s", p.initialSupervisorURL)
 	}
 	p.apiServerPort = strconv.Itoa(port)
-	u.Host = sysnet.JoinHostPort(u.Hostname(), p.apiServerPort)
+	u.Host = net.JoinHostPort(u.Hostname(), p.apiServerPort)
 
 	if p.lbEnabled && p.apiServerLB == nil {
 		lbServerPort := p.lbServerPort
@@ -170,14 +169,14 @@ func (p *proxy) SetAPIServerPort(port int, isIPv6 bool) error {
 // supervisor must be used to bootstrap the agent config, but then switched over to
 // another node running an apiserver once one is available.
 func (p *proxy) SetSupervisorDefault(address string) {
-	host, port, err := sysnet.SplitHostPort(address)
+	host, port, err := net.SplitHostPort(address)
 	if err != nil {
 		logrus.Errorf("Failed to parse address %s, dropping: %v", address, err)
 		return
 	}
 	if p.apiServerEnabled {
 		port = p.supervisorPort
-		address = sysnet.JoinHostPort(host, port)
+		address = net.JoinHostPort(host, port)
 	}
 	p.fallbackSupervisorAddress = address
 	if p.supervisorLB == nil {
File 3 of 4 (filename not shown in this view):

@@ -22,7 +22,6 @@ import (
 	"github.com/sirupsen/logrus"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/component-base/logs"
-	logsapi "k8s.io/component-base/logs/api/v1"
 	logsv1 "k8s.io/component-base/logs/api/v1"
 	_ "k8s.io/component-base/metrics/prometheus/restclient" // for client metric registration
 	_ "k8s.io/component-base/metrics/prometheus/version" // for version metric registration
@@ -34,7 +33,7 @@ import (
 )
 
 func Agent(ctx context.Context, nodeConfig *daemonconfig.Node, proxy proxy.Proxy) error {
-	logsapi.ReapplyHandling = logsapi.ReapplyHandlingIgnoreUnchanged
+	logsv1.ReapplyHandling = logsv1.ReapplyHandlingIgnoreUnchanged
 	logs.InitLogs()
 	defer logs.FlushLogs()
 
File 4 of 4 (filename not shown in this view):

@@ -9,7 +9,6 @@ import (
 	"strings"
 	"time"
 
-	apisv1 "github.com/k3s-io/api/k3s.cattle.io/v1"
 	k3s "github.com/k3s-io/api/k3s.cattle.io/v1"
 	controllersv1 "github.com/k3s-io/api/pkg/generated/controllers/k3s.cattle.io/v1"
 	"github.com/k3s-io/k3s/pkg/etcd/snapshot"
@@ -70,7 +69,7 @@ func registerSnapshotHandlers(ctx context.Context, etcd *ETCD) {
 	go wait.JitterUntil(func() { snapshots.Enqueue(reconcileKey) }, reconcileInterval, 0.04, false, ctx.Done())
 }
 
-func (e *etcdSnapshotHandler) sync(key string, esf *apisv1.ETCDSnapshotFile) (*apisv1.ETCDSnapshotFile, error) {
+func (e *etcdSnapshotHandler) sync(key string, esf *k3s.ETCDSnapshotFile) (*k3s.ETCDSnapshotFile, error) {
 	if key == reconcileKey {
 		err := e.reconcile()
 		if err == errNotReconciled {
@@ -149,7 +148,7 @@ func (e *etcdSnapshotHandler) sync(key string, esf *apisv1.ETCDSnapshotFile) (*apisv1.ETCDSnapshotFile, error) {
 		return nil, err
 	}
 
-func (e *etcdSnapshotHandler) onRemove(key string, esf *apisv1.ETCDSnapshotFile) (*apisv1.ETCDSnapshotFile, error) {
+func (e *etcdSnapshotHandler) onRemove(key string, esf *k3s.ETCDSnapshotFile) (*k3s.ETCDSnapshotFile, error) {
 	if esf == nil {
 		return nil, nil
 	}
@@ -220,7 +219,7 @@ func (e *etcdSnapshotHandler) reconcile() error {
 	logrus.Infof("Reconciling snapshot ConfigMap data")
 
 	// Get a list of existing snapshots
-	snapshots := map[string]*apisv1.ETCDSnapshotFile{}
+	snapshots := map[string]*k3s.ETCDSnapshotFile{}
 	snapshotPager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (k8sruntime.Object, error) { return e.snapshots.List(opts) }))
 	snapshotPager.PageSize = snapshotListPageSize
 