gomod: change projectcalico/calico to kubesphere/calico (#5557)
* chore(calico): update calico to 3.25.0 * chore(calico): replace projectcalico/calico to kubesphere/calico Signed-off-by: root <renyunkang@kubesphere.io> --------- Signed-off-by: root <renyunkang@kubesphere.io>
This commit is contained in:
27
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/BUILD
generated
vendored
Normal file
27
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/BUILD
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"doc.go",
|
||||
"register.go",
|
||||
"types.go",
|
||||
"zz_generated.conversion.go",
|
||||
"zz_generated.deepcopy.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"k8s.io/apimachinery/pkg/conversion:go_default_library",
|
||||
"k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"k8s.io/sample-apiserver/pkg/apis/wardle:go_default_library",
|
||||
],
|
||||
)
|
||||
160
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/bgpconfig.go
generated
vendored
Normal file
160
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/bgpconfig.go
generated
vendored
Normal file
@@ -0,0 +1,160 @@
|
||||
// Copyright (c) 2020-2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/projectcalico/api/pkg/lib/numorstring"
|
||||
)
|
||||
|
||||
const (
|
||||
KindBGPConfiguration = "BGPConfiguration"
|
||||
KindBGPConfigurationList = "BGPConfigurationList"
|
||||
)
|
||||
|
||||
type BindMode string
|
||||
|
||||
const (
|
||||
BindModeNone BindMode = "None"
|
||||
BindModeNodeIP BindMode = "NodeIP"
|
||||
)
|
||||
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// BGPConfigurationList is a list of BGPConfiguration resources.
|
||||
type BGPConfigurationList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Items []BGPConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
type BGPConfiguration struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Spec BGPConfigurationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
}
|
||||
|
||||
// BGPConfigurationSpec contains the values of the BGP configuration.
|
||||
type BGPConfigurationSpec struct {
|
||||
// LogSeverityScreen is the log severity above which logs are sent to the stdout. [Default: INFO]
|
||||
LogSeverityScreen string `json:"logSeverityScreen,omitempty" validate:"omitempty,logLevel" confignamev1:"loglevel"`
|
||||
|
||||
// NodeToNodeMeshEnabled sets whether full node to node BGP mesh is enabled. [Default: true]
|
||||
NodeToNodeMeshEnabled *bool `json:"nodeToNodeMeshEnabled,omitempty" validate:"omitempty" confignamev1:"node_mesh"`
|
||||
|
||||
// ASNumber is the default AS number used by a node. [Default: 64512]
|
||||
ASNumber *numorstring.ASNumber `json:"asNumber,omitempty" validate:"omitempty" confignamev1:"as_num"`
|
||||
|
||||
// ServiceLoadBalancerIPs are the CIDR blocks for Kubernetes Service LoadBalancer IPs.
|
||||
// Kubernetes Service status.LoadBalancer.Ingress IPs will only be advertised if they are within one of these blocks.
|
||||
ServiceLoadBalancerIPs []ServiceLoadBalancerIPBlock `json:"serviceLoadBalancerIPs,omitempty" validate:"omitempty,dive" confignamev1:"svc_loadbalancer_ips"`
|
||||
|
||||
// ServiceExternalIPs are the CIDR blocks for Kubernetes Service External IPs.
|
||||
// Kubernetes Service ExternalIPs will only be advertised if they are within one of these blocks.
|
||||
ServiceExternalIPs []ServiceExternalIPBlock `json:"serviceExternalIPs,omitempty" validate:"omitempty,dive" confignamev1:"svc_external_ips"`
|
||||
|
||||
// ServiceClusterIPs are the CIDR blocks from which service cluster IPs are allocated.
|
||||
// If specified, Calico will advertise these blocks, as well as any cluster IPs within them.
|
||||
ServiceClusterIPs []ServiceClusterIPBlock `json:"serviceClusterIPs,omitempty" validate:"omitempty,dive" confignamev1:"svc_cluster_ips"`
|
||||
|
||||
// Communities is a list of BGP community values and their arbitrary names for tagging routes.
|
||||
Communities []Community `json:"communities,omitempty" validate:"omitempty,dive" confignamev1:"communities"`
|
||||
|
||||
// PrefixAdvertisements contains per-prefix advertisement configuration.
|
||||
PrefixAdvertisements []PrefixAdvertisement `json:"prefixAdvertisements,omitempty" validate:"omitempty,dive" confignamev1:"prefix_advertisements"`
|
||||
|
||||
// ListenPort is the port where BGP protocol should listen. Defaults to 179
|
||||
// +kubebuilder:validation:Minimum:=1
|
||||
// +kubebuilder:validation:Maximum:=65535
|
||||
ListenPort uint16 `json:"listenPort,omitempty" validate:"omitempty,gt=0" confignamev1:"listen_port"`
|
||||
|
||||
// Optional BGP password for full node-to-mesh peerings.
|
||||
// This field can only be set on the default BGPConfiguration instance and requires that NodeMesh is enabled
|
||||
// +optional
|
||||
NodeMeshPassword *BGPPassword `json:"nodeMeshPassword,omitempty" validate:"omitempty" confignamev1:"node_mesh_password"`
|
||||
|
||||
// Time to allow for software restart for node-to-mesh peerings. When specified, this is configured
|
||||
// as the graceful restart timeout. When not specified, the BIRD default of 120s is used.
|
||||
// This field can only be set on the default BGPConfiguration instance and requires that NodeMesh is enabled
|
||||
// +optional
|
||||
NodeMeshMaxRestartTime *metav1.Duration `json:"nodeMeshMaxRestartTime,omitempty" confignamev1:"node_mesh_restart_time"`
|
||||
|
||||
// BindMode indicates whether to listen for BGP connections on all addresses (None)
|
||||
// or only on the node's canonical IP address Node.Spec.BGP.IPvXAddress (NodeIP).
|
||||
// Default behaviour is to listen for BGP connections on all addresses.
|
||||
// +optional
|
||||
BindMode *BindMode `json:"bindMode,omitempty"`
|
||||
|
||||
// IgnoredInterfaces indicates the network interfaces that needs to be excluded when reading device routes.
|
||||
// +optional
|
||||
IgnoredInterfaces []string `json:"ignoredInterfaces,omitempty" validate:"omitempty,dive,ignoredInterface"`
|
||||
}
|
||||
|
||||
// ServiceLoadBalancerIPBlock represents a single allowed LoadBalancer IP CIDR block.
|
||||
type ServiceLoadBalancerIPBlock struct {
|
||||
CIDR string `json:"cidr,omitempty" validate:"omitempty,net"`
|
||||
}
|
||||
|
||||
// ServiceExternalIPBlock represents a single allowed External IP CIDR block.
|
||||
type ServiceExternalIPBlock struct {
|
||||
CIDR string `json:"cidr,omitempty" validate:"omitempty,net"`
|
||||
}
|
||||
|
||||
// ServiceClusterIPBlock represents a single allowed ClusterIP CIDR block.
|
||||
type ServiceClusterIPBlock struct {
|
||||
CIDR string `json:"cidr,omitempty" validate:"omitempty,net"`
|
||||
}
|
||||
|
||||
// Community contains standard or large community value and its name.
|
||||
type Community struct {
|
||||
// Name given to community value.
|
||||
Name string `json:"name,omitempty" validate:"required,name"`
|
||||
// Value must be of format `aa:nn` or `aa:nn:mm`.
|
||||
// For standard community use `aa:nn` format, where `aa` and `nn` are 16 bit number.
|
||||
// For large community use `aa:nn:mm` format, where `aa`, `nn` and `mm` are 32 bit number.
|
||||
// Where, `aa` is an AS Number, `nn` and `mm` are per-AS identifier.
|
||||
// +kubebuilder:validation:Pattern=`^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$`
|
||||
Value string `json:"value,omitempty" validate:"required"`
|
||||
}
|
||||
|
||||
// PrefixAdvertisement configures advertisement properties for the specified CIDR.
|
||||
type PrefixAdvertisement struct {
|
||||
// CIDR for which properties should be advertised.
|
||||
CIDR string `json:"cidr,omitempty" validate:"required,net"`
|
||||
// Communities can be list of either community names already defined in `Specs.Communities` or community value of format `aa:nn` or `aa:nn:mm`.
|
||||
// For standard community use `aa:nn` format, where `aa` and `nn` are 16 bit number.
|
||||
// For large community use `aa:nn:mm` format, where `aa`, `nn` and `mm` are 32 bit number.
|
||||
// Where,`aa` is an AS Number, `nn` and `mm` are per-AS identifier.
|
||||
Communities []string `json:"communities,omitempty" validate:"required"`
|
||||
}
|
||||
|
||||
// NewBGPConfiguration creates a new (zeroed) BGPConfiguration struct with the TypeMetadata
|
||||
// initialized to the current version.
|
||||
func NewBGPConfiguration() *BGPConfiguration {
|
||||
return &BGPConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindBGPConfiguration,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
106
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/bgpfilter.go
generated
vendored
Normal file
106
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/bgpfilter.go
generated
vendored
Normal file
@@ -0,0 +1,106 @@
|
||||
// Copyright (c) 2022 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
KindBGPFilter = "BGPFilter"
|
||||
KindBGPFilterList = "BGPFilterList"
|
||||
)
|
||||
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// BGPFilterList is a list of BGPFilter resources.
|
||||
type BGPFilterList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Items []BGPFilter `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
type BGPFilter struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Spec BGPFilterSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
}
|
||||
|
||||
// BGPFilterSpec contains the IPv4 and IPv6 filter rules of the BGP Filter.
|
||||
type BGPFilterSpec struct {
|
||||
// The ordered set of IPv4 BGPFilter rules acting on exporting routes to a peer.
|
||||
ExportV4 []BGPFilterRuleV4 `json:"exportV4,omitempty" validate:"omitempty,dive"`
|
||||
|
||||
// The ordered set of IPv4 BGPFilter rules acting on importing routes from a peer.
|
||||
ImportV4 []BGPFilterRuleV4 `json:"importV4,omitempty" validate:"omitempty,dive"`
|
||||
|
||||
// The ordered set of IPv6 BGPFilter rules acting on exporting routes to a peer.
|
||||
ExportV6 []BGPFilterRuleV6 `json:"exportV6,omitempty" validate:"omitempty,dive"`
|
||||
|
||||
// The ordered set of IPv6 BGPFilter rules acting on importing routes from a peer.
|
||||
ImportV6 []BGPFilterRuleV6 `json:"importV6,omitempty" validate:"omitempty,dive"`
|
||||
}
|
||||
|
||||
// BGPFilterRuleV4 defines a BGP filter rule consisting a single IPv4 CIDR block and a filter action for this CIDR.
|
||||
type BGPFilterRuleV4 struct {
|
||||
CIDR string `json:"cidr" validate:"required,netv4"`
|
||||
|
||||
MatchOperator BGPFilterMatchOperator `json:"matchOperator" validate:"required,matchOperator"`
|
||||
|
||||
Action BGPFilterAction `json:"action" validate:"required,filterAction"`
|
||||
}
|
||||
|
||||
// BGPFilterRuleV6 defines a BGP filter rule consisting a single IPv6 CIDR block and a filter action for this CIDR.
|
||||
type BGPFilterRuleV6 struct {
|
||||
CIDR string `json:"cidr" validate:"required,netv6"`
|
||||
|
||||
MatchOperator BGPFilterMatchOperator `json:"matchOperator" validate:"required,matchOperator"`
|
||||
|
||||
Action BGPFilterAction `json:"action" validate:"required,filterAction"`
|
||||
}
|
||||
|
||||
type BGPFilterMatchOperator string
|
||||
|
||||
const (
|
||||
Equal BGPFilterMatchOperator = "Equal"
|
||||
NotEqual = "NotEqual"
|
||||
In = "In"
|
||||
NotIn = "NotIn"
|
||||
)
|
||||
|
||||
type BGPFilterAction string
|
||||
|
||||
const (
|
||||
Accept BGPFilterAction = "Accept"
|
||||
Reject = "Reject"
|
||||
)
|
||||
|
||||
// NewBGPFilter creates a new (zeroed) BGPFilter struct with the TypeMetadata
|
||||
// initialized to the current version.
|
||||
func NewBGPFilter() *BGPFilter {
|
||||
return &BGPFilter{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindBGPFilter,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
141
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/bgppeer.go
generated
vendored
Normal file
141
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/bgppeer.go
generated
vendored
Normal file
@@ -0,0 +1,141 @@
|
||||
// Copyright (c) 2017,2020-2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
k8sv1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/projectcalico/api/pkg/lib/numorstring"
|
||||
)
|
||||
|
||||
const (
|
||||
KindBGPPeer = "BGPPeer"
|
||||
KindBGPPeerList = "BGPPeerList"
|
||||
)
|
||||
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// BGPPeerList is a list of BGPPeer resources.
|
||||
type BGPPeerList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Items []BGPPeer `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
type BGPPeer struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Spec BGPPeerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
}
|
||||
|
||||
// BGPPeerSpec contains the specification for a BGPPeer resource.
|
||||
type BGPPeerSpec struct {
|
||||
// The node name identifying the Calico node instance that is targeted by this peer.
|
||||
// If this is not set, and no nodeSelector is specified, then this BGP peer selects all
|
||||
// nodes in the cluster.
|
||||
// +optional
|
||||
Node string `json:"node,omitempty" validate:"omitempty,name"`
|
||||
|
||||
// Selector for the nodes that should have this peering. When this is set, the Node
|
||||
// field must be empty.
|
||||
// +optional
|
||||
NodeSelector string `json:"nodeSelector,omitempty" validate:"omitempty,selector"`
|
||||
|
||||
// The IP address of the peer followed by an optional port number to peer with.
|
||||
// If port number is given, format should be `[<IPv6>]:port` or `<IPv4>:<port>` for IPv4.
|
||||
// If optional port number is not set, and this peer IP and ASNumber belongs to a calico/node
|
||||
// with ListenPort set in BGPConfiguration, then we use that port to peer.
|
||||
// +optional
|
||||
PeerIP string `json:"peerIP,omitempty" validate:"omitempty,IP:port"`
|
||||
|
||||
// The AS Number of the peer.
|
||||
// +optional
|
||||
ASNumber numorstring.ASNumber `json:"asNumber,omitempty"`
|
||||
|
||||
// Selector for the remote nodes to peer with. When this is set, the PeerIP and
|
||||
// ASNumber fields must be empty. For each peering between the local node and
|
||||
// selected remote nodes, we configure an IPv4 peering if both ends have
|
||||
// NodeBGPSpec.IPv4Address specified, and an IPv6 peering if both ends have
|
||||
// NodeBGPSpec.IPv6Address specified. The remote AS number comes from the remote
|
||||
// node's NodeBGPSpec.ASNumber, or the global default if that is not set.
|
||||
// +optional
|
||||
PeerSelector string `json:"peerSelector,omitempty" validate:"omitempty,selector"`
|
||||
|
||||
// Option to keep the original nexthop field when routes are sent to a BGP Peer.
|
||||
// Setting "true" configures the selected BGP Peers node to use the "next hop keep;"
|
||||
// instead of "next hop self;"(default) in the specific branch of the Node on "bird.cfg".
|
||||
KeepOriginalNextHop bool `json:"keepOriginalNextHop,omitempty"`
|
||||
|
||||
// Optional BGP password for the peerings generated by this BGPPeer resource.
|
||||
Password *BGPPassword `json:"password,omitempty" validate:"omitempty"`
|
||||
// Specifies whether and how to configure a source address for the peerings generated by
|
||||
// this BGPPeer resource. Default value "UseNodeIP" means to configure the node IP as the
|
||||
// source address. "None" means not to configure a source address.
|
||||
SourceAddress SourceAddress `json:"sourceAddress,omitempty" validate:"omitempty,sourceAddress"`
|
||||
// Time to allow for software restart. When specified, this is configured as the graceful
|
||||
// restart timeout. When not specified, the BIRD default of 120s is used.
|
||||
MaxRestartTime *metav1.Duration `json:"maxRestartTime,omitempty"`
|
||||
// Maximum number of local AS numbers that are allowed in the AS path for received routes.
|
||||
// This removes BGP loop prevention and should only be used if absolutely necessary.
|
||||
// +optional
|
||||
NumAllowedLocalASNumbers *int32 `json:"numAllowedLocalASNumbers,omitempty"`
|
||||
// TTLSecurity enables the generalized TTL security mechanism (GTSM) which protects against spoofed packets by
|
||||
// ignoring received packets with a smaller than expected TTL value. The provided value is the number of hops
|
||||
// (edges) between the peers.
|
||||
// +optional
|
||||
TTLSecurity *uint8 `json:"ttlSecurity,omitempty"`
|
||||
|
||||
// Add an exact, i.e. /32, static route toward peer IP in order to prevent route flapping.
|
||||
// ReachableBy contains the address of the gateway which peer can be reached by.
|
||||
// +optional
|
||||
ReachableBy string `json:"reachableBy,omitempty" validate:"omitempty,reachableBy"`
|
||||
|
||||
// The ordered set of BGPFilters applied on this BGP peer.
|
||||
// +optional
|
||||
Filters []string `json:"filters,omitempty" validate:"omitempty,dive,name"`
|
||||
}
|
||||
|
||||
type SourceAddress string
|
||||
|
||||
const (
|
||||
SourceAddressUseNodeIP SourceAddress = "UseNodeIP"
|
||||
SourceAddressNone = "None"
|
||||
)
|
||||
|
||||
// BGPPassword contains ways to specify a BGP password.
|
||||
type BGPPassword struct {
|
||||
// Selects a key of a secret in the node pod's namespace.
|
||||
SecretKeyRef *k8sv1.SecretKeySelector `json:"secretKeyRef,omitempty"`
|
||||
}
|
||||
|
||||
// NewBGPPeer creates a new (zeroed) BGPPeer struct with the TypeMetadata initialised to the current
|
||||
// version.
|
||||
func NewBGPPeer() *BGPPeer {
|
||||
return &BGPPeer{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindBGPPeer,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
94
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/blockaffinity.go
generated
vendored
Normal file
94
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/blockaffinity.go
generated
vendored
Normal file
@@ -0,0 +1,94 @@
|
||||
// Copyright (c) 2022 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
KindBlockAffinity = "BlockAffinity"
|
||||
KindBlockAffinityList = "BlockAffinityList"
|
||||
)
|
||||
|
||||
type BlockAffinityState string
|
||||
|
||||
const (
|
||||
StateConfirmed BlockAffinityState = "confirmed"
|
||||
StatePending BlockAffinityState = "pending"
|
||||
StatePendingDeletion BlockAffinityState = "pendingDeletion"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// BlockAffinity maintains a block affinity's state
|
||||
type BlockAffinity struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
// Specification of the BlockAffinity.
|
||||
Spec BlockAffinitySpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
// BlockAffinitySpec contains the specification for a BlockAffinity resource.
|
||||
type BlockAffinitySpec struct {
|
||||
// The state of the block affinity with regard to any referenced IPAM blocks.
|
||||
State BlockAffinityState `json:"state"`
|
||||
|
||||
// The node that this block affinity is assigned to.
|
||||
Node string `json:"node"`
|
||||
|
||||
// The CIDR range this block affinity references.
|
||||
CIDR string `json:"cidr"`
|
||||
|
||||
// Deleted indicates whether or not this block affinity is disabled and is
|
||||
// used as part of race-condition prevention. When set to true, clients
|
||||
// should treat this block as if it does not exist.
|
||||
Deleted bool `json:"deleted,omitempty"`
|
||||
}
|
||||
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// BlockAffinityList contains a list of BlockAffinity resources.
|
||||
type BlockAffinityList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
Items []BlockAffinity `json:"items"`
|
||||
}
|
||||
|
||||
// NewBlockAffinity creates a new (zeroed) BlockAffinity struct with the TypeMetadata initialised to the current
|
||||
// version.
|
||||
func NewBlockAffinity() *BlockAffinity {
|
||||
return &BlockAffinity{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindBlockAffinity,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewBlockAffinityList creates a new (zeroed) BlockAffinityList struct with the TypeMetadata initialised to the current
|
||||
// version.
|
||||
func NewBlockAffinityList() *BlockAffinityList {
|
||||
return &BlockAffinityList{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindBlockAffinityList,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2017, 2020-2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -23,16 +23,26 @@ const (
|
||||
KindClusterInformationList = "ClusterInformationList"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// ClusterInformation contains the cluster specific information.
|
||||
type ClusterInformation struct {
|
||||
// ClusterInformationList is a list of ClusterInformation objects.
|
||||
type ClusterInformationList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
// Specification of the ClusterInformation.
|
||||
Spec ClusterInformationSpec `json:"spec,omitempty"`
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Items []ClusterInformation `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
type ClusterInformation struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Spec ClusterInformationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
}
|
||||
|
||||
// ClusterInformationSpec contains the values of describing the cluster.
|
||||
@@ -45,17 +55,9 @@ type ClusterInformationSpec struct {
|
||||
CalicoVersion string `json:"calicoVersion,omitempty" validate:"omitempty"`
|
||||
// DatastoreReady is used during significant datastore migrations to signal to components
|
||||
// such as Felix that it should wait before accessing the datastore.
|
||||
DatastoreReady *bool `json:"datastoreReady"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// ClusterInformationList contains a list of ClusterInformation resources
|
||||
// (even though there should only be one).
|
||||
type ClusterInformationList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
Items []ClusterInformation `json:"items"`
|
||||
DatastoreReady *bool `json:"datastoreReady,omitempty"`
|
||||
// Variant declares which variant of Calico should be active.
|
||||
Variant string `json:"variant,omitempty"`
|
||||
}
|
||||
|
||||
// NewClusterInformation creates a new (zeroed) ClusterInformation struct with the TypeMetadata
|
||||
@@ -68,14 +70,3 @@ func NewClusterInformation() *ClusterInformation {
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewClusterInformationList creates a new (zeroed) ClusterInformationList struct with the TypeMetadata
|
||||
// initialized to the current version.
|
||||
func NewClusterInformationList() *ClusterInformationList {
|
||||
return &ClusterInformationList{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindClusterInformationList,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2017-2019 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2017-2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -45,4 +45,8 @@ const (
|
||||
OrchestratorCNI = "cni"
|
||||
OrchestratorDocker = "libnetwork"
|
||||
OrchestratorOpenStack = "openstack"
|
||||
|
||||
// Enum options for enable/disable fields
|
||||
Enabled = "Enabled"
|
||||
Disabled = "Disabled"
|
||||
)
|
||||
183
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/conversion.go
generated
vendored
Normal file
183
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/conversion.go
generated
vendored
Normal file
@@ -0,0 +1,183 @@
|
||||
// Copyright (c) 2019-2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
package v3
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
func addConversionFuncs(scheme *runtime.Scheme) error {
|
||||
// Add non-generated conversion functions
|
||||
err := scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "NetworkPolicy"},
|
||||
func(label, value string) (string, string, error) {
|
||||
switch label {
|
||||
case "metadata.name", "metadata.namespace":
|
||||
return label, value, nil
|
||||
default:
|
||||
return "", "", fmt.Errorf("field label not supported: %s", label)
|
||||
}
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "GlobalNetworkPolicy"},
|
||||
func(label, value string) (string, string, error) {
|
||||
switch label {
|
||||
case "metadata.name":
|
||||
return label, value, nil
|
||||
default:
|
||||
return "", "", fmt.Errorf("field label not supported: %s", label)
|
||||
}
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "GlobalNetworkSet"},
|
||||
func(label, value string) (string, string, error) {
|
||||
switch label {
|
||||
case "metadata.name":
|
||||
return label, value, nil
|
||||
default:
|
||||
return "", "", fmt.Errorf("field label not supported: %s", label)
|
||||
}
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "NetworkSet"},
|
||||
func(label, value string) (string, string, error) {
|
||||
switch label {
|
||||
case "metadata.name", "metadata.namespace":
|
||||
return label, value, nil
|
||||
default:
|
||||
return "", "", fmt.Errorf("field label not supported: %s", label)
|
||||
}
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "HostEndpoint"},
|
||||
func(label, value string) (string, string, error) {
|
||||
switch label {
|
||||
case "metadata.name":
|
||||
return label, value, nil
|
||||
default:
|
||||
return "", "", fmt.Errorf("field label not supported: %s", label)
|
||||
}
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "IPPool"},
|
||||
func(label, value string) (string, string, error) {
|
||||
switch label {
|
||||
case "metadata.name":
|
||||
return label, value, nil
|
||||
default:
|
||||
return "", "", fmt.Errorf("field label not supported: %s", label)
|
||||
}
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "BGPConfiguration"},
|
||||
func(label, value string) (string, string, error) {
|
||||
switch label {
|
||||
case "metadata.name":
|
||||
return label, value, nil
|
||||
default:
|
||||
return "", "", fmt.Errorf("field label not supported: %s", label)
|
||||
}
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "BGPPeer"},
|
||||
func(label, value string) (string, string, error) {
|
||||
switch label {
|
||||
case "metadata.name":
|
||||
return label, value, nil
|
||||
default:
|
||||
return "", "", fmt.Errorf("field label not supported: %s", label)
|
||||
}
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "Profile"},
|
||||
func(label, value string) (string, string, error) {
|
||||
switch label {
|
||||
case "metadata.name":
|
||||
return label, value, nil
|
||||
default:
|
||||
return "", "", fmt.Errorf("field label not supported: %s", label)
|
||||
}
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "FelixConfiguration"},
|
||||
func(label, value string) (string, string, error) {
|
||||
switch label {
|
||||
case "metadata.name":
|
||||
return label, value, nil
|
||||
default:
|
||||
return "", "", fmt.Errorf("field label not supported: %s", label)
|
||||
}
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "KubeControllersConfiguration"},
|
||||
func(label, value string) (string, string, error) {
|
||||
switch label {
|
||||
case "metadata.name":
|
||||
return label, value, nil
|
||||
default:
|
||||
return "", "", fmt.Errorf("field label not supported: %s", label)
|
||||
}
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "ClusterInformation"},
|
||||
func(label, value string) (string, string, error) {
|
||||
switch label {
|
||||
case "metadata.name":
|
||||
return label, value, nil
|
||||
default:
|
||||
return "", "", fmt.Errorf("field label not supported: %s", label)
|
||||
}
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
9
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/doc.go
generated
vendored
Normal file
9
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/doc.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
// Copyright (c) 2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// +k8s:deepcopy-gen=package,register
|
||||
// +k8s:openapi-gen=true
|
||||
|
||||
// Package v3 is the v3 version of the API.
|
||||
// +groupName=projectcalico.org
|
||||
|
||||
package v3 // import "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
|
||||
557
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/felixconfig.go
generated
vendored
Normal file
557
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/felixconfig.go
generated
vendored
Normal file
@@ -0,0 +1,557 @@
|
||||
// Copyright (c) 2017-2022 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/projectcalico/api/pkg/lib/numorstring"
|
||||
)
|
||||
|
||||
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// FelixConfigurationList contains a list of FelixConfiguration objects.
type FelixConfigurationList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Items is the list of FelixConfiguration resources in this list.
	Items []FelixConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"`
}
|
||||
|
||||
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// FelixConfiguration represents a Felix configuration resource; its Spec
// carries the actual configuration values (see FelixConfigurationSpec).
type FelixConfiguration struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object metadata.
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Spec contains the Felix configuration values.
	Spec FelixConfigurationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
|
||||
|
||||
// IptablesBackend names which iptables backend Felix should drive; the
// accepted values are the IptablesBackend* constants below.
type IptablesBackend string

const (
	// Kind names used in TypeMeta for FelixConfiguration resources.
	KindFelixConfiguration     = "FelixConfiguration"
	KindFelixConfigurationList = "FelixConfigurationList"
	// Accepted IptablesBackend values.
	// NOTE(review): these are untyped string constants, not typed as
	// IptablesBackend — confirm before adding the type (it would restrict
	// callers that use them as plain strings).
	IptablesBackendLegacy   = "Legacy"
	IptablesBackendNFTables = "NFT"
	IptablesBackendAuto     = "Auto"
)
|
||||
|
||||
// +kubebuilder:validation:Enum=DoNothing;Enable;Disable

// AWSSrcDstCheckOption is a three-way setting whose valid values are
// DoNothing, Enable, and Disable (per the kubebuilder enum above).
// Presumably it controls the EC2 source/destination check on the node —
// TODO confirm against the Felix documentation.
type AWSSrcDstCheckOption string

const (
	AWSSrcDstCheckOptionDoNothing AWSSrcDstCheckOption = "DoNothing"
	// NOTE(review): the two constants below are untyped string constants,
	// not AWSSrcDstCheckOption — confirm before adding the type.
	AWSSrcDstCheckOptionEnable  = "Enable"
	AWSSrcDstCheckOptionDisable = "Disable"
)
|
||||
|
||||
// +kubebuilder:validation:Enum=Enabled;Disabled

// FloatingIPType is an Enabled/Disabled switch; the only valid values are
// the two typed constants below (enforced by the kubebuilder enum above).
type FloatingIPType string

const (
	FloatingIPsEnabled  FloatingIPType = "Enabled"
	FloatingIPsDisabled FloatingIPType = "Disabled"
)
|
||||
|
||||
// FelixConfigurationSpec contains the values of the Felix configuration.
|
||||
type FelixConfigurationSpec struct {
|
||||
// UseInternalDataplaneDriver, if true, Felix will use its internal dataplane programming logic. If false, it
|
||||
// will launch an external dataplane driver and communicate with it over protobuf.
|
||||
UseInternalDataplaneDriver *bool `json:"useInternalDataplaneDriver,omitempty"`
|
||||
// DataplaneDriver filename of the external dataplane driver to use. Only used if UseInternalDataplaneDriver
|
||||
// is set to false.
|
||||
DataplaneDriver string `json:"dataplaneDriver,omitempty"`
|
||||
|
||||
// DataplaneWatchdogTimeout is the readiness/liveness timeout used for Felix's (internal) dataplane driver.
|
||||
// Increase this value if you experience spurious non-ready or non-live events when Felix is under heavy load.
|
||||
// Decrease the value to get felix to report non-live or non-ready more quickly. [Default: 90s]
|
||||
//
|
||||
// Deprecated: replaced by the generic HealthTimeoutOverrides.
|
||||
DataplaneWatchdogTimeout *metav1.Duration `json:"dataplaneWatchdogTimeout,omitempty" configv1timescale:"seconds"`
|
||||
|
||||
// IPv6Support controls whether Felix enables support for IPv6 (if supported by the in-use dataplane).
|
||||
IPv6Support *bool `json:"ipv6Support,omitempty" confignamev1:"Ipv6Support"`
|
||||
|
||||
// RouteRefreshInterval is the period at which Felix re-checks the routes
|
||||
// in the dataplane to ensure that no other process has accidentally broken Calico's rules.
|
||||
// Set to 0 to disable route refresh. [Default: 90s]
|
||||
RouteRefreshInterval *metav1.Duration `json:"routeRefreshInterval,omitempty" configv1timescale:"seconds"`
|
||||
// InterfaceRefreshInterval is the period at which Felix rescans local interfaces to verify their state.
|
||||
// The rescan can be disabled by setting the interval to 0.
|
||||
InterfaceRefreshInterval *metav1.Duration `json:"interfaceRefreshInterval,omitempty" configv1timescale:"seconds"`
|
||||
// IptablesRefreshInterval is the period at which Felix re-checks the IP sets
|
||||
// in the dataplane to ensure that no other process has accidentally broken Calico's rules.
|
||||
// Set to 0 to disable IP sets refresh. Note: the default for this value is lower than the
|
||||
// other refresh intervals as a workaround for a Linux kernel bug that was fixed in kernel
|
||||
// version 4.11. If you are using v4.11 or greater you may want to set this to, a higher value
|
||||
// to reduce Felix CPU usage. [Default: 10s]
|
||||
IptablesRefreshInterval *metav1.Duration `json:"iptablesRefreshInterval,omitempty" configv1timescale:"seconds"`
|
||||
// IptablesPostWriteCheckInterval is the period after Felix has done a write
|
||||
// to the dataplane that it schedules an extra read back in order to check the write was not
|
||||
// clobbered by another process. This should only occur if another application on the system
|
||||
// doesn't respect the iptables lock. [Default: 1s]
|
||||
IptablesPostWriteCheckInterval *metav1.Duration `json:"iptablesPostWriteCheckInterval,omitempty" configv1timescale:"seconds" confignamev1:"IptablesPostWriteCheckIntervalSecs"`
|
||||
// IptablesLockFilePath is the location of the iptables lock file. You may need to change this
|
||||
// if the lock file is not in its standard location (for example if you have mapped it into Felix's
|
||||
// container at a different path). [Default: /run/xtables.lock]
|
||||
IptablesLockFilePath string `json:"iptablesLockFilePath,omitempty"`
|
||||
// IptablesLockTimeout is the time that Felix will wait for the iptables lock,
|
||||
// or 0, to disable. To use this feature, Felix must share the iptables lock file with all other
|
||||
// processes that also take the lock. When running Felix inside a container, this requires the
|
||||
// /run directory of the host to be mounted into the calico/node or calico/felix container.
|
||||
// [Default: 0s disabled]
|
||||
IptablesLockTimeout *metav1.Duration `json:"iptablesLockTimeout,omitempty" configv1timescale:"seconds" confignamev1:"IptablesLockTimeoutSecs"`
|
||||
// IptablesLockProbeInterval is the time that Felix will wait between
|
||||
// attempts to acquire the iptables lock if it is not available. Lower values make Felix more
|
||||
// responsive when the lock is contended, but use more CPU. [Default: 50ms]
|
||||
IptablesLockProbeInterval *metav1.Duration `json:"iptablesLockProbeInterval,omitempty" configv1timescale:"milliseconds" confignamev1:"IptablesLockProbeIntervalMillis"`
|
||||
// FeatureDetectOverride is used to override feature detection based on auto-detected platform
|
||||
// capabilities. Values are specified in a comma separated list with no spaces, example;
|
||||
// "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". "true" or "false" will
|
||||
// force the feature, empty or omitted values are auto-detected.
|
||||
FeatureDetectOverride string `json:"featureDetectOverride,omitempty" validate:"omitempty,keyValueList"`
|
||||
// FeatureGates is used to enable or disable tech-preview Calico features.
|
||||
// Values are specified in a comma separated list with no spaces, example;
|
||||
// "BPFConnectTimeLoadBalancingWorkaround=enabled,XyZ=false". This is
|
||||
// used to enable features that are not fully production ready.
|
||||
FeatureGates string `json:"featureGates,omitempty" validate:"omitempty,keyValueList"`
|
||||
// IpsetsRefreshInterval is the period at which Felix re-checks all iptables
|
||||
// state to ensure that no other process has accidentally broken Calico's rules. Set to 0 to
|
||||
// disable iptables refresh. [Default: 90s]
|
||||
IpsetsRefreshInterval *metav1.Duration `json:"ipsetsRefreshInterval,omitempty" configv1timescale:"seconds"`
|
||||
MaxIpsetSize *int `json:"maxIpsetSize,omitempty"`
|
||||
// IptablesBackend specifies which backend of iptables will be used. The default is Auto.
|
||||
IptablesBackend *IptablesBackend `json:"iptablesBackend,omitempty" validate:"omitempty,iptablesBackend"`
|
||||
|
||||
// XDPRefreshInterval is the period at which Felix re-checks all XDP state to ensure that no
|
||||
// other process has accidentally broken Calico's BPF maps or attached programs. Set to 0 to
|
||||
// disable XDP refresh. [Default: 90s]
|
||||
XDPRefreshInterval *metav1.Duration `json:"xdpRefreshInterval,omitempty" configv1timescale:"seconds"`
|
||||
|
||||
NetlinkTimeout *metav1.Duration `json:"netlinkTimeout,omitempty" configv1timescale:"seconds" confignamev1:"NetlinkTimeoutSecs"`
|
||||
|
||||
// MetadataAddr is the IP address or domain name of the server that can answer VM queries for
|
||||
// cloud-init metadata. In OpenStack, this corresponds to the machine running nova-api (or in
|
||||
// Ubuntu, nova-api-metadata). A value of none (case insensitive) means that Felix should not
|
||||
// set up any NAT rule for the metadata path. [Default: 127.0.0.1]
|
||||
MetadataAddr string `json:"metadataAddr,omitempty"`
|
||||
// MetadataPort is the port of the metadata server. This, combined with global.MetadataAddr (if
|
||||
// not 'None'), is used to set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort.
|
||||
// In most cases this should not need to be changed [Default: 8775].
|
||||
MetadataPort *int `json:"metadataPort,omitempty"`
|
||||
|
||||
// OpenstackRegion is the name of the region that a particular Felix belongs to. In a multi-region
|
||||
// Calico/OpenStack deployment, this must be configured somehow for each Felix (here in the datamodel,
|
||||
// or in felix.cfg or the environment on each compute node), and must match the [calico]
|
||||
// openstack_region value configured in neutron.conf on each node. [Default: Empty]
|
||||
OpenstackRegion string `json:"openstackRegion,omitempty"`
|
||||
|
||||
// InterfacePrefix is the interface name prefix that identifies workload endpoints and so distinguishes
|
||||
// them from host endpoint interfaces. Note: in environments other than bare metal, the orchestrators
|
||||
// configure this appropriately. For example our Kubernetes and Docker integrations set the 'cali' value,
|
||||
// and our OpenStack integration sets the 'tap' value. [Default: cali]
|
||||
InterfacePrefix string `json:"interfacePrefix,omitempty"`
|
||||
// InterfaceExclude is a comma-separated list of interfaces that Felix should exclude when monitoring for host
|
||||
// endpoints. The default value ensures that Felix ignores Kubernetes' IPVS dummy interface, which is used
|
||||
// internally by kube-proxy. If you want to exclude multiple interface names using a single value, the list
|
||||
// supports regular expressions. For regular expressions you must wrap the value with '/'. For example
|
||||
// having values '/^kube/,veth1' will exclude all interfaces that begin with 'kube' and also the interface
|
||||
// 'veth1'. [Default: kube-ipvs0]
|
||||
InterfaceExclude string `json:"interfaceExclude,omitempty"`
|
||||
|
||||
// ChainInsertMode controls whether Felix hooks the kernel's top-level iptables chains by inserting a rule
|
||||
// at the top of the chain or by appending a rule at the bottom. insert is the safe default since it prevents
|
||||
// Calico's rules from being bypassed. If you switch to append mode, be sure that the other rules in the chains
|
||||
// signal acceptance by falling through to the Calico rules, otherwise the Calico policy will be bypassed.
|
||||
// [Default: insert]
|
||||
ChainInsertMode string `json:"chainInsertMode,omitempty"`
|
||||
// DefaultEndpointToHostAction controls what happens to traffic that goes from a workload endpoint to the host
|
||||
// itself (after the traffic hits the endpoint egress policy). By default Calico blocks traffic from workload
|
||||
// endpoints to the host itself with an iptables "DROP" action. If you want to allow some or all traffic from
|
||||
// endpoint to host, set this parameter to RETURN or ACCEPT. Use RETURN if you have your own rules in the iptables
|
||||
// "INPUT" chain; Calico will insert its rules at the top of that chain, then "RETURN" packets to the "INPUT" chain
|
||||
// once it has completed processing workload endpoint egress policy. Use ACCEPT to unconditionally accept packets
|
||||
// from workloads after processing workload endpoint egress policy. [Default: Drop]
|
||||
DefaultEndpointToHostAction string `json:"defaultEndpointToHostAction,omitempty" validate:"omitempty,dropAcceptReturn"`
|
||||
IptablesFilterAllowAction string `json:"iptablesFilterAllowAction,omitempty" validate:"omitempty,acceptReturn"`
|
||||
IptablesMangleAllowAction string `json:"iptablesMangleAllowAction,omitempty" validate:"omitempty,acceptReturn"`
|
||||
// IptablesFilterDenyAction controls what happens to traffic that is denied by network policy. By default Calico blocks traffic
|
||||
// with an iptables "DROP" action. If you want to use "REJECT" action instead you can configure it in here.
|
||||
IptablesFilterDenyAction string `json:"iptablesFilterDenyAction,omitempty" validate:"omitempty,dropReject"`
|
||||
// LogPrefix is the log prefix that Felix uses when rendering LOG rules. [Default: calico-packet]
|
||||
LogPrefix string `json:"logPrefix,omitempty"`
|
||||
|
||||
// LogFilePath is the full path to the Felix log. Set to none to disable file logging. [Default: /var/log/calico/felix.log]
|
||||
LogFilePath string `json:"logFilePath,omitempty"`
|
||||
|
||||
// LogSeverityFile is the log severity above which logs are sent to the log file. [Default: Info]
|
||||
LogSeverityFile string `json:"logSeverityFile,omitempty" validate:"omitempty,logLevel"`
|
||||
// LogSeverityScreen is the log severity above which logs are sent to the stdout. [Default: Info]
|
||||
LogSeverityScreen string `json:"logSeverityScreen,omitempty" validate:"omitempty,logLevel"`
|
||||
// LogSeveritySys is the log severity above which logs are sent to the syslog. Set to None for no logging to syslog.
|
||||
// [Default: Info]
|
||||
LogSeveritySys string `json:"logSeveritySys,omitempty" validate:"omitempty,logLevel"`
|
||||
// LogDebugFilenameRegex controls which source code files have their Debug log output included in the logs.
|
||||
// Only logs from files with names that match the given regular expression are included. The filter only applies
|
||||
// to Debug level logs.
|
||||
LogDebugFilenameRegex string `json:"logDebugFilenameRegex,omitempty" validate:"omitempty,regexp"`
|
||||
|
||||
// IPIPEnabled overrides whether Felix should configure an IPIP interface on the host. Optional as Felix determines this based on the existing IP pools. [Default: nil (unset)]
|
||||
IPIPEnabled *bool `json:"ipipEnabled,omitempty" confignamev1:"IpInIpEnabled"`
|
||||
// IPIPMTU is the MTU to set on the tunnel device. See Configuring MTU [Default: 1440]
|
||||
IPIPMTU *int `json:"ipipMTU,omitempty" confignamev1:"IpInIpMtu"`
|
||||
|
||||
// VXLANEnabled overrides whether Felix should create the VXLAN tunnel device for IPv4 VXLAN networking. Optional as Felix determines this based on the existing IP pools. [Default: nil (unset)]
|
||||
VXLANEnabled *bool `json:"vxlanEnabled,omitempty" confignamev1:"VXLANEnabled"`
|
||||
// VXLANMTU is the MTU to set on the IPv4 VXLAN tunnel device. See Configuring MTU [Default: 1410]
|
||||
VXLANMTU *int `json:"vxlanMTU,omitempty"`
|
||||
// VXLANMTUV6 is the MTU to set on the IPv6 VXLAN tunnel device. See Configuring MTU [Default: 1390]
|
||||
VXLANMTUV6 *int `json:"vxlanMTUV6,omitempty"`
|
||||
VXLANPort *int `json:"vxlanPort,omitempty"`
|
||||
VXLANVNI *int `json:"vxlanVNI,omitempty"`
|
||||
|
||||
// AllowVXLANPacketsFromWorkloads controls whether Felix will add a rule to drop VXLAN encapsulated traffic
|
||||
// from workloads [Default: false]
|
||||
// +optional
|
||||
AllowVXLANPacketsFromWorkloads *bool `json:"allowVXLANPacketsFromWorkloads,omitempty"`
|
||||
// AllowIPIPPacketsFromWorkloads controls whether Felix will add a rule to drop IPIP encapsulated traffic
|
||||
// from workloads [Default: false]
|
||||
// +optional
|
||||
AllowIPIPPacketsFromWorkloads *bool `json:"allowIPIPPacketsFromWorkloads,omitempty"`
|
||||
|
||||
// ReportingInterval is the interval at which Felix reports its status into the datastore or 0 to disable.
|
||||
// Must be non-zero in OpenStack deployments. [Default: 30s]
|
||||
ReportingInterval *metav1.Duration `json:"reportingInterval,omitempty" configv1timescale:"seconds" confignamev1:"ReportingIntervalSecs"`
|
||||
// ReportingTTL is the time-to-live setting for process-wide status reports. [Default: 90s]
|
||||
ReportingTTL *metav1.Duration `json:"reportingTTL,omitempty" configv1timescale:"seconds" confignamev1:"ReportingTTLSecs"`
|
||||
|
||||
EndpointReportingEnabled *bool `json:"endpointReportingEnabled,omitempty"`
|
||||
EndpointReportingDelay *metav1.Duration `json:"endpointReportingDelay,omitempty" configv1timescale:"seconds" confignamev1:"EndpointReportingDelaySecs"`
|
||||
|
||||
// IptablesMarkMask is the mask that Felix selects its IPTables Mark bits from. Should be a 32 bit hexadecimal
|
||||
// number with at least 8 bits set, none of which clash with any other mark bits in use on the system.
|
||||
// [Default: 0xff000000]
|
||||
IptablesMarkMask *uint32 `json:"iptablesMarkMask,omitempty"`
|
||||
|
||||
DisableConntrackInvalidCheck *bool `json:"disableConntrackInvalidCheck,omitempty"`
|
||||
|
||||
HealthEnabled *bool `json:"healthEnabled,omitempty"`
|
||||
HealthHost *string `json:"healthHost,omitempty"`
|
||||
HealthPort *int `json:"healthPort,omitempty"`
|
||||
// HealthTimeoutOverrides allows the internal watchdog timeouts of individual subcomponents to be
|
||||
// overridden. This is useful for working around "false positive" liveness timeouts that can occur
|
||||
// in particularly stressful workloads or if CPU is constrained. For a list of active
|
||||
// subcomponents, see Felix's logs.
|
||||
HealthTimeoutOverrides []HealthTimeoutOverride `json:"healthTimeoutOverrides,omitempty" validate:"omitempty,dive"`
|
||||
|
||||
// PrometheusMetricsEnabled enables the Prometheus metrics server in Felix if set to true. [Default: false]
|
||||
PrometheusMetricsEnabled *bool `json:"prometheusMetricsEnabled,omitempty"`
|
||||
// PrometheusMetricsHost is the host that the Prometheus metrics server should bind to. [Default: empty]
|
||||
PrometheusMetricsHost string `json:"prometheusMetricsHost,omitempty" validate:"omitempty,prometheusHost"`
|
||||
// PrometheusMetricsPort is the TCP port that the Prometheus metrics server should bind to. [Default: 9091]
|
||||
PrometheusMetricsPort *int `json:"prometheusMetricsPort,omitempty"`
|
||||
// PrometheusGoMetricsEnabled disables Go runtime metrics collection, which the Prometheus client does by default, when
|
||||
// set to false. This reduces the number of metrics reported, reducing Prometheus load. [Default: true]
|
||||
PrometheusGoMetricsEnabled *bool `json:"prometheusGoMetricsEnabled,omitempty"`
|
||||
// PrometheusProcessMetricsEnabled disables process metrics collection, which the Prometheus client does by default, when
|
||||
// set to false. This reduces the number of metrics reported, reducing Prometheus load. [Default: true]
|
||||
PrometheusProcessMetricsEnabled *bool `json:"prometheusProcessMetricsEnabled,omitempty"`
|
||||
// PrometheusWireGuardMetricsEnabled disables wireguard metrics collection, which the Prometheus client does by default, when
|
||||
// set to false. This reduces the number of metrics reported, reducing Prometheus load. [Default: true]
|
||||
PrometheusWireGuardMetricsEnabled *bool `json:"prometheusWireGuardMetricsEnabled,omitempty"`
|
||||
|
||||
// FailsafeInboundHostPorts is a list of UDP/TCP ports and CIDRs that Felix will allow incoming traffic to host endpoints
|
||||
// on irrespective of the security policy. This is useful to avoid accidentally cutting off a host with incorrect configuration.
|
||||
// For back-compatibility, if the protocol is not specified, it defaults to "tcp". If a CIDR is not specified, it will allow
|
||||
// traffic from all addresses. To disable all inbound host ports, use the value none. The default value allows ssh access
|
||||
// and DHCP.
|
||||
// [Default: tcp:22, udp:68, tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]
|
||||
FailsafeInboundHostPorts *[]ProtoPort `json:"failsafeInboundHostPorts,omitempty"`
|
||||
// FailsafeOutboundHostPorts is a list of UDP/TCP ports and CIDRs that Felix will allow outgoing traffic from host endpoints
|
||||
// to irrespective of the security policy. This is useful to avoid accidentally cutting off a host with incorrect configuration.
|
||||
// For back-compatibility, if the protocol is not specified, it defaults to "tcp". If a CIDR is not specified, it will allow
|
||||
// traffic from all addresses. To disable all outbound host ports, use the value none. The default value opens etcd's standard
|
||||
// ports to ensure that Felix does not get cut off from etcd as well as allowing DHCP and DNS.
|
||||
// [Default: tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667, udp:53, udp:67]
|
||||
FailsafeOutboundHostPorts *[]ProtoPort `json:"failsafeOutboundHostPorts,omitempty"`
|
||||
|
||||
// KubeNodePortRanges holds list of port ranges used for service node ports. Only used if felix detects kube-proxy running in ipvs mode.
|
||||
// Felix uses these ranges to separate host and workload traffic. [Default: 30000:32767].
|
||||
KubeNodePortRanges *[]numorstring.Port `json:"kubeNodePortRanges,omitempty" validate:"omitempty,dive"`
|
||||
|
||||
// PolicySyncPathPrefix is used to by Felix to communicate policy changes to external services,
|
||||
// like Application layer policy. [Default: Empty]
|
||||
PolicySyncPathPrefix string `json:"policySyncPathPrefix,omitempty"`
|
||||
|
||||
// UsageReportingEnabled reports anonymous Calico version number and cluster size to projectcalico.org. Logs warnings returned by the usage
|
||||
// server. For example, if a significant security vulnerability has been discovered in the version of Calico being used. [Default: true]
|
||||
UsageReportingEnabled *bool `json:"usageReportingEnabled,omitempty"`
|
||||
// UsageReportingInitialDelay controls the minimum delay before Felix makes a report. [Default: 300s]
|
||||
UsageReportingInitialDelay *metav1.Duration `json:"usageReportingInitialDelay,omitempty" configv1timescale:"seconds" confignamev1:"UsageReportingInitialDelaySecs"`
|
||||
// UsageReportingInterval controls the interval at which Felix makes reports. [Default: 86400s]
|
||||
UsageReportingInterval *metav1.Duration `json:"usageReportingInterval,omitempty" configv1timescale:"seconds" confignamev1:"UsageReportingIntervalSecs"`
|
||||
|
||||
// NATPortRange specifies the range of ports that is used for port mapping when doing outgoing NAT. When unset the default behavior of the
|
||||
// network stack is used.
|
||||
NATPortRange *numorstring.Port `json:"natPortRange,omitempty"`
|
||||
|
||||
// NATOutgoingAddress specifies an address to use when performing source NAT for traffic in a natOutgoing pool that
|
||||
// is leaving the network. By default the address used is an address on the interface the traffic is leaving on
|
||||
// (ie it uses the iptables MASQUERADE target)
|
||||
NATOutgoingAddress string `json:"natOutgoingAddress,omitempty"`
|
||||
|
||||
// This is the IPv4 source address to use on programmed device routes. By default the source address is left blank,
|
||||
// leaving the kernel to choose the source address used.
|
||||
DeviceRouteSourceAddress string `json:"deviceRouteSourceAddress,omitempty"`
|
||||
|
||||
// This is the IPv6 source address to use on programmed device routes. By default the source address is left blank,
|
||||
// leaving the kernel to choose the source address used.
|
||||
DeviceRouteSourceAddressIPv6 string `json:"deviceRouteSourceAddressIPv6,omitempty"`
|
||||
|
||||
// This defines the route protocol added to programmed device routes, by default this will be RTPROT_BOOT
|
||||
// when left blank.
|
||||
DeviceRouteProtocol *int `json:"deviceRouteProtocol,omitempty"`
|
||||
// Whether or not to remove device routes that have not been programmed by Felix. Disabling this will allow external
|
||||
// applications to also add device routes. This is enabled by default which means we will remove externally added routes.
|
||||
RemoveExternalRoutes *bool `json:"removeExternalRoutes,omitempty"`
|
||||
|
||||
// ExternalNodesCIDRList is a list of CIDR's of external-non-calico-nodes which may source tunnel traffic and have
|
||||
// the tunneled traffic be accepted at calico nodes.
|
||||
ExternalNodesCIDRList *[]string `json:"externalNodesList,omitempty"`
|
||||
|
||||
DebugMemoryProfilePath string `json:"debugMemoryProfilePath,omitempty"`
|
||||
DebugDisableLogDropping *bool `json:"debugDisableLogDropping,omitempty"`
|
||||
DebugSimulateCalcGraphHangAfter *metav1.Duration `json:"debugSimulateCalcGraphHangAfter,omitempty" configv1timescale:"seconds"`
|
||||
DebugSimulateDataplaneHangAfter *metav1.Duration `json:"debugSimulateDataplaneHangAfter,omitempty" configv1timescale:"seconds"`
|
||||
|
||||
IptablesNATOutgoingInterfaceFilter string `json:"iptablesNATOutgoingInterfaceFilter,omitempty" validate:"omitempty,ifaceFilter"`
|
||||
|
||||
// SidecarAccelerationEnabled enables experimental sidecar acceleration [Default: false]
|
||||
SidecarAccelerationEnabled *bool `json:"sidecarAccelerationEnabled,omitempty"`
|
||||
|
||||
// XDPEnabled enables XDP acceleration for suitable untracked incoming deny rules. [Default: true]
|
||||
XDPEnabled *bool `json:"xdpEnabled,omitempty" confignamev1:"XDPEnabled"`
|
||||
|
||||
// GenericXDPEnabled enables Generic XDP so network cards that don't support XDP offload or driver
|
||||
// modes can use XDP. This is not recommended since it doesn't provide better performance than
|
||||
// iptables. [Default: false]
|
||||
GenericXDPEnabled *bool `json:"genericXDPEnabled,omitempty" confignamev1:"GenericXDPEnabled"`
|
||||
|
||||
// BPFEnabled, if enabled Felix will use the BPF dataplane. [Default: false]
|
||||
BPFEnabled *bool `json:"bpfEnabled,omitempty" validate:"omitempty"`
|
||||
// BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled sysctl to disable
|
||||
// unprivileged use of BPF. This ensures that unprivileged users cannot access Calico's BPF maps and
|
||||
// cannot insert their own BPF programs to interfere with Calico's. [Default: true]
|
||||
BPFDisableUnprivileged *bool `json:"bpfDisableUnprivileged,omitempty" validate:"omitempty"`
|
||||
// BPFLogLevel controls the log level of the BPF programs when in BPF dataplane mode. One of "Off", "Info", or
|
||||
// "Debug". The logs are emitted to the BPF trace pipe, accessible with the command `tc exec bpf debug`.
|
||||
// [Default: Off].
|
||||
// +optional
|
||||
BPFLogLevel string `json:"bpfLogLevel" validate:"omitempty,bpfLogLevel"`
|
||||
// BPFDataIfacePattern is a regular expression that controls which interfaces Felix should attach BPF programs to
|
||||
// in order to catch traffic to/from the network. This needs to match the interfaces that Calico workload traffic
|
||||
// flows over as well as any interfaces that handle incoming traffic to nodeports and services from outside the
|
||||
// cluster. It should not match the workload interfaces (usually named cali...).
|
||||
BPFDataIfacePattern string `json:"bpfDataIfacePattern,omitempty" validate:"omitempty,regexp"`
|
||||
// BPFL3IfacePattern is a regular expression that allows to list tunnel devices like wireguard or vxlan (i.e., L3 devices)
|
||||
// in addition to BPFDataIfacePattern. That is, tunnel interfaces not created by Calico, that Calico workload traffic flows
|
||||
// over as well as any interfaces that handle incoming traffic to nodeports and services from outside the cluster.
|
||||
BPFL3IfacePattern string `json:"bpfL3IfacePattern,omitempty" validate:"omitempty,regexp"`
|
||||
// BPFConnectTimeLoadBalancingEnabled when in BPF mode, controls whether Felix installs the connection-time load
|
||||
// balancer. The connect-time load balancer is required for the host to be able to reach Kubernetes services
|
||||
// and it improves the performance of pod-to-service connections. The only reason to disable it is for debugging
|
||||
// purposes. [Default: true]
|
||||
BPFConnectTimeLoadBalancingEnabled *bool `json:"bpfConnectTimeLoadBalancingEnabled,omitempty" validate:"omitempty"`
|
||||
// BPFExternalServiceMode in BPF mode, controls how connections from outside the cluster to services (node ports
|
||||
// and cluster IPs) are forwarded to remote workloads. If set to "Tunnel" then both request and response traffic
|
||||
// is tunneled to the remote node. If set to "DSR", the request traffic is tunneled but the response traffic
|
||||
// is sent directly from the remote node. In "DSR" mode, the remote node appears to use the IP of the ingress
|
||||
// node; this requires a permissive L2 network. [Default: Tunnel]
|
||||
BPFExternalServiceMode string `json:"bpfExternalServiceMode,omitempty" validate:"omitempty,bpfServiceMode"`
|
||||
// BPFDSROptoutCIDRs is a list of CIDRs which are excluded from DSR. That is, clients
|
||||
// in those CIDRs will accesses nodeports as if BPFExternalServiceMode was set to
|
||||
// Tunnel.
|
||||
BPFDSROptoutCIDRs *[]string `json:"bpfDSROptoutCIDRs,omitempty" validate:"omitempty,cidrs"`
|
||||
// BPFExtToServiceConnmark in BPF mode, control a 32bit mark that is set on connections from an
|
||||
// external client to a local service. This mark allows us to control how packets of that
|
||||
// connection are routed within the host and how is routing interpreted by RPF check. [Default: 0]
|
||||
BPFExtToServiceConnmark *int `json:"bpfExtToServiceConnmark,omitempty" validate:"omitempty,gte=0,lte=4294967295"`
|
||||
// BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF mode, Felix will proactively clean up the upstream
|
||||
// Kubernetes kube-proxy's iptables chains. Should only be enabled if kube-proxy is not running. [Default: true]
|
||||
BPFKubeProxyIptablesCleanupEnabled *bool `json:"bpfKubeProxyIptablesCleanupEnabled,omitempty" validate:"omitempty"`
|
||||
// BPFKubeProxyMinSyncPeriod, in BPF mode, controls the minimum time between updates to the dataplane for Felix's
|
||||
// embedded kube-proxy. Lower values give reduced set-up latency. Higher values reduce Felix CPU usage by
|
||||
// batching up more work. [Default: 1s]
|
||||
BPFKubeProxyMinSyncPeriod *metav1.Duration `json:"bpfKubeProxyMinSyncPeriod,omitempty" validate:"omitempty" configv1timescale:"seconds"`
|
||||
// BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls whether Felix's
|
||||
// embedded kube-proxy accepts EndpointSlices or not.
|
||||
BPFKubeProxyEndpointSlicesEnabled *bool `json:"bpfKubeProxyEndpointSlicesEnabled,omitempty" validate:"omitempty"`
|
||||
// BPFPSNATPorts sets the range from which we randomly pick a port if there is a source port
|
||||
// collision. This should be within the ephemeral range as defined by RFC 6056 (1024–65535) and
|
||||
// preferably outside the ephemeral ranges used by common operating systems. Linux uses
|
||||
// 32768–60999, while others mostly use the IANA defined range 49152–65535. It is not necessarily
|
||||
// a problem if this range overlaps with the operating systems. Both ends of the range are
|
||||
// inclusive. [Default: 20000:29999]
|
||||
BPFPSNATPorts *numorstring.Port `json:"bpfPSNATPorts,omitempty"`
|
||||
// BPFMapSizeNATFrontend sets the size for nat front end map.
|
||||
// FrontendMap should be large enough to hold an entry for each nodeport,
|
||||
// external IP and each port in each service.
|
||||
BPFMapSizeNATFrontend *int `json:"bpfMapSizeNATFrontend,omitempty"`
|
||||
// BPFMapSizeNATBackend sets the size for nat back end map.
|
||||
// This is the total number of endpoints. This is mostly
|
||||
// more than the size of the number of services.
|
||||
BPFMapSizeNATBackend *int `json:"bpfMapSizeNATBackend,omitempty"`
|
||||
BPFMapSizeNATAffinity *int `json:"bpfMapSizeNATAffinity,omitempty"`
|
||||
// BPFMapSizeRoute sets the size for the routes map. The routes map should be large enough
|
||||
// to hold one entry per workload and a handful of entries per host (enough to cover its own IPs and
|
||||
// tunnel IPs).
|
||||
BPFMapSizeRoute *int `json:"bpfMapSizeRoute,omitempty"`
|
||||
// BPFMapSizeConntrack sets the size for the conntrack map. This map must be large enough to hold
|
||||
// an entry for each active connection. Warning: changing the size of the conntrack map can cause disruption.
|
||||
BPFMapSizeConntrack *int `json:"bpfMapSizeConntrack,omitempty"`
|
||||
// BPFMapSizeIPSets sets the size for ipsets map. The IP sets map must be large enough to hold an entry
|
||||
// for each endpoint matched by every selector in the source/destination matches in network policy. Selectors
|
||||
// such as "all()" can result in large numbers of entries (one entry per endpoint in that case).
|
||||
BPFMapSizeIPSets *int `json:"bpfMapSizeIPSets,omitempty"`
|
||||
// BPFMapSizeIfState sets the size for ifstate map. The ifstate map must be large enough to hold an entry
|
||||
// for each device (host + workloads) on a host.
|
||||
BPFMapSizeIfState *int `json:"bpfMapSizeIfState,omitempty"`
|
||||
// BPFHostConntrackBypass Controls whether to bypass Linux conntrack in BPF mode for
|
||||
// workloads and services. [Default: true - bypass Linux conntrack]
|
||||
BPFHostConntrackBypass *bool `json:"bpfHostConntrackBypass,omitempty"`
|
||||
// BPFEnforceRPF enforce strict RPF on all host interfaces with BPF programs regardless of
|
||||
// what is the per-interfaces or global setting. Possible values are Disabled, Strict
|
||||
// or Loose. [Default: Strict]
|
||||
BPFEnforceRPF string `json:"bpfEnforceRPF,omitempty"`
|
||||
// BPFPolicyDebugEnabled when true, Felix records detailed information
|
||||
// about the BPF policy programs, which can be examined with the calico-bpf command-line tool.
|
||||
BPFPolicyDebugEnabled *bool `json:"bpfPolicyDebugEnabled,omitempty"`
|
||||
// RouteSource configures where Felix gets its routing information.
|
||||
// - WorkloadIPs: use workload endpoints to construct routes.
|
||||
// - CalicoIPAM: the default - use IPAM data to construct routes.
|
||||
RouteSource string `json:"routeSource,omitempty" validate:"omitempty,routeSource"`
|
||||
|
||||
// Calico programs additional Linux route tables for various purposes.
|
||||
// RouteTableRanges specifies a set of table index ranges that Calico should use.
|
||||
// Deprecates`RouteTableRange`, overrides `RouteTableRange`.
|
||||
RouteTableRanges *RouteTableRanges `json:"routeTableRanges,omitempty" validate:"omitempty,dive"`
|
||||
|
||||
// Deprecated in favor of RouteTableRanges.
|
||||
// Calico programs additional Linux route tables for various purposes.
|
||||
// RouteTableRange specifies the indices of the route tables that Calico should use.
|
||||
RouteTableRange *RouteTableRange `json:"routeTableRange,omitempty" validate:"omitempty"`
|
||||
|
||||
// RouteSyncDisabled will disable all operations performed on the route table. Set to true to
|
||||
// run in network-policy mode only.
|
||||
RouteSyncDisabled *bool `json:"routeSyncDisabled,omitempty"`
|
||||
|
||||
// WireguardEnabled controls whether Wireguard is enabled for IPv4 (encapsulating IPv4 traffic over an IPv4 underlay network). [Default: false]
|
||||
WireguardEnabled *bool `json:"wireguardEnabled,omitempty"`
|
||||
// WireguardEnabledV6 controls whether Wireguard is enabled for IPv6 (encapsulating IPv6 traffic over an IPv6 underlay network). [Default: false]
|
||||
WireguardEnabledV6 *bool `json:"wireguardEnabledV6,omitempty"`
|
||||
// WireguardListeningPort controls the listening port used by IPv4 Wireguard. [Default: 51820]
|
||||
WireguardListeningPort *int `json:"wireguardListeningPort,omitempty" validate:"omitempty,gt=0,lte=65535"`
|
||||
// WireguardListeningPortV6 controls the listening port used by IPv6 Wireguard. [Default: 51821]
|
||||
WireguardListeningPortV6 *int `json:"wireguardListeningPortV6,omitempty" validate:"omitempty,gt=0,lte=65535"`
|
||||
// WireguardRoutingRulePriority controls the priority value to use for the Wireguard routing rule. [Default: 99]
|
||||
WireguardRoutingRulePriority *int `json:"wireguardRoutingRulePriority,omitempty" validate:"omitempty,gt=0,lt=32766"`
|
||||
// WireguardInterfaceName specifies the name to use for the IPv4 Wireguard interface. [Default: wireguard.cali]
|
||||
WireguardInterfaceName string `json:"wireguardInterfaceName,omitempty" validate:"omitempty,interface"`
|
||||
// WireguardInterfaceNameV6 specifies the name to use for the IPv6 Wireguard interface. [Default: wg-v6.cali]
|
||||
WireguardInterfaceNameV6 string `json:"wireguardInterfaceNameV6,omitempty" validate:"omitempty,interface"`
|
||||
// WireguardMTU controls the MTU on the IPv4 Wireguard interface. See Configuring MTU [Default: 1440]
|
||||
WireguardMTU *int `json:"wireguardMTU,omitempty"`
|
||||
// WireguardMTUV6 controls the MTU on the IPv6 Wireguard interface. See Configuring MTU [Default: 1420]
|
||||
WireguardMTUV6 *int `json:"wireguardMTUV6,omitempty"`
|
||||
// WireguardHostEncryptionEnabled controls whether Wireguard host-to-host encryption is enabled. [Default: false]
|
||||
WireguardHostEncryptionEnabled *bool `json:"wireguardHostEncryptionEnabled,omitempty"`
|
||||
// WireguardKeepAlive controls Wireguard PersistentKeepalive option. Set 0 to disable. [Default: 0]
|
||||
WireguardPersistentKeepAlive *metav1.Duration `json:"wireguardKeepAlive,omitempty"`
|
||||
|
||||
// Set source-destination-check on AWS EC2 instances. Accepted value must be one of "DoNothing", "Enable" or "Disable".
|
||||
// [Default: DoNothing]
|
||||
AWSSrcDstCheck *AWSSrcDstCheckOption `json:"awsSrcDstCheck,omitempty" validate:"omitempty,oneof=DoNothing Enable Disable"`
|
||||
|
||||
// When service IP advertisement is enabled, prevent routing loops to service IPs that are
|
||||
// not in use, by dropping or rejecting packets that do not get DNAT'd by kube-proxy.
|
||||
// Unless set to "Disabled", in which case such routing loops continue to be allowed.
|
||||
// [Default: Drop]
|
||||
ServiceLoopPrevention string `json:"serviceLoopPrevention,omitempty" validate:"omitempty,oneof=Drop Reject Disabled"`
|
||||
|
||||
// WorkloadSourceSpoofing controls whether pods can use the allowedSourcePrefixes annotation to send traffic with a source IP
|
||||
// address that is not theirs. This is disabled by default. When set to "Any", pods can request any prefix.
|
||||
WorkloadSourceSpoofing string `json:"workloadSourceSpoofing,omitempty" validate:"omitempty,oneof=Disabled Any"`
|
||||
|
||||
// MTUIfacePattern is a regular expression that controls which interfaces Felix should scan in order
|
||||
// to calculate the host's MTU.
|
||||
// This should not match workload interfaces (usually named cali...).
|
||||
// +optional
|
||||
MTUIfacePattern string `json:"mtuIfacePattern,omitempty" validate:"omitempty,regexp"`
|
||||
|
||||
// FloatingIPs configures whether or not Felix will program non-OpenStack floating IP addresses. (OpenStack-derived
|
||||
// floating IPs are always programmed, regardless of this setting.)
|
||||
//
|
||||
// +optional
|
||||
FloatingIPs *FloatingIPType `json:"floatingIPs,omitempty" validate:"omitempty"`
|
||||
}
|
||||
|
||||
type HealthTimeoutOverride struct {
|
||||
Name string `json:"name"`
|
||||
Timeout metav1.Duration `json:"timeout"`
|
||||
}
|
||||
|
||||
type RouteTableRange struct {
|
||||
Min int `json:"min"`
|
||||
Max int `json:"max"`
|
||||
}
|
||||
|
||||
type RouteTableIDRange struct {
|
||||
Min int `json:"min"`
|
||||
Max int `json:"max"`
|
||||
}
|
||||
|
||||
type RouteTableRanges []RouteTableIDRange
|
||||
|
||||
func (r RouteTableRanges) NumDesignatedTables() int {
|
||||
var len int = 0
|
||||
for _, rng := range r {
|
||||
len += (rng.Max - rng.Min) + 1 // add one, since range is inclusive
|
||||
}
|
||||
|
||||
return len
|
||||
}
|
||||
|
||||
// ProtoPort is combination of protocol, port, and CIDR. Protocol and port must be specified.
|
||||
type ProtoPort struct {
|
||||
Protocol string `json:"protocol"`
|
||||
Port uint16 `json:"port"`
|
||||
// +optional
|
||||
Net string `json:"net"`
|
||||
}
|
||||
|
||||
// New FelixConfiguration creates a new (zeroed) FelixConfiguration struct with the TypeMetadata
|
||||
// initialized to the current version.
|
||||
func NewFelixConfiguration() *FelixConfiguration {
|
||||
return &FelixConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindFelixConfiguration,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2017,2019-2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -14,42 +14,35 @@
|
||||
|
||||
package v3
|
||||
|
||||
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
KindGlobalNetworkPolicy = "GlobalNetworkPolicy"
|
||||
KindGlobalNetworkPolicyList = "GlobalNetworkPolicyList"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// GlobalNetworkPolicy contains information about a security Policy resource. This contains a set of
|
||||
// security rules to apply. Security policies allow a selector-based security model which can override
|
||||
// the security profiles directly referenced by an endpoint.
|
||||
//
|
||||
// Each policy must do one of the following:
|
||||
//
|
||||
// - Match the packet and apply an “allow” action; this immediately accepts the packet, skipping
|
||||
// all further policies and profiles. This is not recommended in general, because it prevents
|
||||
// further policy from being executed.
|
||||
// - Match the packet and apply a “deny” action; this drops the packet immediately, skipping all
|
||||
// further policy and profiles.
|
||||
// - Fail to match the packet; in which case the packet proceeds to the next policy. If there
|
||||
// are no more policies then the packet is dropped.
|
||||
//
|
||||
// Calico implements the security policy for each endpoint individually and only the policies that
|
||||
// have matching selectors are implemented. This ensures that the number of rules that actually need
|
||||
// to be inserted into the kernel is proportional to the number of local endpoints rather than the
|
||||
// total amount of policy.
|
||||
//
|
||||
// GlobalNetworkPolicy is globally-scoped (i.e. not Namespaced).
|
||||
type GlobalNetworkPolicy struct {
|
||||
// GlobalNetworkPolicyList is a list of Policy objects.
|
||||
type GlobalNetworkPolicyList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
// Specification of the Policy.
|
||||
Spec GlobalNetworkPolicySpec `json:"spec,omitempty"`
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Items []GlobalNetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
type GlobalNetworkPolicy struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Spec GlobalNetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
}
|
||||
|
||||
type GlobalNetworkPolicySpec struct {
|
||||
@@ -90,7 +83,7 @@ type GlobalNetworkPolicySpec struct {
|
||||
// type in {"frontend", "backend"}
|
||||
// deployment != "dev"
|
||||
// ! has(label_name)
|
||||
Selector string `json:"selector" validate:"selector"`
|
||||
Selector string `json:"selector,omitempty" validate:"selector"`
|
||||
// Types indicates whether this policy applies to ingress, or to egress, or to both. When
|
||||
// not explicitly specified (and so the value on creation is empty or nil), Calico defaults
|
||||
// Types according to what Ingress and Egress rules are present in the policy. The
|
||||
@@ -121,16 +114,7 @@ type GlobalNetworkPolicySpec struct {
|
||||
ServiceAccountSelector string `json:"serviceAccountSelector,omitempty" validate:"selector"`
|
||||
|
||||
// NamespaceSelector is an optional field for an expression used to select a pod based on namespaces.
|
||||
NamespaceSelector string `json:"namespaceSelector,omitempty" validate"selector"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// GlobalNetworkPolicyList contains a list of GlobalNetworkPolicy resources.
|
||||
type GlobalNetworkPolicyList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
Items []GlobalNetworkPolicy `json:"items"`
|
||||
NamespaceSelector string `json:"namespaceSelector,omitempty" validate:"selector"`
|
||||
}
|
||||
|
||||
// NewGlobalNetworkPolicy creates a new (zeroed) GlobalNetworkPolicy struct with the TypeMetadata initialised to the current
|
||||
@@ -143,14 +127,3 @@ func NewGlobalNetworkPolicy() *GlobalNetworkPolicy {
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewGlobalNetworkPolicyList creates a new (zeroed) GlobalNetworkPolicyList struct with the TypeMetadata initialised to the current
|
||||
// version.
|
||||
func NewGlobalNetworkPolicyList() *GlobalNetworkPolicyList {
|
||||
return &GlobalNetworkPolicyList{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindGlobalNetworkPolicyList,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2018 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2018, 2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -23,17 +23,26 @@ const (
|
||||
KindGlobalNetworkSetList = "GlobalNetworkSetList"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs that share labels to
|
||||
// allow rules to refer to them via selectors. The labels of GlobalNetworkSet are not namespaced.
|
||||
type GlobalNetworkSet struct {
|
||||
// GlobalNetworkSetList is a list of NetworkSet objects.
|
||||
type GlobalNetworkSetList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
// Specification of the NetworkSet.
|
||||
Spec GlobalNetworkSetSpec `json:"spec,omitempty"`
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Items []GlobalNetworkSet `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
type GlobalNetworkSet struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Spec GlobalNetworkSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
}
|
||||
|
||||
// GlobalNetworkSetSpec contains the specification for a NetworkSet resource.
|
||||
@@ -42,15 +51,6 @@ type GlobalNetworkSetSpec struct {
|
||||
Nets []string `json:"nets,omitempty" validate:"omitempty,dive,cidr"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// GlobalNetworkSetList contains a list of NetworkSet resources.
|
||||
type GlobalNetworkSetList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
Items []GlobalNetworkSet `json:"items"`
|
||||
}
|
||||
|
||||
// NewGlobalNetworkSet creates a new (zeroed) NetworkSet struct with the TypeMetadata initialised to the current
|
||||
// version.
|
||||
func NewGlobalNetworkSet() *GlobalNetworkSet {
|
||||
@@ -61,14 +61,3 @@ func NewGlobalNetworkSet() *GlobalNetworkSet {
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewNetworkSetList creates a new (zeroed) NetworkSetList struct with the TypeMetadata initialised to the current
|
||||
// version.
|
||||
func NewGlobalNetworkSetList() *GlobalNetworkSetList {
|
||||
return &GlobalNetworkSetList{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindGlobalNetworkSetList,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2017,2020-2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -15,8 +15,9 @@
|
||||
package v3
|
||||
|
||||
import (
|
||||
"github.com/projectcalico/libcalico-go/lib/numorstring"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/projectcalico/api/pkg/lib/numorstring"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -24,18 +25,26 @@ const (
|
||||
KindHostEndpointList = "HostEndpointList"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// HostEndpoint contains information about a HostEndpoint resource that represents a “bare-metal”
|
||||
// interface attached to the host that is running Calico’s agent, Felix. By default, Calico doesn’t
|
||||
// apply any policy to such interfaces.
|
||||
type HostEndpoint struct {
|
||||
// HostEndpointList is a list of HostEndpoint objects.
|
||||
type HostEndpointList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
// Specification of the HostEndpoint.
|
||||
Spec HostEndpointSpec `json:"spec,omitempty"`
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Items []HostEndpoint `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
type HostEndpoint struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Spec HostEndpointSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
}
|
||||
|
||||
// HostEndpointSpec contains the specification for a HostEndpoint resource.
|
||||
@@ -51,7 +60,7 @@ type HostEndpointSpec struct {
|
||||
// the host through the specific interface named by InterfaceName, or - when InterfaceName
|
||||
// is empty - through the specific interface that has one of the IPs in ExpectedIPs.
|
||||
// Therefore, when InterfaceName is empty, at least one expected IP must be specified. Only
|
||||
// external interfaces (such as “eth0”) are supported here; it isn't possible for a
|
||||
// external interfaces (such as "eth0") are supported here; it isn't possible for a
|
||||
// HostEndpoint to protect traffic through a specific local workload interface.
|
||||
//
|
||||
// Note: Only some kinds of policy are implemented for "*" HostEndpoints; initially just
|
||||
@@ -81,15 +90,6 @@ type EndpointPort struct {
|
||||
Port uint16 `json:"port" validate:"gt=0"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// HostEndpointList contains a list of HostEndpoint resources.
|
||||
type HostEndpointList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
Items []HostEndpoint `json:"items"`
|
||||
}
|
||||
|
||||
// NewHostEndpoint creates a new (zeroed) HostEndpoint struct with the TypeMetadata initialised to the current
|
||||
// version.
|
||||
func NewHostEndpoint() *HostEndpoint {
|
||||
@@ -100,14 +100,3 @@ func NewHostEndpoint() *HostEndpoint {
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewHostEndpointList creates a new (zeroed) HostEndpointList struct with the TypeMetadata initialised to the current
|
||||
// version.
|
||||
func NewHostEndpointList() *HostEndpointList {
|
||||
return &HostEndpointList{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindHostEndpointList,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
68
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/ipamconfig.go
generated
vendored
Normal file
68
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/ipamconfig.go
generated
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
// Copyright (c) 2022 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
KindIPAMConfiguration = "IPAMConfiguration"
|
||||
KindIPAMConfigurationList = "IPAMConfigurationList"
|
||||
)
|
||||
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// IPAMConfigurationList contains a list of IPAMConfiguration resources.
|
||||
type IPAMConfigurationList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Items []IPAMConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// IPAMConfiguration contains information about a block for IP address assignment.
|
||||
type IPAMConfiguration struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Spec IPAMConfigurationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
}
|
||||
|
||||
// IPAMConfigurationSpec contains the specification for an IPPool resource.
|
||||
type IPAMConfigurationSpec struct {
|
||||
// When StrictAffinity is true, borrowing IP addresses is not allowed.
|
||||
StrictAffinity bool `json:"strictAffinity" validate:"required"`
|
||||
|
||||
// MaxBlocksPerHost, if non-zero, is the max number of blocks that can be
|
||||
// affine to each host.
|
||||
MaxBlocksPerHost int32 `json:"maxBlocksPerHost,omitempty"`
|
||||
}
|
||||
|
||||
// NewIPAMConfiguration creates a new (zeroed) IPAMConfiguration struct with the TypeMetadata initialised to the current
|
||||
// version.
|
||||
func NewIPAMConfiguration() *IPAMConfiguration {
|
||||
return &IPAMConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindIPPool,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2017, 2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -16,9 +16,6 @@ package v3
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
apiv1 "github.com/projectcalico/libcalico-go/lib/apis/v1"
|
||||
"github.com/projectcalico/libcalico-go/lib/selector"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -26,16 +23,26 @@ const (
|
||||
KindIPPoolList = "IPPoolList"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// IPPool contains information about a IPPool resource.
|
||||
type IPPool struct {
|
||||
// IPPoolList contains a list of IPPool resources.
|
||||
type IPPoolList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
// Specification of the IPPool.
|
||||
Spec IPPoolSpec `json:"spec,omitempty"`
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Items []IPPool `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
type IPPool struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Spec IPPoolSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
}
|
||||
|
||||
// IPPoolSpec contains the specification for an IPPool resource.
|
||||
@@ -44,21 +51,24 @@ type IPPoolSpec struct {
|
||||
CIDR string `json:"cidr" validate:"net"`
|
||||
|
||||
// Contains configuration for VXLAN tunneling for this pool. If not specified,
|
||||
// then this is defaulted to "Never" (i.e. VXLAN tunelling is disabled).
|
||||
// then this is defaulted to "Never" (i.e. VXLAN tunneling is disabled).
|
||||
VXLANMode VXLANMode `json:"vxlanMode,omitempty" validate:"omitempty,vxlanMode"`
|
||||
|
||||
// Contains configuration for IPIP tunneling for this pool. If not specified,
|
||||
// then this is defaulted to "Never" (i.e. IPIP tunelling is disabled).
|
||||
// then this is defaulted to "Never" (i.e. IPIP tunneling is disabled).
|
||||
IPIPMode IPIPMode `json:"ipipMode,omitempty" validate:"omitempty,ipIpMode"`
|
||||
|
||||
// When nat-outgoing is true, packets sent from Calico networked containers in
|
||||
// When natOutgoing is true, packets sent from Calico networked containers in
|
||||
// this pool to destinations outside of this pool will be masqueraded.
|
||||
NATOutgoing bool `json:"natOutgoing,omitempty"`
|
||||
|
||||
// When disabled is true, Calico IPAM will not assign addresses from this pool.
|
||||
Disabled bool `json:"disabled,omitempty"`
|
||||
|
||||
// The block size to use for IP address assignments from this pool. Defaults to 26 for IPv4 and 112 for IPv6.
|
||||
// Disable exporting routes from this IP Pool's CIDR over BGP. [Default: false]
|
||||
DisableBGPExport bool `json:"disableBGPExport,omitempty" validate:"omitempty"`
|
||||
|
||||
// The block size to use for IP address assignments from this pool. Defaults to 26 for IPv4 and 122 for IPv6.
|
||||
BlockSize int `json:"blockSize,omitempty"`
|
||||
|
||||
// Allows IPPool to allocate for a specific node by label selector.
|
||||
@@ -66,28 +76,23 @@ type IPPoolSpec struct {
|
||||
|
||||
// Deprecated: this field is only used for APIv1 backwards compatibility.
|
||||
// Setting this field is not allowed, this field is for internal use only.
|
||||
IPIP *apiv1.IPIPConfiguration `json:"ipip,omitempty" validate:"omitempty,mustBeNil"`
|
||||
IPIP *IPIPConfiguration `json:"ipip,omitempty" validate:"omitempty,mustBeNil"`
|
||||
|
||||
// Deprecated: this field is only used for APIv1 backwards compatibility.
|
||||
// Setting this field is not allowed, this field is for internal use only.
|
||||
NATOutgoingV1 bool `json:"nat-outgoing,omitempty" validate:"omitempty,mustBeFalse"`
|
||||
|
||||
// AllowedUse controls what the IP pool will be used for. If not specified or empty, defaults to
|
||||
// ["Tunnel", "Workload"] for back-compatibility
|
||||
AllowedUses []IPPoolAllowedUse `json:"allowedUses,omitempty" validate:"omitempty"`
|
||||
}
|
||||
|
||||
// SelectsNode determines whether or not the IPPool's nodeSelector
|
||||
// matches the labels on the given node.
|
||||
func (pool IPPool) SelectsNode(n Node) (bool, error) {
|
||||
// No node selector means that the pool matches the node.
|
||||
if len(pool.Spec.NodeSelector) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
// Check for valid selector syntax.
|
||||
sel, err := selector.Parse(pool.Spec.NodeSelector)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Return whether or not the selector matches.
|
||||
return sel.Evaluate(n.Labels), nil
|
||||
}
|
||||
type IPPoolAllowedUse string
|
||||
|
||||
const (
|
||||
IPPoolAllowedUseWorkload IPPoolAllowedUse = "Workload"
|
||||
IPPoolAllowedUseTunnel = "Tunnel"
|
||||
)
|
||||
|
||||
type VXLANMode string
|
||||
|
||||
@@ -105,13 +110,29 @@ const (
|
||||
IPIPModeCrossSubnet = "CrossSubnet"
|
||||
)
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// The following definitions are only used for APIv1 backwards compatibility.
|
||||
// They are for internal use only.
|
||||
type EncapMode string
|
||||
|
||||
// IPPoolList contains a list of IPPool resources.
|
||||
type IPPoolList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
Items []IPPool `json:"items"`
|
||||
const (
|
||||
Undefined EncapMode = ""
|
||||
Always = "always"
|
||||
CrossSubnet = "cross-subnet"
|
||||
)
|
||||
|
||||
const DefaultMode = Always
|
||||
|
||||
type IPIPConfiguration struct {
|
||||
// When enabled is true, ipip tunneling will be used to deliver packets to
|
||||
// destinations within this pool.
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
|
||||
// The IPIP mode. This can be one of "always" or "cross-subnet". A mode
|
||||
// of "always" will also use IPIP tunneling for routing to destination IP
|
||||
// addresses within this pool. A mode of "cross-subnet" will only use IPIP
|
||||
// tunneling when the destination node is on a different subnet to the
|
||||
// originating node. The default value (if not specified) is "always".
|
||||
Mode EncapMode `json:"mode,omitempty" validate:"ipIpMode"`
|
||||
}
|
||||
|
||||
// NewIPPool creates a new (zeroed) IPPool struct with the TypeMetadata initialised to the current
|
||||
@@ -124,14 +145,3 @@ func NewIPPool() *IPPool {
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewIPPoolList creates a new (zeroed) IPPoolList struct with the TypeMetadata initialised to the current
|
||||
// version.
|
||||
func NewIPPoolList() *IPPoolList {
|
||||
return &IPPoolList{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindIPPoolList,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
68
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/ipreservation.go
generated
vendored
Normal file
68
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/ipreservation.go
generated
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
// Copyright (c) 2017, 2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
KindIPReservation = "IPReservation"
|
||||
KindIPReservationList = "IPReservationList"
|
||||
)
|
||||
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// IPReservationList contains a list of IPReservation resources.
|
||||
type IPReservationList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Items []IPReservation `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// IPReservation allows certain IP addresses to be reserved (i.e. prevented from being allocated) by Calico
|
||||
// IPAM. Reservations only block new allocations, they do not cause existing IP allocations to be released.
|
||||
// The current implementation is only suitable for reserving small numbers of IP addresses relative to the
|
||||
// size of the IP pool. If large portions of an IP pool are reserved, Calico IPAM may hunt for a long time
|
||||
// to find a non-reserved IP.
|
||||
type IPReservation struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Spec IPReservationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
}
|
||||
|
||||
// IPReservationSpec contains the specification for an IPReservation resource.
|
||||
type IPReservationSpec struct {
|
||||
// ReservedCIDRs is a list of CIDRs and/or IP addresses that Calico IPAM will exclude from new allocations.
|
||||
ReservedCIDRs []string `json:"reservedCIDRs,omitempty" validate:"cidrs,omitempty"`
|
||||
}
|
||||
|
||||
// NewIPReservation creates a new (zeroed) IPReservation struct with the TypeMetadata initialised to the current
|
||||
// version.
|
||||
func NewIPReservation() *IPReservation {
|
||||
return &IPReservation{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindIPReservation,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
162
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/kubecontrollersconfig.go
generated
vendored
Normal file
162
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/kubecontrollersconfig.go
generated
vendored
Normal file
@@ -0,0 +1,162 @@
|
||||
// Copyright (c) 2020-2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
KindKubeControllersConfiguration = "KubeControllersConfiguration"
|
||||
KindKubeControllersConfigurationList = "KubeControllersConfigurationList"
|
||||
)
|
||||
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// KubeControllersConfigurationList contains a list of KubeControllersConfiguration object.
|
||||
type KubeControllersConfigurationList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Items []KubeControllersConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
type KubeControllersConfiguration struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Spec KubeControllersConfigurationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
Status KubeControllersConfigurationStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
|
||||
}
|
||||
|
||||
// KubeControllersConfigurationSpec contains the values of the Kubernetes controllers configuration.
|
||||
type KubeControllersConfigurationSpec struct {
|
||||
// LogSeverityScreen is the log severity above which logs are sent to the stdout. [Default: Info]
|
||||
LogSeverityScreen string `json:"logSeverityScreen,omitempty" validate:"omitempty,logLevel"`
|
||||
|
||||
// HealthChecks enables or disables support for health checks [Default: Enabled]
|
||||
HealthChecks string `json:"healthChecks,omitempty" validate:"omitempty,oneof=Enabled Disabled"`
|
||||
|
||||
// EtcdV3CompactionPeriod is the period between etcdv3 compaction requests. Set to 0 to disable. [Default: 10m]
|
||||
EtcdV3CompactionPeriod *metav1.Duration `json:"etcdV3CompactionPeriod,omitempty" validate:"omitempty"`
|
||||
|
||||
// PrometheusMetricsPort is the TCP port that the Prometheus metrics server should bind to. Set to 0 to disable. [Default: 9094]
|
||||
PrometheusMetricsPort *int `json:"prometheusMetricsPort,omitempty"`
|
||||
|
||||
// Controllers enables and configures individual Kubernetes controllers
|
||||
Controllers ControllersConfig `json:"controllers"`
|
||||
|
||||
// DebugProfilePort configures the port to serve memory and cpu profiles on. If not specified, profiling
|
||||
// is disabled.
|
||||
DebugProfilePort *int32 `json:"debugProfilePort,omitempty"`
|
||||
}
|
||||
|
||||
// ControllersConfig enables and configures individual Kubernetes controllers
|
||||
type ControllersConfig struct {
|
||||
// Node enables and configures the node controller. Enabled by default, set to nil to disable.
|
||||
Node *NodeControllerConfig `json:"node,omitempty"`
|
||||
|
||||
// Policy enables and configures the policy controller. Enabled by default, set to nil to disable.
|
||||
Policy *PolicyControllerConfig `json:"policy,omitempty"`
|
||||
|
||||
// WorkloadEndpoint enables and configures the workload endpoint controller. Enabled by default, set to nil to disable.
|
||||
WorkloadEndpoint *WorkloadEndpointControllerConfig `json:"workloadEndpoint,omitempty"`
|
||||
|
||||
// ServiceAccount enables and configures the service account controller. Enabled by default, set to nil to disable.
|
||||
ServiceAccount *ServiceAccountControllerConfig `json:"serviceAccount,omitempty"`
|
||||
|
||||
// Namespace enables and configures the namespace controller. Enabled by default, set to nil to disable.
|
||||
Namespace *NamespaceControllerConfig `json:"namespace,omitempty"`
|
||||
}
|
||||
|
||||
// NodeControllerConfig configures the node controller, which automatically cleans up configuration
|
||||
// for nodes that no longer exist. Optionally, it can create host endpoints for all Kubernetes nodes.
|
||||
type NodeControllerConfig struct {
|
||||
// ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]
|
||||
ReconcilerPeriod *metav1.Duration `json:"reconcilerPeriod,omitempty" validate:"omitempty"`
|
||||
|
||||
// SyncLabels controls whether to copy Kubernetes node labels to Calico nodes. [Default: Enabled]
|
||||
SyncLabels string `json:"syncLabels,omitempty" validate:"omitempty,oneof=Enabled Disabled"`
|
||||
|
||||
// HostEndpoint controls syncing nodes to host endpoints. Disabled by default, set to nil to disable.
|
||||
HostEndpoint *AutoHostEndpointConfig `json:"hostEndpoint,omitempty"`
|
||||
|
||||
// LeakGracePeriod is the period used by the controller to determine if an IP address has been leaked.
|
||||
// Set to 0 to disable IP garbage collection. [Default: 15m]
|
||||
// +optional
|
||||
LeakGracePeriod *metav1.Duration `json:"leakGracePeriod,omitempty"`
|
||||
}
|
||||
|
||||
type AutoHostEndpointConfig struct {
|
||||
// AutoCreate enables automatic creation of host endpoints for every node. [Default: Disabled]
|
||||
AutoCreate string `json:"autoCreate,omitempty" validate:"omitempty,oneof=Enabled Disabled"`
|
||||
}
|
||||
|
||||
// PolicyControllerConfig configures the network policy controller, which syncs Kubernetes policies
|
||||
// to Calico policies (only used for etcdv3 datastore).
|
||||
type PolicyControllerConfig struct {
|
||||
// ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]
|
||||
ReconcilerPeriod *metav1.Duration `json:"reconcilerPeriod,omitempty" validate:"omitempty"`
|
||||
}
|
||||
|
||||
// WorkloadEndpointControllerConfig configures the workload endpoint controller, which syncs Kubernetes
|
||||
// labels to Calico workload endpoints (only used for etcdv3 datastore).
|
||||
type WorkloadEndpointControllerConfig struct {
|
||||
// ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]
|
||||
ReconcilerPeriod *metav1.Duration `json:"reconcilerPeriod,omitempty" validate:"omitempty"`
|
||||
}
|
||||
|
||||
// ServiceAccountControllerConfig configures the service account controller, which syncs Kubernetes
|
||||
// service accounts to Calico profiles (only used for etcdv3 datastore).
|
||||
type ServiceAccountControllerConfig struct {
|
||||
// ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]
|
||||
ReconcilerPeriod *metav1.Duration `json:"reconcilerPeriod,omitempty" validate:"omitempty"`
|
||||
}
|
||||
|
||||
// NamespaceControllerConfig configures the service account controller, which syncs Kubernetes
|
||||
// service accounts to Calico profiles (only used for etcdv3 datastore).
|
||||
type NamespaceControllerConfig struct {
|
||||
// ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]
|
||||
ReconcilerPeriod *metav1.Duration `json:"reconcilerPeriod,omitempty" validate:"omitempty"`
|
||||
}
|
||||
|
||||
// KubeControllersConfigurationStatus represents the status of the configuration. It's useful for admins to
|
||||
// be able to see the actual config that was applied, which can be modified by environment variables on the
|
||||
// kube-controllers process.
|
||||
type KubeControllersConfigurationStatus struct {
|
||||
// RunningConfig contains the effective config that is running in the kube-controllers pod, after
|
||||
// merging the API resource with any environment variables.
|
||||
RunningConfig KubeControllersConfigurationSpec `json:"runningConfig,omitempty"`
|
||||
|
||||
// EnvironmentVars contains the environment variables on the kube-controllers that influenced
|
||||
// the RunningConfig.
|
||||
EnvironmentVars map[string]string `json:"environmentVars,omitempty"`
|
||||
}
|
||||
|
||||
// New KubeControllersConfiguration creates a new (zeroed) KubeControllersConfiguration struct with
|
||||
// the TypeMetadata initialized to the current version.
|
||||
func NewKubeControllersConfiguration() *KubeControllersConfiguration {
|
||||
return &KubeControllersConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindKubeControllersConfiguration,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2017,2019,2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -14,23 +14,33 @@
|
||||
|
||||
package v3
|
||||
|
||||
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
KindNetworkPolicy = "NetworkPolicy"
|
||||
KindNetworkPolicyList = "NetworkPolicyList"
|
||||
)
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// NetworkPolicyList is a list of Policy objects.
|
||||
type NetworkPolicyList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// NetworkPolicy is the Namespaced-equivalent of the GlobalNetworkPolicy.
|
||||
type NetworkPolicy struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
// Specification of the Policy.
|
||||
Spec NetworkPolicySpec `json:"spec,omitempty"`
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
}
|
||||
|
||||
type NetworkPolicySpec struct {
|
||||
@@ -71,7 +81,7 @@ type NetworkPolicySpec struct {
|
||||
// type in {"frontend", "backend"}
|
||||
// deployment != "dev"
|
||||
// ! has(label_name)
|
||||
Selector string `json:"selector" validate:"selector"`
|
||||
Selector string `json:"selector,omitempty" validate:"selector"`
|
||||
// Types indicates whether this policy applies to ingress, or to egress, or to both. When
|
||||
// not explicitly specified (and so the value on creation is empty or nil), Calico defaults
|
||||
// Types according to what Ingress and Egress are present in the policy. The
|
||||
@@ -92,15 +102,6 @@ type NetworkPolicySpec struct {
|
||||
ServiceAccountSelector string `json:"serviceAccountSelector,omitempty" validate:"selector"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// NetworkPolicyList contains a list of NetworkPolicy resources.
|
||||
type NetworkPolicyList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
Items []NetworkPolicy `json:"items"`
|
||||
}
|
||||
|
||||
// NewNetworkPolicy creates a new (zeroed) NetworkPolicy struct with the TypeMetadata initialised to the current
|
||||
// version.
|
||||
func NewNetworkPolicy() *NetworkPolicy {
|
||||
@@ -111,14 +112,3 @@ func NewNetworkPolicy() *NetworkPolicy {
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewNetworkPolicyList creates a new (zeroed) NetworkPolicyList struct with the TypeMetadata initialised to the current
|
||||
// version.
|
||||
func NewNetworkPolicyList() *NetworkPolicyList {
|
||||
return &NetworkPolicyList{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindNetworkPolicyList,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2019 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2019,2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -23,16 +23,24 @@ const (
|
||||
KindNetworkSetList = "NetworkSetList"
|
||||
)
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// NetworkSetList is a list of NetworkSet objects.
|
||||
type NetworkSetList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Items []NetworkSet `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet.
|
||||
type NetworkSet struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
// Specification of the NetworkSet.
|
||||
Spec NetworkSetSpec `json:"spec,omitempty"`
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Spec NetworkSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
}
|
||||
|
||||
// NetworkSetSpec contains the specification for a NetworkSet resource.
|
||||
@@ -41,15 +49,6 @@ type NetworkSetSpec struct {
|
||||
Nets []string `json:"nets,omitempty" validate:"omitempty,dive,cidr"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// NetworkSetList contains a list of NetworkSet resources.
|
||||
type NetworkSetList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
Items []NetworkSet `json:"items"`
|
||||
}
|
||||
|
||||
// NewNetworkSet creates a new (zeroed) NetworkSet struct with the TypeMetadata initialised to the current version.
|
||||
func NewNetworkSet() *NetworkSet {
|
||||
return &NetworkSet{
|
||||
@@ -59,14 +58,3 @@ func NewNetworkSet() *NetworkSet {
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewNetworkSetList creates a new (zeroed) NetworkSetList struct with the TypeMetadata initialised to the current
|
||||
// version.
|
||||
func NewNetworkSetList() *NetworkSetList {
|
||||
return &NetworkSetList{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindNetworkSetList,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
242
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/nodestatus.go
generated
vendored
Normal file
242
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/nodestatus.go
generated
vendored
Normal file
@@ -0,0 +1,242 @@
|
||||
// Copyright (c) 2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
KindCalicoNodeStatus = "CalicoNodeStatus"
|
||||
KindCalicoNodeStatusList = "CalicoNodeStatusList"
|
||||
)
|
||||
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// CalicoNodeStatusList is a list of CalicoNodeStatus resources.
|
||||
type CalicoNodeStatusList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Items []CalicoNodeStatus `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
type CalicoNodeStatus struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Spec CalicoNodeStatusSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
Status CalicoNodeStatusStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
|
||||
}
|
||||
|
||||
// CalicoNodeStatusSpec contains the specification for a CalicoNodeStatus resource.
|
||||
type CalicoNodeStatusSpec struct {
|
||||
// The node name identifies the Calico node instance for node status.
|
||||
Node string `json:"node,omitempty" validate:"required,name"`
|
||||
|
||||
// Classes declares the types of information to monitor for this calico/node,
|
||||
// and allows for selective status reporting about certain subsets of information.
|
||||
Classes []NodeStatusClassType `json:"classes,omitempty" validate:"required,unique"`
|
||||
|
||||
// UpdatePeriodSeconds is the period at which CalicoNodeStatus should be updated.
|
||||
// Set to 0 to disable CalicoNodeStatus refresh. Maximum update period is one day.
|
||||
UpdatePeriodSeconds *uint32 `json:"updatePeriodSeconds,omitempty" validate:"required,gte=0,lte=86400"`
|
||||
}
|
||||
|
||||
// CalicoNodeStatusStatus defines the observed state of CalicoNodeStatus.
|
||||
// No validation needed for status since it is updated by Calico.
|
||||
type CalicoNodeStatusStatus struct {
|
||||
// LastUpdated is a timestamp representing the server time when CalicoNodeStatus object
|
||||
// last updated. It is represented in RFC3339 form and is in UTC.
|
||||
// +nullable
|
||||
LastUpdated metav1.Time `json:"lastUpdated,omitempty"`
|
||||
|
||||
// Agent holds agent status on the node.
|
||||
Agent CalicoNodeAgentStatus `json:"agent,omitempty"`
|
||||
|
||||
// BGP holds node BGP status.
|
||||
BGP CalicoNodeBGPStatus `json:"bgp,omitempty"`
|
||||
|
||||
// Routes reports routes known to the Calico BGP daemon on the node.
|
||||
Routes CalicoNodeBGPRouteStatus `json:"routes,omitempty"`
|
||||
}
|
||||
|
||||
// CalicoNodeAgentStatus defines the observed state of agent status on the node.
|
||||
type CalicoNodeAgentStatus struct {
|
||||
// BIRDV4 represents the latest observed status of bird4.
|
||||
BIRDV4 BGPDaemonStatus `json:"birdV4,omitempty"`
|
||||
|
||||
// BIRDV6 represents the latest observed status of bird6.
|
||||
BIRDV6 BGPDaemonStatus `json:"birdV6,omitempty"`
|
||||
}
|
||||
|
||||
// CalicoNodeBGPStatus defines the observed state of BGP status on the node.
|
||||
type CalicoNodeBGPStatus struct {
|
||||
// The total number of IPv4 established bgp sessions.
|
||||
NumberEstablishedV4 int `json:"numberEstablishedV4"`
|
||||
|
||||
// The total number of IPv4 non-established bgp sessions.
|
||||
NumberNotEstablishedV4 int `json:"numberNotEstablishedV4"`
|
||||
|
||||
// The total number of IPv6 established bgp sessions.
|
||||
NumberEstablishedV6 int `json:"numberEstablishedV6"`
|
||||
|
||||
// The total number of IPv6 non-established bgp sessions.
|
||||
NumberNotEstablishedV6 int `json:"numberNotEstablishedV6"`
|
||||
|
||||
// PeersV4 represents IPv4 BGP peers status on the node.
|
||||
PeersV4 []CalicoNodePeer `json:"peersV4,omitempty"`
|
||||
|
||||
// PeersV6 represents IPv6 BGP peers status on the node.
|
||||
PeersV6 []CalicoNodePeer `json:"peersV6,omitempty"`
|
||||
}
|
||||
|
||||
// CalicoNodeBGPRouteStatus defines the observed state of routes status on the node.
|
||||
type CalicoNodeBGPRouteStatus struct {
|
||||
// RoutesV4 represents IPv4 routes on the node.
|
||||
RoutesV4 []CalicoNodeRoute `json:"routesV4,omitempty"`
|
||||
|
||||
// RoutesV6 represents IPv6 routes on the node.
|
||||
RoutesV6 []CalicoNodeRoute `json:"routesV6,omitempty"`
|
||||
}
|
||||
|
||||
// BGPDaemonStatus defines the observed state of BGP daemon.
|
||||
type BGPDaemonStatus struct {
|
||||
// The state of the BGP Daemon.
|
||||
State BGPDaemonState `json:"state,omitempty"`
|
||||
|
||||
// Version of the BGP daemon
|
||||
Version string `json:"version,omitempty"`
|
||||
|
||||
// Router ID used by bird.
|
||||
RouterID string `json:"routerID,omitempty"`
|
||||
|
||||
// LastBootTime holds the value of lastBootTime from bird.ctl output.
|
||||
LastBootTime string `json:"lastBootTime,omitempty"`
|
||||
|
||||
// LastReconfigurationTime holds the value of lastReconfigTime from bird.ctl output.
|
||||
LastReconfigurationTime string `json:"lastReconfigurationTime,omitempty"`
|
||||
}
|
||||
|
||||
// CalicoNodePeer contains the status of BGP peers on the node.
|
||||
type CalicoNodePeer struct {
|
||||
// IP address of the peer whose condition we are reporting.
|
||||
PeerIP string `json:"peerIP,omitempty" validate:"omitempty,ip"`
|
||||
|
||||
// Type indicates whether this peer is configured via the node-to-node mesh,
|
||||
// or via en explicit global or per-node BGPPeer object.
|
||||
Type BGPPeerType `json:"type,omitempty"`
|
||||
|
||||
// State is the BGP session state.
|
||||
State BGPSessionState `json:"state,omitempty"`
|
||||
|
||||
// Since the state or reason last changed.
|
||||
Since string `json:"since,omitempty"`
|
||||
}
|
||||
|
||||
// CalicoNodeRoute contains the status of BGP routes on the node.
|
||||
type CalicoNodeRoute struct {
|
||||
// Type indicates if the route is being used for forwarding or not.
|
||||
Type CalicoNodeRouteType `json:"type,omitempty"`
|
||||
|
||||
// Destination of the route.
|
||||
Destination string `json:"destination,omitempty"`
|
||||
|
||||
// Gateway for the destination.
|
||||
Gateway string `json:"gateway,omitempty"`
|
||||
|
||||
// Interface for the destination
|
||||
Interface string `json:"interface,omitempty"`
|
||||
|
||||
// LearnedFrom contains information regarding where this route originated.
|
||||
LearnedFrom CalicoNodeRouteLearnedFrom `json:"learnedFrom,omitempty"`
|
||||
}
|
||||
|
||||
// CalicoNodeRouteLearnedFrom contains the information of the source from which a routes has been learned.
|
||||
type CalicoNodeRouteLearnedFrom struct {
|
||||
// Type of the source where a route is learned from.
|
||||
SourceType CalicoNodeRouteSourceType `json:"sourceType,omitempty"`
|
||||
|
||||
// If sourceType is NodeMesh or BGPPeer, IP address of the router that sent us this route.
|
||||
PeerIP string `json:"peerIP,omitempty, validate:"omitempty,ip"`
|
||||
}
|
||||
|
||||
// NewCalicoNodeStatus creates a new (zeroed) CalicoNodeStatus struct with the TypeMetadata initialised to the current
|
||||
// version.
|
||||
func NewCalicoNodeStatus() *CalicoNodeStatus {
|
||||
return &CalicoNodeStatus{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindCalicoNodeStatus,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type CalicoNodeRouteType string
|
||||
|
||||
const (
|
||||
RouteTypeFIB CalicoNodeRouteType = "FIB"
|
||||
RouteTypeRIB = "RIB"
|
||||
)
|
||||
|
||||
type CalicoNodeRouteSourceType string
|
||||
|
||||
const (
|
||||
RouteSourceTypeKernel CalicoNodeRouteSourceType = "Kernel"
|
||||
RouteSourceTypeStatic = "Static"
|
||||
RouteSourceTypeDirect = "Direct"
|
||||
RouteSourceTypeNodeMesh = "NodeMesh"
|
||||
RouteSourceTypeBGPPeer = "BGPPeer"
|
||||
)
|
||||
|
||||
type NodeStatusClassType string
|
||||
|
||||
const (
|
||||
NodeStatusClassTypeAgent NodeStatusClassType = "Agent"
|
||||
NodeStatusClassTypeBGP = "BGP"
|
||||
NodeStatusClassTypeRoutes = "Routes"
|
||||
)
|
||||
|
||||
type BGPPeerType string
|
||||
|
||||
const (
|
||||
BGPPeerTypeNodeMesh BGPPeerType = "NodeMesh"
|
||||
BGPPeerTypeNodePeer = "NodePeer"
|
||||
BGPPeerTypeGlobalPeer = "GlobalPeer"
|
||||
)
|
||||
|
||||
type BGPDaemonState string
|
||||
|
||||
const (
|
||||
BGPDaemonStateReady BGPDaemonState = "Ready"
|
||||
BGPDaemonStateNotReady = "NotReady"
|
||||
)
|
||||
|
||||
type BGPSessionState string
|
||||
|
||||
const (
|
||||
BGPSessionStateIdle BGPSessionState = "Idle"
|
||||
BGPSessionStateConnect = "Connect"
|
||||
BGPSessionStateActive = "Active"
|
||||
BGPSessionStateOpenSent = "OpenSent"
|
||||
BGPSessionStateOpenConfirm = "OpenConfirm"
|
||||
BGPSessionStateEstablished = "Established"
|
||||
BGPSessionStateClose = "Close"
|
||||
)
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2017-2018 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2017-2018,2020-2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -15,7 +15,7 @@
|
||||
package v3
|
||||
|
||||
import (
|
||||
"github.com/projectcalico/libcalico-go/lib/numorstring"
|
||||
"github.com/projectcalico/api/pkg/lib/numorstring"
|
||||
)
|
||||
|
||||
// PolicyType enumerates the possible values of the PolicySpec Types field.
|
||||
@@ -30,7 +30,7 @@ const (
|
||||
// and security Profiles reference rules - separated out as a list of rules for both
|
||||
// ingress and egress packet matching.
|
||||
//
|
||||
// Each positive match criteria has a negated version, prefixed with ”Not”. All the match
|
||||
// Each positive match criteria has a negated version, prefixed with "Not". All the match
|
||||
// criteria within a rule must be satisfied for a packet to match. A single rule can contain
|
||||
// the positive and negative version of a match and both must be satisfied for the rule to match.
|
||||
type Rule struct {
|
||||
@@ -60,6 +60,9 @@ type Rule struct {
|
||||
|
||||
// HTTP contains match criteria that apply to HTTP requests.
|
||||
HTTP *HTTPMatch `json:"http,omitempty" validate:"omitempty"`
|
||||
|
||||
// Metadata contains additional information for this rule
|
||||
Metadata *RuleMetadata `json:"metadata,omitempty" validate:"omitempty"`
|
||||
}
|
||||
|
||||
// HTTPPath specifies an HTTP path to match. It may be either of the form:
|
||||
@@ -93,7 +96,7 @@ type ICMPFields struct {
|
||||
// (i.e. pings).
|
||||
Type *int `json:"type,omitempty" validate:"omitempty,gte=0,lte=254"`
|
||||
// Match on a specific ICMP code. If specified, the Type value must also be specified.
|
||||
// This is a technical limitation imposed by the kernel’s iptables firewall, which
|
||||
// This is a technical limitation imposed by the kernel's iptables firewall, which
|
||||
// Calico uses to enforce the rule.
|
||||
Code *int `json:"code,omitempty" validate:"omitempty,gte=0,lte=255"`
|
||||
}
|
||||
@@ -117,10 +120,10 @@ type EntityRule struct {
|
||||
// different. One negates the set of matched endpoints, the other negates the whole match:
|
||||
//
|
||||
// Selector = "!has(my_label)" matches packets that are from other Calico-controlled
|
||||
// endpoints that do not have the label “my_label”.
|
||||
// endpoints that do not have the label "my_label".
|
||||
//
|
||||
// NotSelector = "has(my_label)" matches packets that are not from Calico-controlled
|
||||
// endpoints that do have the label “my_label”.
|
||||
// endpoints that do have the label "my_label".
|
||||
//
|
||||
// The effect is that the latter will accept packets from non-Calico sources whereas the
|
||||
// former is limited to packets from Calico-controlled endpoints.
|
||||
@@ -128,16 +131,29 @@ type EntityRule struct {
|
||||
|
||||
// NamespaceSelector is an optional field that contains a selector expression. Only traffic
|
||||
// that originates from (or terminates at) endpoints within the selected namespaces will be
|
||||
// matched. When both NamespaceSelector and Selector are defined on the same rule, then only
|
||||
// matched. When both NamespaceSelector and another selector are defined on the same rule, then only
|
||||
// workload endpoints that are matched by both selectors will be selected by the rule.
|
||||
//
|
||||
// For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting
|
||||
// only workload endpoints in the same namespace as the NetworkPolicy.
|
||||
//
|
||||
// For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting
|
||||
// only GlobalNetworkSet or HostEndpoint.
|
||||
//
|
||||
// For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload
|
||||
// endpoints across all namespaces.
|
||||
NamespaceSelector string `json:"namespaceSelector,omitempty" validate:"omitempty,selector"`
|
||||
|
||||
// Services is an optional field that contains options for matching Kubernetes Services.
|
||||
// If specified, only traffic that originates from or terminates at endpoints within the selected
|
||||
// service(s) will be matched, and only to/from each endpoint's port.
|
||||
//
|
||||
// Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets,
|
||||
// NotNets or ServiceAccounts.
|
||||
//
|
||||
// Ports and NotPorts can only be specified with Services on ingress rules.
|
||||
Services *ServiceMatch `json:"services,omitempty" validate:"omitempty"`
|
||||
|
||||
// Ports is an optional field that restricts the rule to only apply to traffic that has a
|
||||
// source (destination) port that matches one of these ranges/values. This value is a
|
||||
// list of integers or strings that represent ranges of ports.
|
||||
@@ -163,6 +179,15 @@ type EntityRule struct {
|
||||
ServiceAccounts *ServiceAccountMatch `json:"serviceAccounts,omitempty" validate:"omitempty"`
|
||||
}
|
||||
|
||||
type ServiceMatch struct {
|
||||
// Name specifies the name of a Kubernetes Service to match.
|
||||
Name string `json:"name,omitempty" validate:"omitempty,name"`
|
||||
|
||||
// Namespace specifies the namespace of the given Service. If left empty, the rule
|
||||
// will match within this policy's namespace.
|
||||
Namespace string `json:"namespace,omitempty" validate:"omitempty,name"`
|
||||
}
|
||||
|
||||
type ServiceAccountMatch struct {
|
||||
// Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates
|
||||
// at) a pod running as a service account whose name is in the list.
|
||||
@@ -182,3 +207,8 @@ const (
|
||||
Log = "Log"
|
||||
Pass = "Pass"
|
||||
)
|
||||
|
||||
type RuleMetadata struct {
|
||||
// Annotations is a set of key value pairs that give extra information about the rule
|
||||
Annotations map[string]string `json:"annotations,omitempty"`
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2017,2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -14,27 +14,35 @@
|
||||
|
||||
package v3
|
||||
|
||||
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
KindProfile = "Profile"
|
||||
KindProfileList = "ProfileList"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// Profile contains the details a security profile resource. A profile is set of security rules
|
||||
// to apply on an endpoint. An endpoint (either a host endpoint or an endpoint on a workload) can
|
||||
// reference zero or more profiles. The profile rules are applied directly to the endpoint *after*
|
||||
// the selector-based security policy has been applied, and in the order the profiles are declared on the
|
||||
// endpoint.
|
||||
type Profile struct {
|
||||
// ProfileList is a list of Profile objects.
|
||||
type ProfileList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
// Specification of the Profile.
|
||||
Spec ProfileSpec `json:"spec,omitempty"`
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Items []Profile `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
type Profile struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Spec ProfileSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
}
|
||||
|
||||
// ProfileSpec contains the specification for a security Profile resource.
|
||||
@@ -51,15 +59,6 @@ type ProfileSpec struct {
|
||||
LabelsToApply map[string]string `json:"labelsToApply,omitempty" validate:"omitempty,labels"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// ProfileList contains a list of Profile resources.
|
||||
type ProfileList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
Items []Profile `json:"items"`
|
||||
}
|
||||
|
||||
// NewProfile creates a new (zeroed) Profile struct with the TypeMetadata initialised to the current
|
||||
// version.
|
||||
func NewProfile() *Profile {
|
||||
@@ -70,14 +69,3 @@ func NewProfile() *Profile {
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewProfileList creates a new (zeroed) ProfileList struct with the TypeMetadata initialised to the current
|
||||
// version.
|
||||
func NewProfileList() *ProfileList {
|
||||
return &ProfileList{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindProfileList,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
79
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/register.go
generated
vendored
Normal file
79
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/register.go
generated
vendored
Normal file
@@ -0,0 +1,79 @@
|
||||
// Copyright (c) 2019-2022 Tigera, Inc. All rights reserved.
|
||||
|
||||
package v3
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
// GroupName is the group name use in this package
|
||||
const GroupName = "projectcalico.org"
|
||||
|
||||
// SchemeGroupVersion is group version used to register these objects
|
||||
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v3"}
|
||||
var SchemeGroupVersionInternal = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
|
||||
|
||||
var (
|
||||
SchemeBuilder runtime.SchemeBuilder
|
||||
localSchemeBuilder = &SchemeBuilder
|
||||
AddToScheme = localSchemeBuilder.AddToScheme
|
||||
AllKnownTypes = []runtime.Object{
|
||||
&NetworkPolicy{},
|
||||
&NetworkPolicyList{},
|
||||
&GlobalNetworkPolicy{},
|
||||
&GlobalNetworkPolicyList{},
|
||||
&GlobalNetworkSet{},
|
||||
&GlobalNetworkSetList{},
|
||||
&HostEndpoint{},
|
||||
&HostEndpointList{},
|
||||
&IPPool{},
|
||||
&IPPoolList{},
|
||||
&IPReservation{},
|
||||
&IPReservationList{},
|
||||
&BGPConfiguration{},
|
||||
&BGPConfigurationList{},
|
||||
&BGPFilter{},
|
||||
&BGPFilterList{},
|
||||
&BGPPeer{},
|
||||
&BGPPeerList{},
|
||||
&Profile{},
|
||||
&ProfileList{},
|
||||
&FelixConfiguration{},
|
||||
&FelixConfigurationList{},
|
||||
&KubeControllersConfiguration{},
|
||||
&KubeControllersConfigurationList{},
|
||||
&ClusterInformation{},
|
||||
&ClusterInformationList{},
|
||||
&NetworkSet{},
|
||||
&NetworkSetList{},
|
||||
&CalicoNodeStatus{},
|
||||
&CalicoNodeStatusList{},
|
||||
&IPAMConfiguration{},
|
||||
&IPAMConfigurationList{},
|
||||
&BlockAffinity{},
|
||||
&BlockAffinityList{},
|
||||
&BGPFilter{},
|
||||
&BGPFilterList{},
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
// We only register manually written functions here. The registration of the
|
||||
// generated functions takes place in the generated files. The separation
|
||||
// makes the code compile even when the generated files are missing.
|
||||
localSchemeBuilder.Register(addKnownTypes, addConversionFuncs)
|
||||
}
|
||||
|
||||
// Adds the list of known types to api.Scheme.
|
||||
func addKnownTypes(scheme *runtime.Scheme) error {
|
||||
scheme.AddKnownTypes(SchemeGroupVersion, AllKnownTypes...)
|
||||
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resource takes an unqualified resource and returns a Group qualified GroupResource
|
||||
func Resource(resource string) schema.GroupResource {
|
||||
return SchemeGroupVersion.WithResource(resource).GroupResource()
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
19
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/zz_generated.defaults.go
generated
vendored
Normal file
19
vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/zz_generated.defaults.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
// Copyright (c) 2023 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Code generated by defaulter-gen. DO NOT EDIT.
|
||||
|
||||
package v3
|
||||
|
||||
import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// RegisterDefaults adds defaulters functions to the given scheme.
|
||||
// Public to allow building arbitrary schemes.
|
||||
// All generated defaulters are covering - they call all nested defaulters.
|
||||
func RegisterDefaults(scheme *runtime.Scheme) error {
|
||||
return nil
|
||||
}
|
||||
@@ -16,4 +16,7 @@
|
||||
Package numorstring implements a set of type definitions that in YAML or JSON
|
||||
format may be represented by either a number or a string.
|
||||
*/
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
|
||||
package numorstring
|
||||
@@ -24,14 +24,14 @@ import (
|
||||
|
||||
// Port represents either a range of numeric ports or a named port.
|
||||
//
|
||||
// - For a named port, set the PortName, leaving MinPort and MaxPort as 0.
|
||||
// - For a port range, set MinPort and MaxPort to the (inclusive) port numbers. Set
|
||||
// PortName to "".
|
||||
// - For a single port, set MinPort = MaxPort and PortName = "".
|
||||
// - For a named port, set the PortName, leaving MinPort and MaxPort as 0.
|
||||
// - For a port range, set MinPort and MaxPort to the (inclusive) port numbers. Set
|
||||
// PortName to "".
|
||||
// - For a single port, set MinPort = MaxPort and PortName = "".
|
||||
type Port struct {
|
||||
MinPort uint16
|
||||
MaxPort uint16
|
||||
PortName string `validate:"omitempty,portName"`
|
||||
MinPort uint16 `json:"minPort,omitempty"`
|
||||
MaxPort uint16 `json:"maxPort,omitempty"`
|
||||
PortName string `json:"portName" validate:"omitempty,portName"`
|
||||
}
|
||||
|
||||
// SinglePort creates a Port struct representing a single port.
|
||||
@@ -142,3 +142,13 @@ func (p Port) String() string {
|
||||
return fmt.Sprintf("%d:%d", p.MinPort, p.MaxPort)
|
||||
}
|
||||
}
|
||||
|
||||
// OpenAPISchemaType is used by the kube-openapi generator when constructing
|
||||
// the OpenAPI spec of this type.
|
||||
// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
|
||||
func (_ Port) OpenAPISchemaType() []string { return []string{"string"} }
|
||||
|
||||
// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
|
||||
// the OpenAPI spec of this type.
|
||||
// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
|
||||
func (_ Port) OpenAPISchemaFormat() string { return "int-or-string" }
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2016-2020 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -24,8 +24,9 @@ const (
|
||||
ProtocolSCTP = "SCTP"
|
||||
ProtocolUDPLite = "UDPLite"
|
||||
|
||||
ProtocolUDPV1 = "udp"
|
||||
ProtocolTCPV1 = "tcp"
|
||||
ProtocolUDPV1 = "udp"
|
||||
ProtocolTCPV1 = "tcp"
|
||||
ProtocolSCTPV1 = "sctp"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -119,16 +120,26 @@ func (p Protocol) NumValue() (uint8, error) {
|
||||
}
|
||||
|
||||
// SupportsProtocols returns whether this protocol supports ports. This returns true if
|
||||
// the numerical or string verion of the protocol indicates TCP (6) or UDP (17).
|
||||
// the numerical or string version of the protocol indicates TCP (6), UDP (17), or SCTP (132).
|
||||
func (p Protocol) SupportsPorts() bool {
|
||||
num, err := p.NumValue()
|
||||
if err == nil {
|
||||
return num == 6 || num == 17
|
||||
return num == 6 || num == 17 || num == 132
|
||||
} else {
|
||||
switch p.StrVal {
|
||||
case ProtocolTCP, ProtocolUDP, ProtocolTCPV1, ProtocolUDPV1:
|
||||
case ProtocolTCP, ProtocolUDP, ProtocolTCPV1, ProtocolUDPV1, ProtocolSCTP, ProtocolSCTPV1:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// OpenAPISchemaType is used by the kube-openapi generator when constructing
|
||||
// the OpenAPI spec of this type.
|
||||
// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
|
||||
func (_ Protocol) OpenAPISchemaType() []string { return []string{"string"} }
|
||||
|
||||
// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
|
||||
// the OpenAPI spec of this type.
|
||||
// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
|
||||
func (_ Protocol) OpenAPISchemaFormat() string { return "int-or-string" }
|
||||
@@ -24,9 +24,9 @@ import (
|
||||
// inner type. This allows you to have, for example, a JSON field that can
|
||||
// accept a name or number.
|
||||
type Uint8OrString struct {
|
||||
Type NumOrStringType
|
||||
NumVal uint8
|
||||
StrVal string
|
||||
Type NumOrStringType `json:"type"`
|
||||
NumVal uint8 `json:"numVal"`
|
||||
StrVal string `json:"strVal"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaller interface.
|
||||
@@ -78,3 +78,13 @@ func (i Uint8OrString) NumValue() (uint8, error) {
|
||||
}
|
||||
return i.NumVal, nil
|
||||
}
|
||||
|
||||
// OpenAPISchemaType is used by the kube-openapi generator when constructing
|
||||
// the OpenAPI spec of this type.
|
||||
// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
|
||||
func (_ Uint8OrString) OpenAPISchemaType() []string { return []string{"string"} }
|
||||
|
||||
// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
|
||||
// the OpenAPI spec of this type.
|
||||
// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
|
||||
func (_ Uint8OrString) OpenAPISchemaFormat() string { return "int-or-string" }
|
||||
557
vendor/github.com/projectcalico/calico/AUTHORS.md
generated
vendored
Normal file
557
vendor/github.com/projectcalico/calico/AUTHORS.md
generated
vendored
Normal file
@@ -0,0 +1,557 @@
|
||||
# Calico authors
|
||||
|
||||
This file is auto-generated based on contribution records reported
|
||||
by GitHub for the core repositories within the projectcalico/ organization. It is ordered alphabetically.
|
||||
|
||||
| Name | Email |
|
||||
|--------|--------|
|
||||
| Aalaesar | aalaesar@gmail.com |
|
||||
| Aaron Roydhouse | aaron@roydhouse.com |
|
||||
| Abhijeet Kasurde | akasurde@redhat.com |
|
||||
| Abhinav Dahiya | abhinav.dahiya@coreos.com |
|
||||
| Abhishek Jaisingh | abhi2254015@gmail.com |
|
||||
| Adam Hoheisel | adam.hoheisel99@gmail.com |
|
||||
| Adam Leskis | leskis@gmail.com |
|
||||
| Adam Szecówka | adam.szecowka@sap.com |
|
||||
| ahrkrak | andrew.randall@gmail.com |
|
||||
| Alan | zg.zhu@daocloud.io |
|
||||
| Alban Crequy | alban@kinvolk.io |
|
||||
| Albert Vaca | albertvaka@gmail.com |
|
||||
| Alejo Carballude | alejocarballude@gmail.com |
|
||||
| Aleksandr Didenko | adidenko@mirantis.com |
|
||||
| Aleksandr Dubinsky | almson@users.noreply.github.com |
|
||||
| Alessandro Rossi | 4215912+kubealex@users.noreply.github.com |
|
||||
| Alex Altair | alexanderaltair@gmail.com |
|
||||
| Alex Chan | github@alexwlchan.fastmail.co.uk |
|
||||
| Alex Hersh | alexander.hersh@metaswitch.com |
|
||||
| Alex Nauda | alex@alexnauda.com |
|
||||
| Alex O Regan | alexsoregan@gmail.com |
|
||||
| Alex Pollitt | lxpollitt@users.noreply.github.com |
|
||||
| Alex Rowley | rowleyaj@gmail.com |
|
||||
| Alexander Brand | alexbrand09@gmail.com |
|
||||
| Alexander Gama Espinosa | algamaes@microsoft.com |
|
||||
| Alexander Golovko | alexandro@ankalagon.ru |
|
||||
| Alexander Saprykin | asaprykin@mirantis.com |
|
||||
| Alexander Varshavsky | alex.varshavsky@tigera.io |
|
||||
| Alexey Magdich | itechart.aliaksei.mahdzich@tigera.io |
|
||||
| Alexey Makhov | makhov.alex@gmail.com |
|
||||
| Alexey Medvedchikov | alexey.medvedchikov@gmail.com |
|
||||
| alexeymagdich-tigera | 56426143+alexeymagdich-tigera@users.noreply.github.com |
|
||||
| alexhersh | hersh.a@husky.neu.edu |
|
||||
| Alina Militaru | alina@tigera.io |
|
||||
| Aloys Augustin | aloaugus@cisco.com |
|
||||
| Aloÿs | aloys.augustin@polytechnique.org |
|
||||
| Amim Knabben | amim.knabben@gmail.com |
|
||||
| amq | amq@users.noreply.github.com |
|
||||
| Anatoly Popov | aensidhe@users.noreply.github.com |
|
||||
| Anders Janmyr | anders@janmyr.com |
|
||||
| Andreas Jaeger | aj@suse.com |
|
||||
| Andrei Nistor | andrei_nistor@smart-x.net |
|
||||
| Andrew Donald Kennedy | andrew.international@gmail.com |
|
||||
| Andrew Gaffney | andrew@agaffney.org |
|
||||
| Andy Randall | andy@tigera.io |
|
||||
| Anthony ARNAUD | aarnaud@eidosmontreal.com |
|
||||
| Anthony BESCOND | anthony.bescond@kiln.fi |
|
||||
| Anthony T | 25327116+anthonytwh@users.noreply.github.com |
|
||||
| Anton Antonov | anton.synd.antonov@gmail.com |
|
||||
| Anton Klokau | anton.klokau@gmail.com |
|
||||
| anton-klokau | 54411589+anton-klokau@users.noreply.github.com |
|
||||
| Antony Guinard | antony@tigera.io |
|
||||
| Aram Alipoor | aram.alipoor@gmail.com |
|
||||
| arikachen | eaglesora@gmail.com |
|
||||
| Armon Dadgar | armon.dadgar@gmail.com |
|
||||
| Artem Panchenko | apanchenko@mirantis.com |
|
||||
| Artem Roma | aroma@mirantis.com |
|
||||
| Artem Rymarchik | artemrymarchik@gmail.com |
|
||||
| Artyom Rymarchik | artsiom.rymarchyk@itechart-group.com |
|
||||
| Arundhati Surpur | arundhati@nectechnologies.in |
|
||||
| Ashley Reese | ashley@victorianfox.com |
|
||||
| asincu | alinamilitaru@Alinas-MacBook-Pro.local |
|
||||
| Atkins | atkinschang@gmail.com |
|
||||
| Avi Deitcher | avi@deitcher.net |
|
||||
| Ayoub Elhamdani | a.elhamdani90@gmail.com |
|
||||
| Barbara McKercher | barbara@tigera.io |
|
||||
| bartek-lopatka | 54111388+bartek-lopatka@users.noreply.github.com |
|
||||
| Bassam Tabbara | bassam@symform.com |
|
||||
| Behnam Shobiri | behnam.shobeiri@gmail.com |
|
||||
| Behnam-Shobiri | Behnam.shobeiri@gmail.com |
|
||||
| Ben Schwartz | benschw@gmail.com |
|
||||
| Benjamin | info@diffus.org |
|
||||
| Benjamin S. Allen | bsallen@alcf.anl.gov |
|
||||
| Bertrand Lallau | bertrand.lallau@gmail.com |
|
||||
| Bill Hathaway | bill.hathaway@gmail.com |
|
||||
| Bill Maxwell | bill@rancher.com |
|
||||
| Billie Cleek | bcleek@monsooncommerce.com |
|
||||
| bingshen.wbs | bingshen.wbs@alibaba-inc.com |
|
||||
| bjhaid | abejideayodele@gmail.com |
|
||||
| Blake Covarrubias | blake.covarrubias@gmail.com |
|
||||
| Blucher | yfg44fox@126.com |
|
||||
| bmckercher123 | 48458529+bmckercher123@users.noreply.github.com |
|
||||
| Bogdan Dobrelya | bdobrelia@mirantis.com |
|
||||
| Brad Beam | brad.beam@b-rad.info |
|
||||
| Brad Behle | behle@us.ibm.com |
|
||||
| Brendan Creane | brendan@tigera.io |
|
||||
| Brian Ketelsen | bketelsen@gmail.com |
|
||||
| Brian Kim | brian@tigera.io |
|
||||
| Brian McMahon | brianmcmahon135@gmail.com |
|
||||
| briansan | bkimstunnaboss@gmail.com |
|
||||
| Brook-Roberts | brook.roberts@metaswitch.com |
|
||||
| Bryan Reese | bryan.mreese@gmail.com |
|
||||
| Cao Shufeng | caosf.fnst@cn.fujitsu.com |
|
||||
| Cao Xuan Hoang | hoangcx@vn.fujitsu.com |
|
||||
| Carlos Alberto | euprogramador@gmail.com |
|
||||
| Casey D | casey.davenport@metaswitch.com |
|
||||
| Casey Davenport | davenport.cas@gmail.com |
|
||||
| Cezar Sa Espinola | cezarsa@gmail.com |
|
||||
| Chakravarthy Gopi | cgopi@us.ibm.com |
|
||||
| Chance Zibolski | chance.zibolski@gmail.com |
|
||||
| Chen Donghui | chendh521@gmail.com |
|
||||
| Chengwei Yang | yangchengwei@qiyi.com |
|
||||
| chenqijun | chenqijun@corp.netease.com |
|
||||
| Chris Armstrong | chris@opdemand.com |
|
||||
| Chris Church | chris.church@gmail.com |
|
||||
| Chris Hoge | chris@hogepodge.com |
|
||||
| Chris McNabb | raizyr@gmail.com |
|
||||
| Chris Tomkins | chris.tomkins@tigera.io |
|
||||
| Christian Klauser | christianklauser@outlook.com |
|
||||
| Christian Simon | simon@swine.de |
|
||||
| Christopher | chris.tauchen@tigera.io |
|
||||
| Christopher Grim | christopher.grim@gmail.com |
|
||||
| Christopher LIJLENSTOLPE | github@cdl.asgaard.org |
|
||||
| Christopher LILJENSTOLPE | cdl@asgaard.org |
|
||||
| cinience | cinience@qq.com |
|
||||
| Ciprian Hacman | ciprian@hakman.dev |
|
||||
| Clement Laforet | sheepkiller@cotds.org |
|
||||
| Cody McCain | cody@tigera.io |
|
||||
| Cookie | luckymrwang@163.com |
|
||||
| Cory Benfield | lukasaoz@gmail.com |
|
||||
| crandl201 | christopher_randles@cable.comcast.com |
|
||||
| Cristian Vrabie | cristian.vrabie@gmail.com |
|
||||
| Cyclinder | qifeng.guo@daocloud.io |
|
||||
| Dalton Hubble | dghubble@gmail.com |
|
||||
| Dan | djosborne@users.noreply.github.com |
|
||||
| Dan (Turk) | dan@projectcalico.org |
|
||||
| Dan Bond | pm@danbond.io |
|
||||
| Dan O'Brien | dobrien.nj@gmail.com |
|
||||
| Dan Osborne | djosborne10@gmail.com |
|
||||
| Daniel Hoherd | daniel.hoherd@gmail.com |
|
||||
| Daniel Megyesi | daniel.megyesi@liligo.com |
|
||||
| Dario Nieuwenhuis | dirbaio@dirbaio.net |
|
||||
| Darren Chin | dc@darrench.in |
|
||||
| Dave Hay | david_hay@uk.ibm.com |
|
||||
| Dave Langridge | dave@calico.com |
|
||||
| David Haupt | dhaupt@redhat.com |
|
||||
| David Igou | igou.david@gmail.com |
|
||||
| David J. Wilder | wilder@us.ibm.com |
|
||||
| David Tesar | david.tesar@microsoft.com |
|
||||
| Denis Iskandarov | d.iskandarov@gmail.com |
|
||||
| depay | depay19@163.com |
|
||||
| derek mcquay | derek@tigera.io |
|
||||
| Derk Muenchhausen | derk@muenchhausen.de |
|
||||
| Didier Durand | durand.didier@gmail.com |
|
||||
| Dominic DeMarco | ddemarc@us.ibm.com |
|
||||
| Doug Collier | doug@tigera.io |
|
||||
| Doug Davis | duglin@users.noreply.github.com |
|
||||
| Doug Hellmann | doug@doughellmann.com |
|
||||
| Doug Wiegley | dwiegley@salesforce.com |
|
||||
| Dries Harnie | dries+github@harnie.be |
|
||||
| du | du@njtech.edu.cn |
|
||||
| Duan Jiong | djduanjiong@gmail.com |
|
||||
| Duong Ha-Quang | duonghq@vn.fujitsu.com |
|
||||
| Dylan Pindur | dylanpindur@gmail.com |
|
||||
| Ed Harrison | eepyaich@users.noreply.github.com |
|
||||
| Edbert | ecandra@protonmail.com |
|
||||
| Elson Rodriguez | elson.rodriguez@gmail.com |
|
||||
| emanic | emily@tigera.io |
|
||||
| Emma Gordon | emma@projectcalico.org |
|
||||
| EmmEff | mikef17@gmail.com |
|
||||
| Eran Reshef | eran.reshef@arm.com |
|
||||
| Eric Anderson | anderson@stackengine.com |
|
||||
| Eric Barch | ericb@ericbarch.com |
|
||||
| Eric Hoffmann | 31017077+2ffs2nns@users.noreply.github.com |
|
||||
| Erik Stidham | estidham@gmail.com |
|
||||
| Ernest Wong | chuwon@microsoft.com |
|
||||
| Ernesto Jiménez | me@ernesto-jimenez.com |
|
||||
| Ethan Chu | xychu2008@gmail.com |
|
||||
| Eugen Mayer | 136934+EugenMayer@users.noreply.github.com |
|
||||
| F41gh7 | info@fght.net |
|
||||
| Fabian Ruff | fabian@progra.de |
|
||||
| Fahad Arshad | fahadaliarshad@gmail.com |
|
||||
| fcuello-fudo | 51087976+fcuello-fudo@users.noreply.github.com |
|
||||
| Feilong Wang | flwang@catalyst.net.nz |
|
||||
| fen4o | martin.vladev@gmail.com |
|
||||
| Fernando Alvarez | methadato@gmail.com |
|
||||
| Fernando Cainelli | fernando.cainelli@gmail.com |
|
||||
| Fionera | fionera@fionera.de |
|
||||
| Flavio Percoco | flaper87@gmail.com |
|
||||
| Foivos Filippopoulos | foivosfilip@gmail.com |
|
||||
| frank | frank@tigera.io |
|
||||
| Frank Greco Jr | frankgreco@northwesternmutual.com |
|
||||
| François PICOT | fpicot@users.noreply.github.com |
|
||||
| Fredrik Steen | stone4x4@gmail.com |
|
||||
| freecaykes | edbert@tigera.io |
|
||||
| frnkdny | frank.danyo@gmail.com |
|
||||
| fumihiko kakuma | kakuma@valinux.co.jp |
|
||||
| Gabriel Monroy | gabriel@opdemand.com |
|
||||
| Gaurav | 48036489+realgaurav@users.noreply.github.com |
|
||||
| Gaurav Khatri | gaurav@tigera.io |
|
||||
| Gaurav Sinha | gaurav.sinha@tigera.io |
|
||||
| Gautam K | gautam.nitheesh@gmail.com |
|
||||
| gdziwoki | gdziwoki@gmail.com |
|
||||
| gengchc2 | geng.changcai2@zte.com.cn |
|
||||
| Gerard Hickey | hickey@kinetic-compute.com |
|
||||
| Giancarlo Rubio | gianrubio@gmail.com |
|
||||
| Gianluca | 52940363+gianlucam76@users.noreply.github.com |
|
||||
| Gianluca Mardente | gianluca@tigera.io |
|
||||
| Gobinath Krishnamoorthy | gobinath@tigera.io |
|
||||
| Guang Ya Liu | gyliu513@gmail.com |
|
||||
| Guangming Wang | guangming.wang@daocloud.io |
|
||||
| Guillaume LECERF | glecerf@gmail.com |
|
||||
| guirish | guirish |
|
||||
| gunboe | guntherboeckmann@gmail.com |
|
||||
| Gunjan "Grass-fed Rabbit" Patel | patelgunjan5@gmail.com |
|
||||
| GuyTempleton | guy.templeton@skyscanner.net |
|
||||
| Hagen Kuehn | hagen.kuehn@quater.io |
|
||||
| halfcrazy | hackzhuyan@gmail.com |
|
||||
| Hanamantagoud | hanamantagoud.v.kandagal@est.tech |
|
||||
| hanamantagoudvk | 68010010+hanamantagoudvk@users.noreply.github.com |
|
||||
| hedi bouattour | hbouatto@cisco.com |
|
||||
| Helen Chang | c6h3un@gmail.com |
|
||||
| Henry Gessau | gessau@gmail.com |
|
||||
| huang.zhiping | huang.zhiping@99cloud.net |
|
||||
| Huanle Han | hanhuanle@caicloud.io |
|
||||
| Hui Kang | kangh@us.ibm.com |
|
||||
| Huo Qi Feng | huoqif@cn.ibm.com |
|
||||
| Iago López Galeiras | iago@kinvolk.io |
|
||||
| ialidzhikov | i.alidjikov@gmail.com |
|
||||
| Ian Wienand | iwienand@redhat.com |
|
||||
| Icarus9913 | icaruswu66@qq.com |
|
||||
| Igor Kapkov | igasgeek@me.com |
|
||||
| Ihar Hrachyshka | ihrachys@redhat.com |
|
||||
| ijumps | “bigerjump@gmail.com” |
|
||||
| ISHIDA Wataru | ishida.wataru@lab.ntt.co.jp |
|
||||
| Ivar Larsson | ivar@bloglovin.com |
|
||||
| IWAMOTO Toshihiro | iwamoto@valinux.co.jp |
|
||||
| J. Grizzard | jgrizzard@box.com |
|
||||
| Jack Kleeman | jackkleeman@gmail.com |
|
||||
| Jacob Hayes | jacob.r.hayes@gmail.com |
|
||||
| Jade Chunnananda | jade.jch@gmail.com |
|
||||
| Jak | 44370243+jak-sdk@users.noreply.github.com |
|
||||
| James E. Blair | jeblair@redhat.com |
|
||||
| James Lucktaylor | jlucktay@users.noreply.github.com |
|
||||
| James Pollard | james@leapyear.io |
|
||||
| James Sturtevant | jsturtevant@gmail.com |
|
||||
| Jamie | 91jme@users.noreply.github.com |
|
||||
| Jan Brauer | jan@jimdo.com |
|
||||
| Jan Ivar Beddari | code@beddari.net |
|
||||
| janonymous | janonymous.codevulture@gmail.com |
|
||||
| jay vyas | jvyas@vmware.com |
|
||||
| Jean-Sebastien Mouret | js.mouret@gmail.com |
|
||||
| Jeff Schroeder | jeffschroeder@computer.org |
|
||||
| Jenkins | jenkins@review.openstack.org |
|
||||
| Jens Henrik Hertz | jens@treatwell.nl |
|
||||
| Jesper Dangaard Brouer | brouer@redhat.com |
|
||||
| Jiawei Huang | jiawei@tigera.io |
|
||||
| Jimmy McCrory | jimmy.mccrory@gmail.com |
|
||||
| jinglinax@163.com | jinglinax@163.com |
|
||||
| jmjoy | 918734043@qq.com |
|
||||
| Joanna Solmon | joanna.solmon@gmail.com |
|
||||
| Joel Bastos | kintoandar@users.noreply.github.com |
|
||||
| Johan Fleury | jfleury+github@arcaik.net |
|
||||
| Johannes M. Scheuermann | joh.scheuer@gmail.com |
|
||||
| Johannes Scheerer | johannes.scheerer@sap.com |
|
||||
| johanneswuerbach | johannes.wuerbach@googlemail.com |
|
||||
| John Engelman | john.r.engelman@gmail.com |
|
||||
| jolestar | jolestar@gmail.com |
|
||||
| Jonah Back | jonah@jonahback.com |
|
||||
| Jonathan Boulle | jonathanboulle@gmail.com |
|
||||
| Jonathan M. Wilbur | jonathan@wilbur.space |
|
||||
| Jonathan Palardy | jonathan.palardy@gmail.com |
|
||||
| Jonathan Sabo | jonathan@sabo.io |
|
||||
| Jonathan Sokolowski | jonathan.sokolowski@gmail.com |
|
||||
| jose-bigio | jose.bigio@docker.com |
|
||||
| Joseph Gu | aceralon@outlook.com |
|
||||
| Josh Conant | deathbeforedishes@gmail.com |
|
||||
| Josh Lucas | josh.lucas@tigera.io |
|
||||
| joshti | 56737865+joshti@users.noreply.github.com |
|
||||
| Joshua Allard | josh@tigera.io |
|
||||
| joshuactm | joshua.colvin@ticketmaster.com |
|
||||
| Julien Dehee | PrFalken@users.noreply.github.com |
|
||||
| Jussi Nummelin | jussi.nummelin@digia.com |
|
||||
| Justin | justin@tigera.io |
|
||||
| Justin Burnham | justin@jburnham.net |
|
||||
| Justin Cattle | j@ocado.com |
|
||||
| Justin Nauman | justin.r.nauman+github@gmail.com |
|
||||
| Justin Pacheco | jpacheco39@bloomberg.net |
|
||||
| Justin Sievenpiper | justin@sievenpiper.co |
|
||||
| JW Bell | bjwbell@gmail.com |
|
||||
| Kamil Madac | kamil.madac@gmail.com |
|
||||
| Karl Matthias | karl.matthias@gonitro.com |
|
||||
| Karthik Gaekwad | karthik.gaekwad@gmail.com |
|
||||
| Karthik Krishnan Ramasubramanian | mail@karthikkrishnan.me |
|
||||
| Kashif Saadat | kashifsaadat@gmail.com |
|
||||
| Kelsey Hightower | kelsey.hightower@gmail.com |
|
||||
| Ketan Kulkarni | ketkulka@gmail.com |
|
||||
| Kevin Benton | blak111@gmail.com |
|
||||
| Kevin Lynch | klynch@gmail.com |
|
||||
| Kiran Divekar | calsoft.kiran.divekar@tigera.io |
|
||||
| Kirill Buev | kirill.buev@pm.me |
|
||||
| Kris Gambirazzi | kris.gambirazzi@transferwise.com |
|
||||
| Krzesimir Nowak | krzesimir@kinvolk.io |
|
||||
| Krzysztof Cieplucha | krisiasty@users.noreply.github.com |
|
||||
| l1b0k | libokang.dev@gmail.com |
|
||||
| Lance Robson | lancelot.robson@gmail.com |
|
||||
| Lancelot Robson | lancelot.robson@metaswitch.com |
|
||||
| Lars Ekman | lars.g.ekman@est.tech |
|
||||
| Laurence Man | laurence@tigera.io |
|
||||
| Le Hou | houl7@chinaunicom.cn |
|
||||
| Lee Briggs | lbriggs@apptio.com |
|
||||
| Leo Ochoa | leo8a@users.noreply.github.com |
|
||||
| Li-zhigang | li.zhigang3@zte.com.cn |
|
||||
| libby kent | viskcode@gmail.com |
|
||||
| lilintan | lintan.li@easystack.cn |
|
||||
| LinYushen | linyushen@qiniu.com |
|
||||
| lippertmarkus | lippertmarkus@gmx.de |
|
||||
| LittleBoy18 | 2283985296@qq.com |
|
||||
| liubog2008 | liubog2008@gmail.com |
|
||||
| Liz Rice | liz@lizrice.com |
|
||||
| llr | nightmeng@gmail.com |
|
||||
| Logan Davis | 38335829+logand22@users.noreply.github.com |
|
||||
| Logan V | logan2211@gmail.com |
|
||||
| lou-lan | loulan@loulan.me |
|
||||
| Luiz Filho | luizbafilho@gmail.com |
|
||||
| Luke Mino-Altherr | luke.mino-altherr@metaswitch.com |
|
||||
| luobily | luobily@gmail.com |
|
||||
| Luthfi Anandra | luthfi.anandra@gmail.com |
|
||||
| Lv Jiawei | lvjiawei@cmss.chinamobile.com |
|
||||
| maao | maao@cmss.chinamobile.com |
|
||||
| Manjunath A Kumatagi | mkumatag@in.ibm.com |
|
||||
| Manuel Buil | mbuil@suse.com |
|
||||
| Marga Millet | marga.sfo@gmail.com |
|
||||
| Marius Grigaitis | marius.grigaitis@home24.de |
|
||||
| Mark Fermor | markfermor@holidayextras.com |
|
||||
| Mark Petrovic | mspetrovic@gmail.com |
|
||||
| markruler | csu0414@gmail.com |
|
||||
| Marlin Cremers | marlinc@marlinc.nl |
|
||||
| Marshall Ford | inbox@marshallford.me |
|
||||
| Martijn Koster | mak-github@greenhills.co.uk |
|
||||
| Martin Evgeniev | suizman@users.noreply.github.com |
|
||||
| marvin-tigera | marvin-tigera@users.noreply.github.com |
|
||||
| Mat Meredith | matthew.meredith@metaswitch.net |
|
||||
| Mateusz Gozdek | mgozdek@microsoft.com |
|
||||
| Mathias Lafeldt | mathias.lafeldt@gmail.com |
|
||||
| Matt | matt@projectcalico.org |
|
||||
| Matt Boersma | matt@opdemand.com |
|
||||
| Matt Dupre | matthewdupre@users.noreply.github.com |
|
||||
| Matt Kelly | Matthew.Joseph.Kelly@gmail.com |
|
||||
| Matt Leung | mleung975@gmail.com |
|
||||
| Matthew | mfisher@engineyard.com |
|
||||
| Matthew Fenwick | mfenwick100@gmail.com |
|
||||
| Matthew Fisher | matthewf@opdemand.com |
|
||||
| Max Kudosh | max_kudosh@hotmail.com |
|
||||
| Max S | maxstr@users.noreply.github.com |
|
||||
| Max Stritzinger | mstritzinger@bloomberg.net |
|
||||
| Maxim Ivanov | ivanov.maxim@gmail.com |
|
||||
| Maximilian Bischoff | maximilian.bischoff@inovex.de |
|
||||
| Mayo | mayocream39@yahoo.co.jp |
|
||||
| Mazdak Nasab | mazdak@tigera.io |
|
||||
| mchtech | michu_an@126.com |
|
||||
| meeee | michael+git@frister.net |
|
||||
| meijin | meijin@tiduyun.com |
|
||||
| melissaml | ma.lei@99cloud.net |
|
||||
| Michael Dong | michael.dong@vrviu.com |
|
||||
| Michael Stowe | me@mikestowe.com |
|
||||
| Michael Vierling | michael@tigera.io |
|
||||
| Micheal Waltz | ecliptik@gmail.com |
|
||||
| Mikalai Kastsevich | kostevich-kolya@mail.ru |
|
||||
| Mike Kostersitz | mikek@microsoft.com |
|
||||
| Mike Palmer | mike@mikepalmer.net |
|
||||
| Mike Scherbakov | mihgen@gmail.com |
|
||||
| Mike Spreitzer | mspreitz@us.ibm.com |
|
||||
| Mike Stephen | mike.stephen@tigera.io |
|
||||
| Mike Stowe | mikestowe@Mikes-MBP.sfo.tigera.io |
|
||||
| mikev | mvierling@gmail.com |
|
||||
| Miouge1 | Miouge1@users.noreply.github.com |
|
||||
| ml | 6209465+ml-@users.noreply.github.com |
|
||||
| mlbarrow | michael@barrow.me |
|
||||
| MofeLee | mofe@me.com |
|
||||
| Mohamed | mohamed.elzarei@motius.de |
|
||||
| Molnigt | jan.munkhammar@safespring.com |
|
||||
| Monty Taylor | mordred@inaugust.com |
|
||||
| Mridul Gain | mridulgain@gmail.com |
|
||||
| Muhammad Saghir | msagheer92@gmail.com |
|
||||
| Muhammet Arslan | muhammet.arsln@gmail.com |
|
||||
| Murali Paluru | leodotcloud@gmail.com |
|
||||
| Mészáros Mihály | misi@majd.eu |
|
||||
| Nate Taylor | ntaylor1781@gmail.com |
|
||||
| Nathan Fritz | fritzy@netflint.net |
|
||||
| Nathan Skrzypczak | nathan.skrzypczak@gmail.com |
|
||||
| Nathan Wouda | nwouda@users.noreply.github.com |
|
||||
| Neil Jerram | nj@metaswitch.com |
|
||||
| Nic Doye | nic@worldofnic.org |
|
||||
| Nick Bartos | nick@pistoncloud.com |
|
||||
| Nick Wood | nwood@microsoft.com |
|
||||
| Nikkau | nikkau@nikkau.net |
|
||||
| Nirman Narang | narang@us.ibm.com |
|
||||
| njuptlzf | njuptlzf@163.com |
|
||||
| Noah Treuhaft | noah.treuhaft@docker.com |
|
||||
| nohajc | nohajc@gmail.com |
|
||||
| nuczzz | 33566732+nuczzz@users.noreply.github.com |
|
||||
| nuxeric | 48699932+nuxeric@users.noreply.github.com |
|
||||
| Oded Lazar | odedlaz@gmail.com |
|
||||
| oldtree2k | oldtree2k@users.noreply.github.com |
|
||||
| Olivier Bourdon | obourdon@mirantis.com |
|
||||
| Onong Tayeng | onong.tayeng@gmail.com |
|
||||
| OpenDev Sysadmins | openstack-infra@lists.openstack.org |
|
||||
| Otto Sulin | otto.sulin@gmail.com |
|
||||
| Owen Tuz | owen@segfault.re |
|
||||
| pasanw | pasanweerasinghe@gmail.com |
|
||||
| Patrick Marques | pmarques@users.noreply.github.com |
|
||||
| Patrik Lundin | patrik@sigterm.se |
|
||||
| Paul Tiplady | symmetricone@gmail.com |
|
||||
| Pavel Khusainov | pkhusainov@mz.com |
|
||||
| Pedro Coutinho | pedro@tigera.io |
|
||||
| Penkey Suresh | penkeysuresh@users.noreply.github.com |
|
||||
| penkeysuresh | penkeysuresh@gmail.com |
|
||||
| peter | peterkellyonline@gmail.com |
|
||||
| Peter Kelly | 659713+petercork@users.noreply.github.com |
|
||||
| Peter Nordquist | peter.nordquist@pnnl.gov |
|
||||
| Peter Salanki | peter@salanki.st |
|
||||
| Peter White | peter.white@metaswitch.com |
|
||||
| Phil Kates | me@philkates.com |
|
||||
| Philip Southam | philip.southam@jpl.nasa.gov |
|
||||
| Phu Kieu | pkieu@jpl.nasa.gov |
|
||||
| Pierre Grimaud | grimaud.pierre@gmail.com |
|
||||
| Pike.SZ.fish | pikeszfish@gmail.com |
|
||||
| Prayag Verma | prayag.verma@gmail.com |
|
||||
| Pushkar Joglekar | pjoglekar@vmware.com |
|
||||
| PythonSyntax1 | 51872355+PythonSyntax1@users.noreply.github.com |
|
||||
| Qiu Yu | qiuyu@ebaysf.com |
|
||||
| Rafael | rafael@tigera.io |
|
||||
| Rafal Borczuch | rafalq.b+github@gmail.com |
|
||||
| Rafe Colton | r.colton@modcloth.com |
|
||||
| Rahul Krishna Upadhyaya | rakrup@gmail.com |
|
||||
| rao yunkun | yunkunrao@gmail.com |
|
||||
| Renan Gonçalves | renan.saddam@gmail.com |
|
||||
| Rene Dekker | rene@tigera.io |
|
||||
| Rene Kaufmann | kaufmann.r@gmail.com |
|
||||
| Reza R | 54559947+frozenprocess@users.noreply.github.com |
|
||||
| Ricardo Katz | rikatz@users.noreply.github.com |
|
||||
| Ricardo Pchevuzinske Katz | ricardo.katz@serpro.gov.br |
|
||||
| Richard Kovacs | kovacsricsi@gmail.com |
|
||||
| Richard Laughlin | richardwlaughlin@gmail.com |
|
||||
| Richard Marshall | richard.marshall@ask.com |
|
||||
| Ripta Pasay | ripta@users.noreply.github.com |
|
||||
| Rob Brockbank | robbrockbank@gmail.com |
|
||||
| Rob Terhaar | robbyt@robbyt.net |
|
||||
| Robert Brockbank | rob.brockbank@metswitch.com |
|
||||
| Robert Coleman | github@robert.net.nz |
|
||||
| Roberto Alcantara | roberto@eletronica.org |
|
||||
| Robin Müller | robin.mueller@outlook.de |
|
||||
| Rodrigo Barbieri | rodrigo.barbieri2010@gmail.com |
|
||||
| Roman Danko | elcomtik@users.noreply.github.com |
|
||||
| Roman Sokolkov | roman@giantswarm.io |
|
||||
| Ronnie P. Thomas | rpthms@users.noreply.github.com |
|
||||
| Roshani Rathi | rrroshani227@gmail.com |
|
||||
| roshanirathi | 42164609+roshanirathi@users.noreply.github.com |
|
||||
| Rui Chen | rchen@meetup.com |
|
||||
| rushtehrani | r@inven.io |
|
||||
| Rustam Zagirov | stammru@gmail.com |
|
||||
| Ryan Zhang | ryan.zhang@docker.com |
|
||||
| rymarchikbot | 43807162+rymarchikbot@users.noreply.github.com |
|
||||
| Saeid Askari | askari.saeed@gmail.com |
|
||||
| Satish Matti | smatti@google.com |
|
||||
| Satoru Takeuchi | sat@cybozu.co.jp |
|
||||
| Saurabh Mohan | saurabh@tigera.io |
|
||||
| Sean Kilgore | logikal@users.noreply.github.com |
|
||||
| Sedef | ssavas@vmware.com |
|
||||
| Semaphore Automatic Update | tom@tigera.io |
|
||||
| Sergey Kulanov | skulanov@mirantis.com |
|
||||
| Sergey Melnik | sergey.melnik@commercetools.de |
|
||||
| Seth | sethpmccombs@gmail.com |
|
||||
| Seth Malaki | seth@tigera.io |
|
||||
| Shatrugna Sadhu | shatrugna.sadhu@gmail.com |
|
||||
| Shaun Crampton | smc@metaswitch.com |
|
||||
| shouheng.lei | shouheng.lei@easystack.cn |
|
||||
| Simão Reis | smnrsti@gmail.com |
|
||||
| SONG JIANG | song@tigera.io |
|
||||
| SongmingYan | yan.songming@zte.com.cn |
|
||||
| spdfnet | 32593931+spdfnet@users.noreply.github.com |
|
||||
| Spike Curtis | spike@tigera.io |
|
||||
| squ94wk | squ94wk@googlemail.com |
|
||||
| sridhar | sridhar@tigera.io |
|
||||
| sridhartigera | 63839878+sridhartigera@users.noreply.github.com |
|
||||
| Sriram Yagnaraman | sriram.yagnaraman@est.tech |
|
||||
| Stanislav Yotov | 29090864+svyotov@users.noreply.github.com |
|
||||
| Stanislav-Galchynski | Stanislav.Galchynski@itechart-group.com |
|
||||
| Stefan Breunig | stefan.breunig@xing.com |
|
||||
| Stefan Bueringer | sbueringer@gmail.com |
|
||||
| Stephen Schlie | schlie@tigera.io |
|
||||
| Steve Gao | steve@tigera.io |
|
||||
| Stéphane Cottin | stephane.cottin@vixns.com |
|
||||
| Suraiya Hameed | 22776421+Suraiya-Hameed@users.noreply.github.com |
|
||||
| Suraj Narwade | surajnarwade353@gmail.com |
|
||||
| svInfra17 | vinayak@infracloud.io |
|
||||
| Szymon Pyżalski | spyzalski@mirantis.com |
|
||||
| TAKAHASHI Shuuji | shuuji3@gmail.com |
|
||||
| Tamal Saha | tamal@appscode.com |
|
||||
| Tathagata Chowdhury | calsoft.tathagata.chowdhury@tigera.io |
|
||||
| tathagatachowdhury | tathagata.chowdhury@calsoftinc.com |
|
||||
| Teller-Ulam | 2749404+Teller-Ulam@users.noreply.github.com |
|
||||
| Thijs Scheepers | tscheepers@users.noreply.github.com |
|
||||
| Thilo Fromm | thilo@kinvolk.io |
|
||||
| Thomas Lohner | tl@scale.sc |
|
||||
| Tim Bart | tim@pims.me |
|
||||
| Tim Briggs | timothydbriggs@gmail.com |
|
||||
| Timo Beckers | timo.beckers@klarrio.com |
|
||||
| Todd Nine | tnine@apigee.com |
|
||||
| Tom Denham | tom@tomdee.co.uk |
|
||||
| Tom Pointon | tom@teepeestudios.net |
|
||||
| Tomas Hruby | tomas@tigera.io |
|
||||
| Tomas Mazak | tomas@valec.net |
|
||||
| Tommaso Pozzetti | tommypozzetti@hotmail.it |
|
||||
| tonic | tonicbupt@gmail.com |
|
||||
| ToroNZ | tomasmaggio@gmail.com |
|
||||
| Trapier Marshall | trapier.marshall@docker.com |
|
||||
| Trevor Tao | trevor.tao@arm.com |
|
||||
| Trond Hasle Amundsen | t.h.amundsen@usit.uio.no |
|
||||
| turekt | 32360115+turekt@users.noreply.github.com |
|
||||
| tuti | tuti@tigera.io |
|
||||
| Tyler Stachecki | tstachecki@bloomberg.net |
|
||||
| Uwe Dauernheim | uwe@dauernheim.net |
|
||||
| Uwe Krueger | uwe.krueger@sap.com |
|
||||
| vagrant | vagrant@mesos.vm |
|
||||
| Valentin Ouvrard | valentin.ouvrard@nautile.sarl |
|
||||
| Viacheslav Vasilyev | avoidik@gmail.com |
|
||||
| Vieri | 15050873171@163.com |
|
||||
| Vincent Schwarzer | vincent.schwarzer@yahoo.de |
|
||||
| Vivek Thrivikraman | vivek.thrivikraman@est.tech |
|
||||
| wangwengang | wangwengang@inspur.com |
|
||||
| Wei Kin Huang | weikin.huang04@gmail.com |
|
||||
| Wei.ZHAO | zhaowei@qiyi.com |
|
||||
| weizhouBlue | 45163302+weizhouBlue@users.noreply.github.com |
|
||||
| Wietse Muizelaar | wmuizelaar@bol.com |
|
||||
| Will Rouesnel | w.rouesnel@gmail.com |
|
||||
| Wouter Schoot | wouter@schoot.org |
|
||||
| wuranbo | wuranbo@gmail.com |
|
||||
| wwgfhf | 51694849+wwgfhf@users.noreply.github.com |
|
||||
| Xiang Dai | 764524258@qq.com |
|
||||
| Xiang Liu | lx1036@126.com |
|
||||
| xieyanker | xjsisnice@gmail.com |
|
||||
| Xin He | he_xinworld@126.com |
|
||||
| YAMAMOTO Takashi | yamamoto@midokura.com |
|
||||
| Yan Zhu | yanzhu@alauda.io |
|
||||
| yang59324 | yang59324@163.com |
|
||||
| yanyan8566 | 62531742+yanyan8566@users.noreply.github.com |
|
||||
| yassan | yassan0627@gmail.com |
|
||||
| Yecheng Fu | cofyc.jackson@gmail.com |
|
||||
| Yi He | yi.he@arm.com |
|
||||
| Yi Tao | yitao@qiniu.com |
|
||||
| ymyang | yangym9@lenovo.com |
|
||||
| Yongkun Gui | ygui@google.com |
|
||||
| Yuji Azama | yuji.azama@gmail.com |
|
||||
| zealic | zealic@gmail.com |
|
||||
| zhangjie | zhangjie0619@yeah.net |
|
||||
| zhouxinyong | zhouxinyong@inspur.com |
|
||||
| Zopanix | zopanix@gmail.com |
|
||||
| Zuul | zuul@review.openstack.org |
|
||||
176
vendor/github.com/projectcalico/calico/LICENSE
generated
vendored
Normal file
176
vendor/github.com/projectcalico/calico/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,176 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
@@ -124,7 +124,7 @@ func NewResourceCache(args ResourceCacheArgs) ResourceCache {
|
||||
|
||||
func (c *calicoCache) Set(key string, newObj interface{}) {
|
||||
if reflect.TypeOf(newObj) != c.ObjectType {
|
||||
c.log.Fatalf("Wrong object type recieved to store in cache. Expected: %s, Found: %s", c.ObjectType, reflect.TypeOf(newObj))
|
||||
c.log.Fatalf("Wrong object type received to store in cache. Expected: %s, Found: %s", c.ObjectType, reflect.TypeOf(newObj))
|
||||
}
|
||||
|
||||
// Check if the object exists in the cache already. If it does and hasn't changed,
|
||||
@@ -19,7 +19,7 @@ type Converter interface {
|
||||
// Converts kubernetes object to calico representation of it.
|
||||
Convert(k8sObj interface{}) (interface{}, error)
|
||||
|
||||
// Returns apporpriate key for the object
|
||||
// Returns appropriate key for the object
|
||||
GetKey(obj interface{}) string
|
||||
|
||||
// DeleteArgsFromKey returns name and namespace of the object to pass to Delete
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2017-2021 Tigera, Inc. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -17,10 +17,11 @@ package converter
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
api "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/k8s/conversion"
|
||||
api "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/backend/k8s/conversion"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
@@ -33,7 +34,7 @@ func NewNamespaceConverter() Converter {
|
||||
return &namespaceConverter{}
|
||||
}
|
||||
func (nc *namespaceConverter) Convert(k8sObj interface{}) (interface{}, error) {
|
||||
var c conversion.Converter
|
||||
c := conversion.NewConverter()
|
||||
namespace, ok := k8sObj.(*v1.Namespace)
|
||||
if !ok {
|
||||
tombstone, ok := k8sObj.(cache.DeletedFinalStateUnknown)
|
||||
@@ -52,7 +53,7 @@ func (nc *namespaceConverter) Convert(k8sObj interface{}) (interface{}, error) {
|
||||
profile := kvp.Value.(*api.Profile)
|
||||
|
||||
// Isolate the metadata fields that we care about. ResourceVersion, CreationTimeStamp, etc are
|
||||
// not relevant so we ignore them. This prevents uncessary updates.
|
||||
// not relevant so we ignore them. This prevents unnecessary updates.
|
||||
profile.ObjectMeta = metav1.ObjectMeta{Name: profile.Name}
|
||||
|
||||
return *profile, nil
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2017-2021 Tigera, Inc. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -15,11 +15,14 @@
|
||||
package converter
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
api "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/k8s/conversion"
|
||||
api "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
|
||||
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/backend/k8s/conversion"
|
||||
cerrors "github.com/projectcalico/calico/libcalico-go/lib/errors"
|
||||
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -49,15 +52,19 @@ func (p *policyConverter) Convert(k8sObj interface{}) (interface{}, error) {
|
||||
}
|
||||
}
|
||||
|
||||
var c conversion.Converter
|
||||
c := conversion.NewConverter()
|
||||
kvp, err := c.K8sNetworkPolicyToCalico(np)
|
||||
if err != nil {
|
||||
// Silently ignore rule conversion errors. We don't expect any conversion errors
|
||||
// since the data given to us here is validated by the Kubernetes API. The conversion
|
||||
// code ignores any rules that it cannot parse, and we will pass the valid ones to Felix.
|
||||
var e *cerrors.ErrorPolicyConversion
|
||||
if err != nil && !errors.As(err, &e) {
|
||||
return nil, err
|
||||
}
|
||||
cnp := kvp.Value.(*api.NetworkPolicy)
|
||||
|
||||
// Isolate the metadata fields that we care about. ResourceVersion, CreationTimeStamp, etc are
|
||||
// not relevant so we ignore them. This prevents uncessary updates.
|
||||
// not relevant so we ignore them. This prevents unnecessary updates.
|
||||
cnp.ObjectMeta = metav1.ObjectMeta{Name: cnp.Name, Namespace: cnp.Namespace}
|
||||
|
||||
return *cnp, err
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2017-2020 Tigera, Inc. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -18,34 +18,49 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/backend/model"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
api "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/k8s/conversion"
|
||||
api "github.com/projectcalico/calico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/backend/k8s/conversion"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// WorkloadEndpointData is an internal struct used to store the various bits
|
||||
// of information that the policy controller cares about on a workload endpoint.
|
||||
type WorkloadEndpointData struct {
|
||||
PodName string
|
||||
Namespace string
|
||||
Labels map[string]string
|
||||
PodName string
|
||||
Namespace string
|
||||
Labels map[string]string
|
||||
ServiceAccount string
|
||||
}
|
||||
|
||||
type podConverter struct {
|
||||
type PodConverter interface {
|
||||
Convert(k8sObj interface{}) ([]WorkloadEndpointData, error)
|
||||
GetKey(obj WorkloadEndpointData) string
|
||||
DeleteArgsFromKey(key string) (string, string)
|
||||
}
|
||||
|
||||
type podConverter struct{}
|
||||
|
||||
// BuildWorkloadEndpointData generates the correct WorkloadEndpointData for the given
|
||||
// WorkloadEndpoint, extracting fields that the policy controller is responsible for syncing.
|
||||
func BuildWorkloadEndpointData(wep api.WorkloadEndpoint) WorkloadEndpointData {
|
||||
return WorkloadEndpointData{
|
||||
PodName: wep.Spec.Pod,
|
||||
Namespace: wep.Namespace,
|
||||
Labels: wep.Labels,
|
||||
// list of WorkloadEndpoints, extracting fields that the policy controller is responsible
|
||||
// for syncing.
|
||||
func BuildWorkloadEndpointData(weps ...api.WorkloadEndpoint) []WorkloadEndpointData {
|
||||
var retWEPs []WorkloadEndpointData
|
||||
for _, wep := range weps {
|
||||
retWEPs = append(retWEPs, WorkloadEndpointData{
|
||||
PodName: wep.Spec.Pod,
|
||||
Namespace: wep.Namespace,
|
||||
Labels: wep.Labels,
|
||||
ServiceAccount: wep.Spec.ServiceAccountName,
|
||||
})
|
||||
}
|
||||
|
||||
return retWEPs
|
||||
}
|
||||
|
||||
// MergeWorkloadEndpointData applies the given WorkloadEndpointData to the provided
|
||||
@@ -55,26 +70,20 @@ func MergeWorkloadEndpointData(wep *api.WorkloadEndpoint, upd WorkloadEndpointDa
|
||||
log.Fatalf("Bad attempt to merge data for %s/%s into wep %s/%s", upd.PodName, upd.Namespace, wep.Name, wep.Namespace)
|
||||
}
|
||||
wep.Labels = upd.Labels
|
||||
wep.Spec.ServiceAccountName = upd.ServiceAccount
|
||||
}
|
||||
|
||||
// NewPodConverter Constructor for podConverter
|
||||
func NewPodConverter() Converter {
|
||||
func NewPodConverter() PodConverter {
|
||||
return &podConverter{}
|
||||
}
|
||||
|
||||
func (p *podConverter) Convert(k8sObj interface{}) (interface{}, error) {
|
||||
func (p *podConverter) Convert(k8sObj interface{}) ([]WorkloadEndpointData, error) {
|
||||
// Convert Pod into a workload endpoint.
|
||||
var c conversion.Converter
|
||||
pod, ok := k8sObj.(*v1.Pod)
|
||||
if !ok {
|
||||
tombstone, ok := k8sObj.(cache.DeletedFinalStateUnknown)
|
||||
if !ok {
|
||||
return nil, errors.New("couldn't get object from tombstone")
|
||||
}
|
||||
pod, ok = tombstone.Obj.(*v1.Pod)
|
||||
if !ok {
|
||||
return nil, errors.New("tombstone contained object that is not a Pod")
|
||||
}
|
||||
c := conversion.NewConverter()
|
||||
pod, err := ExtractPodFromUpdate(k8sObj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// The conversion logic always requires a node, but we don't always have one. We don't actually
|
||||
@@ -83,21 +92,31 @@ func (p *podConverter) Convert(k8sObj interface{}) (interface{}, error) {
|
||||
pod.Spec.NodeName = "unknown.node"
|
||||
}
|
||||
|
||||
kvp, err := c.PodToWorkloadEndpoint(pod)
|
||||
kvps, err := c.PodToWorkloadEndpoints(pod)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
wep := kvp.Value.(*api.WorkloadEndpoint)
|
||||
|
||||
// Build and return a WorkloadEndpointData struct using the data.
|
||||
return BuildWorkloadEndpointData(*wep), nil
|
||||
return BuildWorkloadEndpointData(kvpsToWEPs(kvps)...), nil
|
||||
}
|
||||
|
||||
func kvpsToWEPs(kvps []*model.KVPair) []api.WorkloadEndpoint {
|
||||
var weps []api.WorkloadEndpoint
|
||||
for _, kvp := range kvps {
|
||||
wep := kvp.Value.(*api.WorkloadEndpoint)
|
||||
if wep != nil {
|
||||
weps = append(weps, *wep)
|
||||
}
|
||||
}
|
||||
|
||||
return weps
|
||||
}
|
||||
|
||||
// GetKey takes a WorkloadEndpointData and returns the key which
|
||||
// identifies it - namespace/name
|
||||
func (p *podConverter) GetKey(obj interface{}) string {
|
||||
e := obj.(WorkloadEndpointData)
|
||||
return fmt.Sprintf("%s/%s", e.Namespace, e.PodName)
|
||||
func (p *podConverter) GetKey(obj WorkloadEndpointData) string {
|
||||
return fmt.Sprintf("%s/%s", obj.Namespace, obj.PodName)
|
||||
}
|
||||
|
||||
func (p *podConverter) DeleteArgsFromKey(key string) (string, string) {
|
||||
@@ -107,3 +126,21 @@ func (p *podConverter) DeleteArgsFromKey(key string) (string, string) {
|
||||
log.Panicf("DeleteArgsFromKey call for WorkloadEndpoints is not allowed")
|
||||
return "", ""
|
||||
}
|
||||
|
||||
// ExtractPodFromUpdate takes an update as received from the informer and returns the pod object, if present.
|
||||
// some updates (particularly deletes) can include tombstone placeholders rather than an exact pod object. This
|
||||
// function should be called in order to safely handles those cases.
|
||||
func ExtractPodFromUpdate(obj interface{}) (*v1.Pod, error) {
|
||||
pod, ok := obj.(*v1.Pod)
|
||||
if !ok {
|
||||
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
|
||||
if !ok {
|
||||
return nil, errors.New("couldn't get object from tombstone")
|
||||
}
|
||||
pod, ok = tombstone.Obj.(*v1.Pod)
|
||||
if !ok {
|
||||
return nil, errors.New("tombstone contained object that is not a Pod")
|
||||
}
|
||||
}
|
||||
return pod, nil
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2018 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2018-2020 Tigera, Inc. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -17,10 +17,11 @@ package converter
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
api "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/k8s/conversion"
|
||||
api "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/backend/k8s/conversion"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
@@ -34,7 +35,7 @@ func NewServiceAccountConverter() Converter {
|
||||
}
|
||||
|
||||
func (nc *serviceAccountConverter) Convert(k8sObj interface{}) (interface{}, error) {
|
||||
var c conversion.Converter
|
||||
c := conversion.NewConverter()
|
||||
serviceAccount, ok := k8sObj.(*v1.ServiceAccount)
|
||||
if !ok {
|
||||
tombstone, ok := k8sObj.(cache.DeletedFinalStateUnknown)
|
||||
@@ -53,7 +54,7 @@ func (nc *serviceAccountConverter) Convert(k8sObj interface{}) (interface{}, err
|
||||
profile := kvp.Value.(*api.Profile)
|
||||
|
||||
// Isolate the metadata fields that we care about. ResourceVersion, CreationTimeStamp, etc are
|
||||
// not relevant so we ignore them. This prevents uncessary updates.
|
||||
// not relevant so we ignore them. This prevents unnecessary updates.
|
||||
profile.ObjectMeta = metav1.ObjectMeta{Name: profile.Name}
|
||||
|
||||
return *profile, nil
|
||||
176
vendor/github.com/projectcalico/calico/libcalico-go/LICENSE
generated
vendored
Normal file
176
vendor/github.com/projectcalico/calico/libcalico-go/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,176 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
@@ -16,6 +16,8 @@ package v3
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -37,9 +39,13 @@ type BlockAffinity struct {
|
||||
|
||||
// BlockAffinitySpec contains the specification for a BlockAffinity resource.
|
||||
type BlockAffinitySpec struct {
|
||||
State string `json:"state"`
|
||||
Node string `json:"node"`
|
||||
CIDR string `json:"cidr"`
|
||||
State string `json:"state"`
|
||||
Node string `json:"node"`
|
||||
CIDR string `json:"cidr"`
|
||||
|
||||
// Deleted indicates that this block affinity is being deleted.
|
||||
// This field is a string for compatibility with older releases that
|
||||
// mistakenly treat this field as a string.
|
||||
Deleted string `json:"deleted"`
|
||||
}
|
||||
|
||||
@@ -58,7 +64,7 @@ func NewBlockAffinity() *BlockAffinity {
|
||||
return &BlockAffinity{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindBlockAffinity,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -69,7 +75,7 @@ func NewBlockAffinityList() *BlockAffinityList {
|
||||
return &BlockAffinityList{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindBlockAffinityList,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -15,20 +15,12 @@
|
||||
/*
|
||||
Package v3 implements the resource definitions used on the Calico client API.
|
||||
|
||||
The valid resource types are:
|
||||
- BGPPeer
|
||||
- GlobalNetworkPolicy
|
||||
- HostEndpoint
|
||||
- IPPool
|
||||
- NetworkPolicy
|
||||
- Profile
|
||||
- WorkloadEndpoint
|
||||
|
||||
The resource structures include the JSON tags for each exposed field. These are standard
|
||||
golang tags that define the JSON format of the structures as used by calicoctl. The YAML
|
||||
format also used by calicoctl is directly mapped from the JSON.
|
||||
*/
|
||||
|
||||
// +k8s:deepcopy-gen=package,register
|
||||
// +k8s:openapi-gen=true
|
||||
|
||||
package v3
|
||||
121
vendor/github.com/projectcalico/calico/libcalico-go/lib/apis/v3/ipam_block.go
generated
vendored
Normal file
121
vendor/github.com/projectcalico/calico/libcalico-go/lib/apis/v3/ipam_block.go
generated
vendored
Normal file
@@ -0,0 +1,121 @@
|
||||
// Copyright (c) 2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
KindIPAMBlock = "IPAMBlock"
|
||||
KindIPAMBlockList = "IPAMBlockList"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// IPAMBlock contains information about a block for IP address assignment.
|
||||
type IPAMBlock struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
// Specification of the IPAMBlock.
|
||||
Spec IPAMBlockSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
// IPAMBlockSpec contains the specification for an IPAMBlock resource.
|
||||
type IPAMBlockSpec struct {
|
||||
// The block's CIDR.
|
||||
CIDR string `json:"cidr"`
|
||||
|
||||
// Affinity of the block, if this block has one. If set, it will be of the form
|
||||
// "host:<hostname>". If not set, this block is not affine to a host.
|
||||
Affinity *string `json:"affinity,omitempty"`
|
||||
|
||||
// Array of allocations in-use within this block. nil entries mean the allocation is free.
|
||||
// For non-nil entries at index i, the index is the ordinal of the allocation within this block
|
||||
// and the value is the index of the associated attributes in the Attributes array.
|
||||
Allocations []*int `json:"allocations"`
|
||||
|
||||
// Unallocated is an ordered list of allocations which are free in the block.
|
||||
Unallocated []int `json:"unallocated"`
|
||||
|
||||
// Attributes is an array of arbitrary metadata associated with allocations in the block. To find
|
||||
// attributes for a given allocation, use the value of the allocation's entry in the Allocations array
|
||||
// as the index of the element in this array.
|
||||
Attributes []AllocationAttribute `json:"attributes"`
|
||||
|
||||
// We store a sequence number that is updated each time the block is written.
|
||||
// Each allocation will also store the sequence number of the block at the time of its creation.
|
||||
// When releasing an IP, passing the sequence number associated with the allocation allows us
|
||||
// to protect against a race condition and ensure the IP hasn't been released and re-allocated
|
||||
// since the release request.
|
||||
//
|
||||
// +kubebuilder:default=0
|
||||
// +optional
|
||||
SequenceNumber uint64 `json:"sequenceNumber"`
|
||||
|
||||
// Map of allocated ordinal within the block to sequence number of the block at
|
||||
// the time of allocation. Kubernetes does not allow numerical keys for maps, so
|
||||
// the key is cast to a string.
|
||||
// +optional
|
||||
SequenceNumberForAllocation map[string]uint64 `json:"sequenceNumberForAllocation"`
|
||||
|
||||
// Deleted is an internal boolean used to workaround a limitation in the Kubernetes API whereby
|
||||
// deletion will not return a conflict error if the block has been updated. It should not be set manually.
|
||||
// +optional
|
||||
Deleted bool `json:"deleted"`
|
||||
|
||||
// StrictAffinity on the IPAMBlock is deprecated and no longer used by the code. Use IPAMConfig StrictAffinity instead.
|
||||
DeprecatedStrictAffinity bool `json:"strictAffinity"`
|
||||
}
|
||||
|
||||
type AllocationAttribute struct {
|
||||
AttrPrimary *string `json:"handle_id,omitempty"`
|
||||
AttrSecondary map[string]string `json:"secondary,omitempty"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// IPAMBlockList contains a list of IPAMBlock resources.
|
||||
type IPAMBlockList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
Items []IPAMBlock `json:"items"`
|
||||
}
|
||||
|
||||
// NewIPAMBlock creates a new (zeroed) IPAMBlock struct with the TypeMetadata initialised to the current
|
||||
// version.
|
||||
func NewIPAMBlock() *IPAMBlock {
|
||||
return &IPAMBlock{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindIPAMBlock,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewIPAMBlockList creates a new (zeroed) IPAMBlockList struct with the TypeMetadata initialised to the current
|
||||
// version.
|
||||
func NewIPAMBlockList() *IPAMBlockList {
|
||||
return &IPAMBlockList{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindIPAMBlockList,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -16,11 +16,14 @@ package v3
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
KindIPAMConfig = "IPAMConfig"
|
||||
KindIPAMConfigList = "IPAMConfigList"
|
||||
KindIPAMConfig = "IPAMConfig"
|
||||
KindIPAMConfigList = "IPAMConfigList"
|
||||
GlobalIPAMConfigName = "default"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
@@ -29,16 +32,25 @@ const (
|
||||
// IPAMConfig contains information about a block for IP address assignment.
|
||||
type IPAMConfig struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// Standard object's metadata.
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
// Specification of the IPAMConfig.
|
||||
Spec IPAMConfigSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
// IPAMConfigSpec contains the specification for a IPAMConfig resource.
|
||||
// IPAMConfigSpec contains the specification for an IPAMConfig resource.
|
||||
type IPAMConfigSpec struct {
|
||||
StrictAffinity bool `json:"strictAffinity"`
|
||||
AutoAllocateBlocks bool `json:"autoAllocateBlocks"`
|
||||
|
||||
// MaxBlocksPerHost, if non-zero, is the max number of blocks that can be
|
||||
// affine to each host.
|
||||
// +kubebuilder:validation:Minimum:=0
|
||||
// +kubebuilder:validation:Maximum:=2147483647
|
||||
// +optional
|
||||
MaxBlocksPerHost int `json:"maxBlocksPerHost,omitempty"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
@@ -56,7 +68,7 @@ func NewIPAMConfig() *IPAMConfig {
|
||||
return &IPAMConfig{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindIPAMConfig,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -67,7 +79,7 @@ func NewIPAMConfigList() *IPAMConfigList {
|
||||
return &IPAMConfigList{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindIPAMConfigList,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -16,6 +16,8 @@ package v3
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -26,7 +28,7 @@ const (
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// IPAMHandle contains information about a IPAMHandle resource.
|
||||
// IPAMHandle contains information about an IPAMHandle resource.
|
||||
type IPAMHandle struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
@@ -35,10 +37,13 @@ type IPAMHandle struct {
|
||||
Spec IPAMHandleSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
// IPAMHandleSpec contains the specification for a IPAMHandle resource.
|
||||
// IPAMHandleSpec contains the specification for an IPAMHandle resource.
|
||||
type IPAMHandleSpec struct {
|
||||
HandleID string `json:"handleID"`
|
||||
Block map[string]int `json:"block"`
|
||||
|
||||
// +optional
|
||||
Deleted bool `json:"deleted"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
@@ -56,7 +61,7 @@ func NewIPAMHandle() *IPAMHandle {
|
||||
return &IPAMHandle{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindIPAMHandle,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -67,7 +72,7 @@ func NewIPAMHandleList() *IPAMHandleList {
|
||||
return &IPAMHandleList{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindIPAMHandleList,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2017,2020 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -17,12 +17,17 @@ package v3
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/numorstring"
|
||||
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
|
||||
|
||||
"github.com/projectcalico/api/pkg/lib/numorstring"
|
||||
)
|
||||
|
||||
const (
|
||||
KindNode = "Node"
|
||||
KindNodeList = "NodeList"
|
||||
CalicoNodeIP = "CalicoNodeIP"
|
||||
InternalIP = "InternalIP"
|
||||
ExternalIP = "ExternalIP"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
@@ -35,6 +40,8 @@ type Node struct {
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
// Specification of the Node.
|
||||
Spec NodeSpec `json:"spec,omitempty"`
|
||||
// Status of the Node.
|
||||
Status NodeStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
// NodeSpec contains the specification for a Node resource.
|
||||
@@ -48,8 +55,42 @@ type NodeSpec struct {
|
||||
// VXLANTunnelMACAddr is the MAC address of the VXLAN tunnel.
|
||||
VXLANTunnelMACAddr string `json:"vxlanTunnelMACAddr,omitempty" validate:"omitempty,mac"`
|
||||
|
||||
// IPv6VXLANTunnelAddr is the address of the IPv6 VXLAN tunnel.
|
||||
IPv6VXLANTunnelAddr string `json:"ipv6VXLANTunnelAddr,omitempty" validate:"omitempty,ipv6"`
|
||||
|
||||
// VXLANTunnelMACAddrV6 is the MAC address of the IPv6 VXLAN tunnel.
|
||||
VXLANTunnelMACAddrV6 string `json:"vxlanTunnelMACAddrV6,omitempty" validate:"omitempty,mac"`
|
||||
|
||||
// OrchRefs for this node.
|
||||
OrchRefs []OrchRef `json:"orchRefs,omitempty" validate:"omitempty"`
|
||||
|
||||
// Wireguard configuration for this node.
|
||||
Wireguard *NodeWireguardSpec `json:"wireguard,omitempty" validate:"omitempty"`
|
||||
|
||||
// Addresses list address that a client can reach the node at.
|
||||
Addresses []NodeAddress `json:"addresses,omitempty" validate:"omitempty"`
|
||||
}
|
||||
|
||||
// NodeAddress represents an address assigned to a node.
|
||||
type NodeAddress struct {
|
||||
// Address is a string representation of the actual address.
|
||||
Address string `json:"address" validate:"net"`
|
||||
|
||||
// Type is the node IP type
|
||||
Type string `json:"type,omitempty" validate:"omitempty,ipType"`
|
||||
}
|
||||
|
||||
type NodeStatus struct {
|
||||
// WireguardPublicKey is the IPv4 Wireguard public-key for this node.
|
||||
// wireguardPublicKey validates if the string is a valid base64 encoded key.
|
||||
WireguardPublicKey string `json:"wireguardPublicKey,omitempty" validate:"omitempty,wireguardPublicKey"`
|
||||
|
||||
// WireguardPublicKeyV6 is the IPv6 Wireguard public-key for this node.
|
||||
// wireguardPublicKey validates if the string is a valid base64 encoded key.
|
||||
WireguardPublicKeyV6 string `json:"wireguardPublicKeyV6,omitempty" validate:"omitempty,wireguardPublicKey"`
|
||||
|
||||
// PodCIDR is a reflection of the Kubernetes node's spec.PodCIDRs field.
|
||||
PodCIDRs []string `json:"podCIDRs,omitempty" validate:"omitempty"`
|
||||
}
|
||||
|
||||
// OrchRef is used to correlate a Calico node to its corresponding representation in a given orchestrator
|
||||
@@ -78,6 +119,15 @@ type NodeBGPSpec struct {
|
||||
RouteReflectorClusterID string `json:"routeReflectorClusterID,omitempty" validate:"omitempty,ipv4"`
|
||||
}
|
||||
|
||||
// NodeWireguardSpec contains the specification for the Node wireguard configuration.
|
||||
type NodeWireguardSpec struct {
|
||||
// InterfaceIPv4Address is the IP address for the IPv4 Wireguard interface.
|
||||
InterfaceIPv4Address string `json:"interfaceIPv4Address,omitempty" validate:"omitempty,ipv4"`
|
||||
|
||||
// InterfaceIPv6Address is the IP address for the IPv6 Wireguard interface.
|
||||
InterfaceIPv6Address string `json:"interfaceIPv6Address,omitempty" validate:"omitempty,ipv6"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// NodeList contains a list of Node resources.
|
||||
@@ -93,7 +143,7 @@ func NewNode() *Node {
|
||||
return &Node{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindNode,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -104,7 +154,7 @@ func NewNodeList() *NodeList {
|
||||
return &NodeList{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindNodeList,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
3276
vendor/github.com/projectcalico/calico/libcalico-go/lib/apis/v3/openapi_generated.go
generated
vendored
Normal file
3276
vendor/github.com/projectcalico/calico/libcalico-go/lib/apis/v3/openapi_generated.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
@@ -19,7 +19,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
var SchemeGroupVersion = schema.GroupVersion{Group: "crd.projectcalico.org", Version: "v3"}
|
||||
var SchemeGroupVersion = schema.GroupVersion{Group: "crd.projectcalico.org", Version: "v1"}
|
||||
|
||||
var (
|
||||
SchemeBuilder runtime.SchemeBuilder
|
||||
@@ -14,7 +14,12 @@
|
||||
|
||||
package v3
|
||||
|
||||
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
|
||||
"github.com/projectcalico/api/pkg/lib/numorstring"
|
||||
)
|
||||
|
||||
const (
|
||||
KindWorkloadEndpoint = "WorkloadEndpoint"
|
||||
@@ -48,6 +53,8 @@ type WorkloadEndpointSpec struct {
|
||||
Pod string `json:"pod,omitempty" validate:"omitempty,name"`
|
||||
// The Endpoint name.
|
||||
Endpoint string `json:"endpoint,omitempty" validate:"omitempty,name"`
|
||||
// ServiceAccountName, if specified, is the name of the k8s ServiceAccount for this pod.
|
||||
ServiceAccountName string `json:"serviceAccountName,omitempty" validate:"omitempty,name"`
|
||||
// IPNetworks is a list of subnets allocated to this endpoint. IP packets will only be
|
||||
// allowed to leave this interface if they come from an address in one of these subnets.
|
||||
// Currently only /32 for IPv4 and /128 for IPv6 networks are supported.
|
||||
@@ -71,7 +78,19 @@ type WorkloadEndpointSpec struct {
|
||||
// MAC is the MAC address of the endpoint interface.
|
||||
MAC string `json:"mac,omitempty" validate:"omitempty,mac"`
|
||||
// Ports contains the endpoint's named ports, which may be referenced in security policy rules.
|
||||
Ports []EndpointPort `json:"ports,omitempty" validate:"dive,omitempty"`
|
||||
Ports []WorkloadEndpointPort `json:"ports,omitempty" validate:"dive,omitempty"`
|
||||
// AllowSpoofedSourcePrefixes is a list of CIDRs that the endpoint should be able to send traffic from,
|
||||
// bypassing the RPF check.
|
||||
AllowSpoofedSourcePrefixes []string `json:"allowSpoofedSourcePrefixes,omitempty" validate:"omitempty,dive,cidr"`
|
||||
}
|
||||
|
||||
// WorkloadEndpointPort represents one endpoint's named or mapped port
|
||||
type WorkloadEndpointPort struct {
|
||||
Name string `json:"name" validate:"omitempty,portName"`
|
||||
Protocol numorstring.Protocol `json:"protocol"`
|
||||
Port uint16 `json:"port" validate:"gt=0"`
|
||||
HostPort uint16 `json:"hostPort"`
|
||||
HostIP string `json:"hostIP" validate:"omitempty,net"`
|
||||
}
|
||||
|
||||
// IPNat contains a single NAT mapping for a WorkloadEndpoint resource.
|
||||
@@ -98,7 +117,7 @@ func NewWorkloadEndpoint() *WorkloadEndpoint {
|
||||
return &WorkloadEndpoint{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindWorkloadEndpoint,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -109,7 +128,7 @@ func NewWorkloadEndpointList() *WorkloadEndpointList {
|
||||
return &WorkloadEndpointList{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindWorkloadEndpointList,
|
||||
APIVersion: GroupVersionCurrent,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
720
vendor/github.com/projectcalico/calico/libcalico-go/lib/apis/v3/zz_generated.deepcopy.go
generated
vendored
Normal file
720
vendor/github.com/projectcalico/calico/libcalico-go/lib/apis/v3/zz_generated.deepcopy.go
generated
vendored
Normal file
@@ -0,0 +1,720 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
// Copyright (c) 2016-2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||
|
||||
package v3
|
||||
|
||||
import (
|
||||
numorstring "github.com/projectcalico/api/pkg/lib/numorstring"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AllocationAttribute) DeepCopyInto(out *AllocationAttribute) {
|
||||
*out = *in
|
||||
if in.AttrPrimary != nil {
|
||||
in, out := &in.AttrPrimary, &out.AttrPrimary
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.AttrSecondary != nil {
|
||||
in, out := &in.AttrSecondary, &out.AttrSecondary
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationAttribute.
|
||||
func (in *AllocationAttribute) DeepCopy() *AllocationAttribute {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AllocationAttribute)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *BlockAffinity) DeepCopyInto(out *BlockAffinity) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
out.Spec = in.Spec
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockAffinity.
|
||||
func (in *BlockAffinity) DeepCopy() *BlockAffinity {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(BlockAffinity)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *BlockAffinity) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *BlockAffinityList) DeepCopyInto(out *BlockAffinityList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]BlockAffinity, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockAffinityList.
|
||||
func (in *BlockAffinityList) DeepCopy() *BlockAffinityList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(BlockAffinityList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *BlockAffinityList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *BlockAffinitySpec) DeepCopyInto(out *BlockAffinitySpec) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockAffinitySpec.
|
||||
func (in *BlockAffinitySpec) DeepCopy() *BlockAffinitySpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(BlockAffinitySpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IPAMBlock) DeepCopyInto(out *IPAMBlock) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMBlock.
|
||||
func (in *IPAMBlock) DeepCopy() *IPAMBlock {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(IPAMBlock)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *IPAMBlock) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IPAMBlockList) DeepCopyInto(out *IPAMBlockList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]IPAMBlock, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMBlockList.
|
||||
func (in *IPAMBlockList) DeepCopy() *IPAMBlockList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(IPAMBlockList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *IPAMBlockList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IPAMBlockSpec) DeepCopyInto(out *IPAMBlockSpec) {
|
||||
*out = *in
|
||||
if in.Affinity != nil {
|
||||
in, out := &in.Affinity, &out.Affinity
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.Allocations != nil {
|
||||
in, out := &in.Allocations, &out.Allocations
|
||||
*out = make([]*int, len(*in))
|
||||
for i := range *in {
|
||||
if (*in)[i] != nil {
|
||||
in, out := &(*in)[i], &(*out)[i]
|
||||
*out = new(int)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
}
|
||||
if in.Unallocated != nil {
|
||||
in, out := &in.Unallocated, &out.Unallocated
|
||||
*out = make([]int, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Attributes != nil {
|
||||
in, out := &in.Attributes, &out.Attributes
|
||||
*out = make([]AllocationAttribute, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.SequenceNumberForAllocation != nil {
|
||||
in, out := &in.SequenceNumberForAllocation, &out.SequenceNumberForAllocation
|
||||
*out = make(map[string]uint64, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMBlockSpec.
|
||||
func (in *IPAMBlockSpec) DeepCopy() *IPAMBlockSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(IPAMBlockSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IPAMConfig) DeepCopyInto(out *IPAMConfig) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
out.Spec = in.Spec
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMConfig.
|
||||
func (in *IPAMConfig) DeepCopy() *IPAMConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(IPAMConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *IPAMConfig) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IPAMConfigList) DeepCopyInto(out *IPAMConfigList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]IPAMConfig, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMConfigList.
|
||||
func (in *IPAMConfigList) DeepCopy() *IPAMConfigList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(IPAMConfigList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *IPAMConfigList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IPAMConfigSpec) DeepCopyInto(out *IPAMConfigSpec) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMConfigSpec.
|
||||
func (in *IPAMConfigSpec) DeepCopy() *IPAMConfigSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(IPAMConfigSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IPAMHandle) DeepCopyInto(out *IPAMHandle) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMHandle.
|
||||
func (in *IPAMHandle) DeepCopy() *IPAMHandle {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(IPAMHandle)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *IPAMHandle) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IPAMHandleList) DeepCopyInto(out *IPAMHandleList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]IPAMHandle, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMHandleList.
|
||||
func (in *IPAMHandleList) DeepCopy() *IPAMHandleList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(IPAMHandleList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *IPAMHandleList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IPAMHandleSpec) DeepCopyInto(out *IPAMHandleSpec) {
|
||||
*out = *in
|
||||
if in.Block != nil {
|
||||
in, out := &in.Block, &out.Block
|
||||
*out = make(map[string]int, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMHandleSpec.
|
||||
func (in *IPAMHandleSpec) DeepCopy() *IPAMHandleSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(IPAMHandleSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IPNAT) DeepCopyInto(out *IPNAT) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPNAT.
|
||||
func (in *IPNAT) DeepCopy() *IPNAT {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(IPNAT)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Node) DeepCopyInto(out *Node) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node.
|
||||
func (in *Node) DeepCopy() *Node {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Node)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *Node) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NodeAddress) DeepCopyInto(out *NodeAddress) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAddress.
|
||||
func (in *NodeAddress) DeepCopy() *NodeAddress {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(NodeAddress)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NodeBGPSpec) DeepCopyInto(out *NodeBGPSpec) {
|
||||
*out = *in
|
||||
if in.ASNumber != nil {
|
||||
in, out := &in.ASNumber, &out.ASNumber
|
||||
*out = new(numorstring.ASNumber)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeBGPSpec.
|
||||
func (in *NodeBGPSpec) DeepCopy() *NodeBGPSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(NodeBGPSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NodeList) DeepCopyInto(out *NodeList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]Node, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeList.
|
||||
func (in *NodeList) DeepCopy() *NodeList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(NodeList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *NodeList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NodeSpec) DeepCopyInto(out *NodeSpec) {
|
||||
*out = *in
|
||||
if in.BGP != nil {
|
||||
in, out := &in.BGP, &out.BGP
|
||||
*out = new(NodeBGPSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.OrchRefs != nil {
|
||||
in, out := &in.OrchRefs, &out.OrchRefs
|
||||
*out = make([]OrchRef, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Wireguard != nil {
|
||||
in, out := &in.Wireguard, &out.Wireguard
|
||||
*out = new(NodeWireguardSpec)
|
||||
**out = **in
|
||||
}
|
||||
if in.Addresses != nil {
|
||||
in, out := &in.Addresses, &out.Addresses
|
||||
*out = make([]NodeAddress, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec.
|
||||
func (in *NodeSpec) DeepCopy() *NodeSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(NodeSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
|
||||
*out = *in
|
||||
if in.PodCIDRs != nil {
|
||||
in, out := &in.PodCIDRs, &out.PodCIDRs
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus.
|
||||
func (in *NodeStatus) DeepCopy() *NodeStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(NodeStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NodeWireguardSpec) DeepCopyInto(out *NodeWireguardSpec) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeWireguardSpec.
|
||||
func (in *NodeWireguardSpec) DeepCopy() *NodeWireguardSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(NodeWireguardSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *OrchRef) DeepCopyInto(out *OrchRef) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchRef.
|
||||
func (in *OrchRef) DeepCopy() *OrchRef {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(OrchRef)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *WorkloadEndpoint) DeepCopyInto(out *WorkloadEndpoint) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadEndpoint.
|
||||
func (in *WorkloadEndpoint) DeepCopy() *WorkloadEndpoint {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(WorkloadEndpoint)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *WorkloadEndpoint) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *WorkloadEndpointList) DeepCopyInto(out *WorkloadEndpointList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]WorkloadEndpoint, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadEndpointList.
|
||||
func (in *WorkloadEndpointList) DeepCopy() *WorkloadEndpointList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(WorkloadEndpointList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *WorkloadEndpointList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *WorkloadEndpointPort) DeepCopyInto(out *WorkloadEndpointPort) {
|
||||
*out = *in
|
||||
out.Protocol = in.Protocol
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadEndpointPort.
|
||||
func (in *WorkloadEndpointPort) DeepCopy() *WorkloadEndpointPort {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(WorkloadEndpointPort)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *WorkloadEndpointSpec) DeepCopyInto(out *WorkloadEndpointSpec) {
|
||||
*out = *in
|
||||
if in.IPNetworks != nil {
|
||||
in, out := &in.IPNetworks, &out.IPNetworks
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.IPNATs != nil {
|
||||
in, out := &in.IPNATs, &out.IPNATs
|
||||
*out = make([]IPNAT, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Profiles != nil {
|
||||
in, out := &in.Profiles, &out.Profiles
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Ports != nil {
|
||||
in, out := &in.Ports, &out.Ports
|
||||
*out = make([]WorkloadEndpointPort, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.AllowSpoofedSourcePrefixes != nil {
|
||||
in, out := &in.AllowSpoofedSourcePrefixes, &out.AllowSpoofedSourcePrefixes
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadEndpointSpec.
|
||||
func (in *WorkloadEndpointSpec) DeepCopy() *WorkloadEndpointSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(WorkloadEndpointSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2017-2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -25,5 +25,23 @@ const (
|
||||
// duplicates the value of the Pod.Status.PodIP field, which is set by kubelet but,
|
||||
// since we write it ourselves, we can make sure that it is written synchronously
|
||||
// and quickly.
|
||||
//
|
||||
// We set this annotation to the empty string when the WEP is deleted by the CNI plugin.
|
||||
// That signals that the IP no longer belongs to this pod.
|
||||
AnnotationPodIP = "cni.projectcalico.org/podIP"
|
||||
|
||||
// AnnotationPodIPs is similar for the plural PodIPs field.
|
||||
AnnotationPodIPs = "cni.projectcalico.org/podIPs"
|
||||
|
||||
// AnnotationPodIPs is the annotation set by the Amazon VPC CNI plugin.
|
||||
AnnotationAWSPodIPs = "vpc.amazonaws.com/pod-ips"
|
||||
|
||||
// AnnotationContainerID stores the container ID of the pod. This allows us to disambiguate different pods
|
||||
// that have the same name and namespace. For example, stateful set pod that is restarted. May be missing
|
||||
// on older Pods.
|
||||
AnnotationContainerID = "cni.projectcalico.org/containerID"
|
||||
|
||||
// NameLabel is a label that can be used to match a serviceaccount or namespace
|
||||
// name exactly.
|
||||
NameLabel = "projectcalico.org/name"
|
||||
)
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2016-2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -15,11 +15,7 @@
|
||||
package conversion
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
@@ -27,13 +23,17 @@ import (
|
||||
kapiv1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
"github.com/projectcalico/libcalico-go/lib/names"
|
||||
cnet "github.com/projectcalico/libcalico-go/lib/net"
|
||||
"github.com/projectcalico/libcalico-go/lib/numorstring"
|
||||
discovery "k8s.io/api/discovery/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
|
||||
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
|
||||
"github.com/projectcalico/api/pkg/lib/numorstring"
|
||||
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/calico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/names"
|
||||
cnet "github.com/projectcalico/calico/libcalico-go/lib/net"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -47,40 +47,47 @@ const (
|
||||
SelectorPod
|
||||
)
|
||||
|
||||
// TODO: make this private and expose a public conversion interface instead
|
||||
type Converter struct{}
|
||||
type Converter interface {
|
||||
WorkloadEndpointConverter
|
||||
ParseWorkloadEndpointName(workloadName string) (names.WorkloadEndpointIdentifiers, error)
|
||||
NamespaceToProfile(ns *kapiv1.Namespace) (*model.KVPair, error)
|
||||
IsValidCalicoWorkloadEndpoint(pod *kapiv1.Pod) bool
|
||||
IsReadyCalicoPod(pod *kapiv1.Pod) bool
|
||||
IsScheduled(pod *kapiv1.Pod) bool
|
||||
IsHostNetworked(pod *kapiv1.Pod) bool
|
||||
HasIPAddress(pod *kapiv1.Pod) bool
|
||||
StagedKubernetesNetworkPolicyToStagedName(stagedK8sName string) string
|
||||
K8sNetworkPolicyToCalico(np *networkingv1.NetworkPolicy) (*model.KVPair, error)
|
||||
EndpointSliceToKVP(svc *discovery.EndpointSlice) (*model.KVPair, error)
|
||||
ServiceToKVP(service *kapiv1.Service) (*model.KVPair, error)
|
||||
ProfileNameToNamespace(profileName string) (string, error)
|
||||
ServiceAccountToProfile(sa *kapiv1.ServiceAccount) (*model.KVPair, error)
|
||||
ProfileNameToServiceAccount(profileName string) (ns, sa string, err error)
|
||||
JoinProfileRevisions(nsRev, saRev string) string
|
||||
SplitProfileRevision(rev string) (nsRev string, saRev string, err error)
|
||||
}
|
||||
|
||||
// VethNameForWorkload returns a deterministic veth name
|
||||
// for the given Kubernetes workload (WEP) name and namespace.
|
||||
func VethNameForWorkload(namespace, podname string) string {
|
||||
// A SHA1 is always 20 bytes long, and so is sufficient for generating the
|
||||
// veth name and mac addr.
|
||||
h := sha1.New()
|
||||
h.Write([]byte(fmt.Sprintf("%s.%s", namespace, podname)))
|
||||
prefix := os.Getenv("FELIX_INTERFACEPREFIX")
|
||||
if prefix == "" {
|
||||
// Prefix is not set. Default to "cali"
|
||||
prefix = "cali"
|
||||
} else {
|
||||
// Prefix is set - use the first value in the list.
|
||||
splits := strings.Split(prefix, ",")
|
||||
prefix = splits[0]
|
||||
type converter struct {
|
||||
WorkloadEndpointConverter
|
||||
}
|
||||
|
||||
func NewConverter() Converter {
|
||||
return &converter{
|
||||
WorkloadEndpointConverter: NewWorkloadEndpointConverter(),
|
||||
}
|
||||
log.WithField("prefix", prefix).Debugf("Using prefix to create a WorkloadEndpoint veth name")
|
||||
return fmt.Sprintf("%s%s", prefix, hex.EncodeToString(h.Sum(nil))[:11])
|
||||
}
|
||||
|
||||
// ParseWorkloadName extracts the Node name, Orchestrator, Pod name and endpoint from the
|
||||
// given WorkloadEndpoint name.
|
||||
// The expected format for k8s is <node>-k8s-<pod>-<endpoint>
|
||||
func (c Converter) ParseWorkloadEndpointName(workloadName string) (names.WorkloadEndpointIdentifiers, error) {
|
||||
func (c converter) ParseWorkloadEndpointName(workloadName string) (names.WorkloadEndpointIdentifiers, error) {
|
||||
return names.ParseWorkloadEndpointName(workloadName)
|
||||
}
|
||||
|
||||
// NamespaceToProfile converts a Namespace to a Calico Profile. The Profile stores
|
||||
// labels from the Namespace which are inherited by the WorkloadEndpoints within
|
||||
// the Profile. This Profile also has the default ingress and egress rules, which are both 'allow'.
|
||||
func (c Converter) NamespaceToProfile(ns *kapiv1.Namespace) (*model.KVPair, error) {
|
||||
func (c converter) NamespaceToProfile(ns *kapiv1.Namespace) (*model.KVPair, error) {
|
||||
// Generate the labels to apply to the profile, using a special prefix
|
||||
// to indicate that these are the labels from the parent Kubernetes Namespace.
|
||||
labels := map[string]string{}
|
||||
@@ -88,6 +95,10 @@ func (c Converter) NamespaceToProfile(ns *kapiv1.Namespace) (*model.KVPair, erro
|
||||
labels[NamespaceLabelPrefix+k] = v
|
||||
}
|
||||
|
||||
// Add a label for the namespace's name. This allows exact namespace matching
|
||||
// based on name within the namespaceSelector.
|
||||
labels[NamespaceLabelPrefix+NameLabel] = ns.Name
|
||||
|
||||
// Create the profile object.
|
||||
name := NamespaceProfileNamePrefix + ns.Name
|
||||
profile := apiv3.NewProfile()
|
||||
@@ -97,15 +108,9 @@ func (c Converter) NamespaceToProfile(ns *kapiv1.Namespace) (*model.KVPair, erro
|
||||
UID: ns.UID,
|
||||
}
|
||||
profile.Spec = apiv3.ProfileSpec{
|
||||
Ingress: []apiv3.Rule{{Action: apiv3.Allow}},
|
||||
Egress: []apiv3.Rule{{Action: apiv3.Allow}},
|
||||
}
|
||||
|
||||
// Only set labels to apply when there are actually labels. This makes the
|
||||
// result of this function consistent with the struct as loaded directly
|
||||
// from etcd, which uses nil for the empty map.
|
||||
if len(labels) != 0 {
|
||||
profile.Spec.LabelsToApply = labels
|
||||
Ingress: []apiv3.Rule{{Action: apiv3.Allow}},
|
||||
Egress: []apiv3.Rule{{Action: apiv3.Allow}},
|
||||
LabelsToApply: labels,
|
||||
}
|
||||
|
||||
// Embed the profile in a KVPair.
|
||||
@@ -125,7 +130,7 @@ func (c Converter) NamespaceToProfile(ns *kapiv1.Namespace) (*model.KVPair, erro
|
||||
// invalid Pods, it is important that pods can only transition from not-valid to valid and not
|
||||
// the other way. If they transition from valid to invalid, we'll fail to emit a deletion
|
||||
// event in the watcher.
|
||||
func (c Converter) IsValidCalicoWorkloadEndpoint(pod *kapiv1.Pod) bool {
|
||||
func (c converter) IsValidCalicoWorkloadEndpoint(pod *kapiv1.Pod) bool {
|
||||
if c.IsHostNetworked(pod) {
|
||||
log.WithField("pod", pod.Name).Debug("Pod is host networked.")
|
||||
return false
|
||||
@@ -138,7 +143,7 @@ func (c Converter) IsValidCalicoWorkloadEndpoint(pod *kapiv1.Pod) bool {
|
||||
|
||||
// IsReadyCalicoPod returns true if the pod is a valid Calico WorkloadEndpoint and has
|
||||
// an IP address assigned (i.e. it's ready for Calico networking).
|
||||
func (c Converter) IsReadyCalicoPod(pod *kapiv1.Pod) bool {
|
||||
func (c converter) IsReadyCalicoPod(pod *kapiv1.Pod) bool {
|
||||
if !c.IsValidCalicoWorkloadEndpoint(pod) {
|
||||
return false
|
||||
} else if !c.HasIPAddress(pod) {
|
||||
@@ -154,204 +159,116 @@ const (
|
||||
podCompleted kapiv1.PodPhase = "Completed"
|
||||
)
|
||||
|
||||
func (c Converter) IsFinished(pod *kapiv1.Pod) bool {
|
||||
func IsFinished(pod *kapiv1.Pod) bool {
|
||||
if pod.DeletionTimestamp != nil {
|
||||
// Pod is being deleted but it may still be in its termination grace period. If Calico CNI
|
||||
// was used, then we use AnnotationPodIP to signal the moment that the pod actually loses its
|
||||
// IP by setting the annotation to "". (Otherwise, just fall back on the status of the pod.)
|
||||
if ip, ok := pod.Annotations[AnnotationPodIP]; ok && ip == "" {
|
||||
// AnnotationPodIP is explicitly set to empty string, Calico CNI has removed the network
|
||||
// from the pod.
|
||||
log.Debug("Pod is being deleted and IPs have been removed by Calico CNI.")
|
||||
return true
|
||||
} else if ips, ok := pod.Annotations[AnnotationAWSPodIPs]; ok && ips == "" {
|
||||
// AnnotationAWSPodIPs is explicitly set to empty string, AWS CNI has removed the network
|
||||
// from the pod.
|
||||
log.Debug("Pod is being deleted and IPs have been removed by AWS CNI.")
|
||||
return true
|
||||
}
|
||||
}
|
||||
switch pod.Status.Phase {
|
||||
case kapiv1.PodFailed, kapiv1.PodSucceeded, podCompleted:
|
||||
log.Debug("Pod phase is failed/succeeded/completed.")
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c Converter) IsScheduled(pod *kapiv1.Pod) bool {
|
||||
func (c converter) IsScheduled(pod *kapiv1.Pod) bool {
|
||||
return pod.Spec.NodeName != ""
|
||||
}
|
||||
|
||||
func (c Converter) IsHostNetworked(pod *kapiv1.Pod) bool {
|
||||
func (c converter) IsHostNetworked(pod *kapiv1.Pod) bool {
|
||||
return pod.Spec.HostNetwork
|
||||
}
|
||||
|
||||
func (c Converter) HasIPAddress(pod *kapiv1.Pod) bool {
|
||||
return pod.Status.PodIP != "" || pod.Annotations[AnnotationPodIP] != ""
|
||||
func (c converter) HasIPAddress(pod *kapiv1.Pod) bool {
|
||||
return pod.Status.PodIP != "" || pod.Annotations[AnnotationPodIP] != "" || pod.Annotations[AnnotationAWSPodIPs] != ""
|
||||
// Note: we don't need to check PodIPs and AnnotationPodIPs here, because those cannot be
|
||||
// non-empty if the corresponding singular field is empty.
|
||||
}
|
||||
|
||||
// GetPodIPs extracts the IP addresses from a Kubernetes Pod. At present, only a single IP
|
||||
// is expected/supported. GetPodIPs loads the IP either from the PodIP field, if present, or
|
||||
// the calico podIP annotation.
|
||||
func (c Converter) GetPodIPs(pod *kapiv1.Pod) ([]string, error) {
|
||||
var podIP string
|
||||
if podIP = pod.Status.PodIP; podIP != "" {
|
||||
log.WithField("ip", podIP).Debug("PodIP field filled in.")
|
||||
} else if podIP = pod.Annotations[AnnotationPodIP]; podIP != "" {
|
||||
log.WithField("ip", podIP).Debug("PodIP missing, falling back on Calico annotation.")
|
||||
// getPodIPs extracts the IP addresses from a Kubernetes Pod. We support a single IPv4 address
|
||||
// and/or a single IPv6. getPodIPs loads the IPs either from the PodIPs and PodIP field, if
|
||||
// present, or the calico podIP annotation.
|
||||
func getPodIPs(pod *kapiv1.Pod) ([]*cnet.IPNet, error) {
|
||||
logc := log.WithFields(log.Fields{"pod": pod.Name, "namespace": pod.Namespace})
|
||||
var podIPs []string
|
||||
if ips := pod.Status.PodIPs; len(ips) != 0 {
|
||||
logc.WithField("ips", ips).Debug("PodIPs field filled in")
|
||||
for _, ip := range ips {
|
||||
podIPs = append(podIPs, ip.IP)
|
||||
}
|
||||
} else if ip := pod.Status.PodIP; ip != "" {
|
||||
logc.WithField("ip", ip).Debug("PodIP field filled in")
|
||||
podIPs = append(podIPs, ip)
|
||||
} else if ips := pod.Annotations[AnnotationPodIPs]; ips != "" {
|
||||
logc.WithField("ips", ips).Debug("No PodStatus IPs, use Calico plural annotation")
|
||||
podIPs = append(podIPs, strings.Split(ips, ",")...)
|
||||
} else if ip := pod.Annotations[AnnotationPodIP]; ip != "" {
|
||||
logc.WithField("ip", ip).Debug("No PodStatus IPs, use Calico singular annotation")
|
||||
podIPs = append(podIPs, ip)
|
||||
} else if ips := pod.Annotations[AnnotationAWSPodIPs]; ips != "" {
|
||||
logc.WithField("ips", ips).Debug("No PodStatus IPs, use AWS VPC annotation")
|
||||
podIPs = append(podIPs, strings.Split(ips, ",")...)
|
||||
} else {
|
||||
log.WithField("ip", podIP).Debug("Pod has no IP.")
|
||||
logc.Debug("Pod has no IP")
|
||||
return nil, nil
|
||||
}
|
||||
_, ipNet, err := cnet.ParseCIDROrIP(podIP)
|
||||
if err != nil {
|
||||
log.WithFields(log.Fields{"ip": podIP, "pod": pod.Name}).WithError(err).Error("Failed to parse pod IP")
|
||||
return nil, err
|
||||
var podIPNets []*cnet.IPNet
|
||||
for _, ip := range podIPs {
|
||||
_, ipNet, err := cnet.ParseCIDROrIP(ip)
|
||||
if err != nil {
|
||||
logc.WithFields(log.Fields{"ip": ip}).WithError(err).Error("Failed to parse pod IP")
|
||||
return nil, err
|
||||
}
|
||||
podIPNets = append(podIPNets, ipNet)
|
||||
}
|
||||
return []string{ipNet.String()}, nil
|
||||
return podIPNets, nil
|
||||
}
|
||||
|
||||
// PodToWorkloadEndpoint converts a Pod to a WorkloadEndpoint. It assumes the calling code
|
||||
// has verified that the provided Pod is valid to convert to a WorkloadEndpoint.
|
||||
// PodToWorkloadEndpoint requires a Pods Name and Node Name to be populated. It will
|
||||
// fail to convert from a Pod to WorkloadEndpoint otherwise.
|
||||
func (c Converter) PodToWorkloadEndpoint(pod *kapiv1.Pod) (*model.KVPair, error) {
|
||||
log.WithField("pod", pod).Debug("Converting pod to WorkloadEndpoint")
|
||||
// Get all the profiles that apply
|
||||
var profiles []string
|
||||
// StagedKubernetesNetworkPolicyToStagedName converts a StagedKubernetesNetworkPolicy name into a StagedNetworkPolicy name
|
||||
func (c converter) StagedKubernetesNetworkPolicyToStagedName(stagedK8sName string) string {
|
||||
return fmt.Sprintf(K8sNetworkPolicyNamePrefix + stagedK8sName)
|
||||
}
|
||||
|
||||
// Pull out the Namespace based profile off the pod name and Namespace.
|
||||
profiles = append(profiles, NamespaceProfileNamePrefix+pod.Namespace)
|
||||
|
||||
// Pull out the Serviceaccount based profile off the pod SA and namespace
|
||||
if pod.Spec.ServiceAccountName != "" {
|
||||
profiles = append(profiles, serviceAccountNameToProfileName(pod.Spec.ServiceAccountName, pod.Namespace))
|
||||
}
|
||||
|
||||
wepids := names.WorkloadEndpointIdentifiers{
|
||||
Node: pod.Spec.NodeName,
|
||||
Orchestrator: apiv3.OrchestratorKubernetes,
|
||||
Endpoint: "eth0",
|
||||
Pod: pod.Name,
|
||||
}
|
||||
wepName, err := wepids.CalculateWorkloadEndpointName(false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ipNets, err := c.GetPodIPs(pod)
|
||||
if err != nil {
|
||||
// IP address was present but malformed in some way, handle as an explicit failure.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if c.IsFinished(pod) {
|
||||
// Pod is finished but not yet deleted. In this state the IP will have been freed and returned to the pool
|
||||
// so we need to make sure we don't let the caller believe it still belongs to this endpoint.
|
||||
// Pods with no IPs will get filtered out before they get to Felix in the watcher syncer cache layer.
|
||||
// We can't pretend the workload endpoint is deleted _here_ because that would confuse users of the
|
||||
// native v3 Watch() API.
|
||||
ipNets = nil
|
||||
}
|
||||
|
||||
// Generate the interface name based on workload. This must match
|
||||
// the host-side veth configured by the CNI plugin.
|
||||
interfaceName := VethNameForWorkload(pod.Namespace, pod.Name)
|
||||
|
||||
// Build the labels map. Start with the pod labels, and append two additional labels for
|
||||
// namespace and orchestrator matches.
|
||||
labels := pod.Labels
|
||||
if labels == nil {
|
||||
labels = make(map[string]string, 2)
|
||||
}
|
||||
labels[apiv3.LabelNamespace] = pod.Namespace
|
||||
labels[apiv3.LabelOrchestrator] = apiv3.OrchestratorKubernetes
|
||||
|
||||
if pod.Spec.ServiceAccountName != "" {
|
||||
labels[apiv3.LabelServiceAccount] = pod.Spec.ServiceAccountName
|
||||
}
|
||||
|
||||
// Pull out floating IP annotation
|
||||
var floatingIPs []apiv3.IPNAT
|
||||
if annotation, ok := pod.Annotations["cni.projectcalico.org/floatingIPs"]; ok && len(ipNets) > 0 {
|
||||
|
||||
// Parse Annotation data
|
||||
var ips []string
|
||||
err := json.Unmarshal([]byte(annotation), &ips)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse '%s' as JSON: %s", annotation, err)
|
||||
}
|
||||
|
||||
// Get target for NAT
|
||||
podip, podnet, err := cnet.ParseCIDROrIP(ipNets[0])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to parse pod IP: %s", err)
|
||||
}
|
||||
|
||||
netmask, _ := podnet.Mask.Size()
|
||||
|
||||
if netmask != 32 && netmask != 128 {
|
||||
return nil, fmt.Errorf("PodIP is not a valid IP: Mask size is %d, not 32 or 128", netmask)
|
||||
}
|
||||
|
||||
for _, ip := range ips {
|
||||
floatingIPs = append(floatingIPs, apiv3.IPNAT{
|
||||
InternalIP: podip.String(),
|
||||
ExternalIP: ip,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Map any named ports through.
|
||||
var endpointPorts []apiv3.EndpointPort
|
||||
for _, container := range pod.Spec.Containers {
|
||||
for _, containerPort := range container.Ports {
|
||||
if containerPort.Name != "" && containerPort.ContainerPort != 0 {
|
||||
var modelProto numorstring.Protocol
|
||||
switch containerPort.Protocol {
|
||||
case kapiv1.ProtocolUDP:
|
||||
modelProto = numorstring.ProtocolFromString("udp")
|
||||
case kapiv1.ProtocolTCP, kapiv1.Protocol("") /* K8s default is TCP. */ :
|
||||
modelProto = numorstring.ProtocolFromString("tcp")
|
||||
default:
|
||||
log.WithFields(log.Fields{
|
||||
"protocol": containerPort.Protocol,
|
||||
"pod": pod,
|
||||
"port": containerPort,
|
||||
}).Debug("Ignoring named port with unknown protocol")
|
||||
continue
|
||||
}
|
||||
|
||||
endpointPorts = append(endpointPorts, apiv3.EndpointPort{
|
||||
Name: containerPort.Name,
|
||||
Protocol: modelProto,
|
||||
Port: uint16(containerPort.ContainerPort),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create the workload endpoint.
|
||||
wep := apiv3.NewWorkloadEndpoint()
|
||||
wep.ObjectMeta = metav1.ObjectMeta{
|
||||
Name: wepName,
|
||||
Namespace: pod.Namespace,
|
||||
CreationTimestamp: pod.CreationTimestamp,
|
||||
UID: pod.UID,
|
||||
Labels: labels,
|
||||
GenerateName: pod.GenerateName,
|
||||
}
|
||||
wep.Spec = apiv3.WorkloadEndpointSpec{
|
||||
Orchestrator: "k8s",
|
||||
Node: pod.Spec.NodeName,
|
||||
Pod: pod.Name,
|
||||
Endpoint: "eth0",
|
||||
InterfaceName: interfaceName,
|
||||
Profiles: profiles,
|
||||
IPNetworks: ipNets,
|
||||
Ports: endpointPorts,
|
||||
IPNATs: floatingIPs,
|
||||
}
|
||||
|
||||
// Embed the workload endpoint into a KVPair.
|
||||
kvp := model.KVPair{
|
||||
// EndpointSliceToKVP converts a k8s EndpointSlice to a model.KVPair.
|
||||
func (c converter) EndpointSliceToKVP(slice *discovery.EndpointSlice) (*model.KVPair, error) {
|
||||
return &model.KVPair{
|
||||
Key: model.ResourceKey{
|
||||
Name: wepName,
|
||||
Namespace: pod.Namespace,
|
||||
Kind: apiv3.KindWorkloadEndpoint,
|
||||
Name: slice.Name,
|
||||
Namespace: slice.Namespace,
|
||||
Kind: model.KindKubernetesEndpointSlice,
|
||||
},
|
||||
Value: wep,
|
||||
Revision: pod.ResourceVersion,
|
||||
}
|
||||
return &kvp, nil
|
||||
Value: slice.DeepCopy(),
|
||||
Revision: slice.ResourceVersion,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c converter) ServiceToKVP(service *kapiv1.Service) (*model.KVPair, error) {
|
||||
return &model.KVPair{
|
||||
Key: model.ResourceKey{
|
||||
Name: service.Name,
|
||||
Namespace: service.Namespace,
|
||||
Kind: model.KindKubernetesService,
|
||||
},
|
||||
Value: service.DeepCopy(),
|
||||
Revision: service.ResourceVersion,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// K8sNetworkPolicyToCalico converts a k8s NetworkPolicy to a model.KVPair.
|
||||
func (c Converter) K8sNetworkPolicyToCalico(np *networkingv1.NetworkPolicy) (*model.KVPair, error) {
|
||||
func (c converter) K8sNetworkPolicyToCalico(np *networkingv1.NetworkPolicy) (*model.KVPair, error) {
|
||||
// Pull out important fields.
|
||||
policyName := fmt.Sprintf(K8sNetworkPolicyNamePrefix + np.Name)
|
||||
|
||||
@@ -359,12 +276,16 @@ func (c Converter) K8sNetworkPolicyToCalico(np *networkingv1.NetworkPolicy) (*mo
|
||||
// This order might change in future.
|
||||
order := float64(1000.0)
|
||||
|
||||
errorTracker := cerrors.ErrorPolicyConversion{PolicyName: np.Name}
|
||||
|
||||
// Generate the ingress rules list.
|
||||
var ingressRules []apiv3.Rule
|
||||
for _, r := range np.Spec.Ingress {
|
||||
rules, err := c.k8sRuleToCalico(r.From, r.Ports, np.Namespace, true)
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("dropping k8s rule that couldn't be converted.")
|
||||
// Add rule to conversion error slice
|
||||
errorTracker.BadIngressRule(&r, fmt.Sprintf("k8s rule couldn't be converted: %s", err))
|
||||
} else {
|
||||
ingressRules = append(ingressRules, rules...)
|
||||
}
|
||||
@@ -376,6 +297,8 @@ func (c Converter) K8sNetworkPolicyToCalico(np *networkingv1.NetworkPolicy) (*mo
|
||||
rules, err := c.k8sRuleToCalico(r.To, r.Ports, np.Namespace, false)
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("dropping k8s rule that couldn't be converted")
|
||||
// Add rule to conversion error slice
|
||||
errorTracker.BadEgressRule(&r, fmt.Sprintf("k8s rule couldn't be converted: %s", err))
|
||||
} else {
|
||||
egressRules = append(egressRules, rules...)
|
||||
}
|
||||
@@ -419,6 +342,7 @@ func (c Converter) K8sNetworkPolicyToCalico(np *networkingv1.NetworkPolicy) (*mo
|
||||
Namespace: np.Namespace,
|
||||
CreationTimestamp: np.CreationTimestamp,
|
||||
UID: np.UID,
|
||||
ResourceVersion: np.ResourceVersion,
|
||||
}
|
||||
policy.Spec = apiv3.NetworkPolicySpec{
|
||||
Order: &order,
|
||||
@@ -428,8 +352,8 @@ func (c Converter) K8sNetworkPolicyToCalico(np *networkingv1.NetworkPolicy) (*mo
|
||||
Types: types,
|
||||
}
|
||||
|
||||
// Build and return the KVPair.
|
||||
return &model.KVPair{
|
||||
// Build the KVPair.
|
||||
kvp := &model.KVPair{
|
||||
Key: model.ResourceKey{
|
||||
Name: policyName,
|
||||
Namespace: np.Namespace,
|
||||
@@ -437,12 +361,15 @@ func (c Converter) K8sNetworkPolicyToCalico(np *networkingv1.NetworkPolicy) (*mo
|
||||
},
|
||||
Value: policy,
|
||||
Revision: np.ResourceVersion,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Return the KVPair with conversion errors if applicable
|
||||
return kvp, errorTracker.GetError()
|
||||
}
|
||||
|
||||
// k8sSelectorToCalico takes a namespaced k8s label selector and returns the Calico
|
||||
// equivalent.
|
||||
func (c Converter) k8sSelectorToCalico(s *metav1.LabelSelector, selectorType selectorType) string {
|
||||
func (c converter) k8sSelectorToCalico(s *metav1.LabelSelector, selectorType selectorType) string {
|
||||
// Only prefix pod selectors - this won't work for namespace selectors.
|
||||
selectors := []string{}
|
||||
if selectorType == SelectorPod {
|
||||
@@ -492,7 +419,7 @@ func (c Converter) k8sSelectorToCalico(s *metav1.LabelSelector, selectorType sel
|
||||
return strings.Join(selectors, " && ")
|
||||
}
|
||||
|
||||
func (c Converter) k8sRuleToCalico(rPeers []networkingv1.NetworkPolicyPeer, rPorts []networkingv1.NetworkPolicyPort, ns string, ingress bool) ([]apiv3.Rule, error) {
|
||||
func (c converter) k8sRuleToCalico(rPeers []networkingv1.NetworkPolicyPeer, rPorts []networkingv1.NetworkPolicyPort, ns string, ingress bool) ([]apiv3.Rule, error) {
|
||||
rules := []apiv3.Rule{}
|
||||
peers := []*networkingv1.NetworkPolicyPeer{}
|
||||
ports := []*networkingv1.NetworkPolicyPort{}
|
||||
@@ -514,15 +441,19 @@ func (c Converter) k8sRuleToCalico(rPeers []networkingv1.NetworkPolicyPeer, rPor
|
||||
if p.Port != nil {
|
||||
portval := intstr.FromString(p.Port.String())
|
||||
port.Port = &portval
|
||||
|
||||
}
|
||||
if p.Protocol != nil {
|
||||
protval := kapiv1.Protocol(fmt.Sprintf("%s", *p.Protocol))
|
||||
port.Protocol = &protval
|
||||
} else {
|
||||
// TCP is the implicit default (as per the definition of NetworkPolicyPort).
|
||||
// Make the default explicit here because our data-model always requires
|
||||
// the protocol to be specified if we're doing a port match.
|
||||
port.Protocol = &protoTCP
|
||||
}
|
||||
if p.Protocol != nil {
|
||||
protval := kapiv1.Protocol(fmt.Sprintf("%s", *p.Protocol))
|
||||
port.Protocol = &protval
|
||||
|
||||
if p.EndPort != nil {
|
||||
port.EndPort = p.EndPort
|
||||
}
|
||||
ports = append(ports, &port)
|
||||
}
|
||||
@@ -543,7 +474,6 @@ func (c Converter) k8sRuleToCalico(rPeers []networkingv1.NetworkPolicyPeer, rPor
|
||||
return nil, fmt.Errorf("failed to parse k8s port: %s", err)
|
||||
}
|
||||
|
||||
// These are either both present or both nil
|
||||
if protocol == nil && calicoPorts == nil {
|
||||
// If nil, no ports were specified, or an empty port struct was provided, which we translate to allowing all.
|
||||
// We want to use a nil protocol and a nil list of ports, which will allow any destination (for ingress).
|
||||
@@ -553,7 +483,14 @@ func (c Converter) k8sRuleToCalico(rPeers []networkingv1.NetworkPolicyPeer, rPor
|
||||
}
|
||||
|
||||
pStr := protocol.String()
|
||||
protocolPorts[pStr] = append(protocolPorts[pStr], calicoPorts...)
|
||||
// treat nil as 'all ports'
|
||||
if calicoPorts == nil {
|
||||
protocolPorts[pStr] = nil
|
||||
} else if _, ok := protocolPorts[pStr]; !ok || len(protocolPorts[pStr]) > 0 {
|
||||
// don't overwrite a nil (allow all ports) if present; if no ports yet for this protocol
|
||||
// or 1+ ports which aren't 'all ports', then add the present ports
|
||||
protocolPorts[pStr] = append(protocolPorts[pStr], calicoPorts...)
|
||||
}
|
||||
}
|
||||
|
||||
protocols := make([]string, 0, len(protocolPorts))
|
||||
@@ -567,6 +504,7 @@ func (c Converter) k8sRuleToCalico(rPeers []networkingv1.NetworkPolicyPeer, rPor
|
||||
// with each rule containing all the allowed ports.
|
||||
for _, protocolStr := range protocols {
|
||||
calicoPorts := protocolPorts[protocolStr]
|
||||
calicoPorts = SimplifyPorts(calicoPorts)
|
||||
|
||||
var protocol *numorstring.Protocol
|
||||
if protocolStr != "" {
|
||||
@@ -610,7 +548,77 @@ func (c Converter) k8sRuleToCalico(rPeers []networkingv1.NetworkPolicyPeer, rPor
|
||||
return rules, nil
|
||||
}
|
||||
|
||||
func (c Converter) k8sPortToCalicoFields(port *networkingv1.NetworkPolicyPort) (protocol *numorstring.Protocol, dstPorts []numorstring.Port, err error) {
|
||||
// SimplifyPorts calculates a minimum set of port ranges that cover the given set of ports.
|
||||
// For example, if the input was [80, 81, 82, 9090, "foo"] the output would consist of
|
||||
// [80-82, 9090, "foo"] in some order.
|
||||
func SimplifyPorts(ports []numorstring.Port) []numorstring.Port {
|
||||
if len(ports) <= 1 {
|
||||
return ports
|
||||
}
|
||||
var numericPorts []int
|
||||
var outputPorts []numorstring.Port
|
||||
for _, p := range ports {
|
||||
if p.PortName != "" {
|
||||
// Pass named ports through immediately, there's nothing to be done for them.
|
||||
outputPorts = append(outputPorts, p)
|
||||
} else {
|
||||
// Work with ints to avoid overflow with the uint16 port type.
|
||||
// In practice, we currently only get single ports here so this
|
||||
// loop should run exactly once.
|
||||
for i := int(p.MinPort); i <= int(p.MaxPort); i++ {
|
||||
numericPorts = append(numericPorts, i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(numericPorts) <= 1 {
|
||||
// We have nothing to combine, short-circuit.
|
||||
return ports
|
||||
}
|
||||
|
||||
// Sort the ports so it will be easy to find ranges.
|
||||
sort.Ints(numericPorts)
|
||||
|
||||
// Each pass around this outer loop extracts one port range from the sorted slice
|
||||
// and it moves the slice along to the start of the next range.
|
||||
for len(numericPorts) > 0 {
|
||||
// Initialise the next range to the contain only the first port in the slice.
|
||||
firstPortInRange := numericPorts[0]
|
||||
lastPortInRange := firstPortInRange
|
||||
|
||||
// Scan ahead, looking for ports that can be combined into this range.
|
||||
numericPorts = numericPorts[1:]
|
||||
for len(numericPorts) > 0 {
|
||||
nextPort := numericPorts[0]
|
||||
if nextPort > lastPortInRange+1 {
|
||||
// This port can't be coalesced with the existing range, break out so
|
||||
// that we record the range; then we'll loop again and pick up this
|
||||
// port as the start of a new range.
|
||||
break
|
||||
}
|
||||
// The next port is either equal to the last port (due to a duplicate port
|
||||
// in the input) or it is exactly one greater. Extend the range to include
|
||||
// it.
|
||||
lastPortInRange = nextPort
|
||||
numericPorts = numericPorts[1:]
|
||||
}
|
||||
|
||||
// Record the port.
|
||||
outputPorts = appendPortRange(outputPorts, firstPortInRange, lastPortInRange)
|
||||
}
|
||||
|
||||
return outputPorts
|
||||
}
|
||||
|
||||
func appendPortRange(ports []numorstring.Port, first, last int) []numorstring.Port {
|
||||
portRange, err := numorstring.PortFromRange(uint16(first), uint16(last))
|
||||
if err != nil {
|
||||
log.WithError(err).Panic("Failed to make port range from ports that should have been pre-validated.")
|
||||
}
|
||||
return append(ports, portRange)
|
||||
}
|
||||
|
||||
func (c converter) k8sPortToCalicoFields(port *networkingv1.NetworkPolicyPort) (protocol *numorstring.Protocol, dstPorts []numorstring.Port, err error) {
|
||||
// If no port info, return zero values for all fields (protocol, dstPorts).
|
||||
if port == nil {
|
||||
return
|
||||
@@ -624,7 +632,7 @@ func (c Converter) k8sPortToCalicoFields(port *networkingv1.NetworkPolicyPort) (
|
||||
return
|
||||
}
|
||||
|
||||
func (c Converter) k8sProtocolToCalico(protocol *kapiv1.Protocol) *numorstring.Protocol {
|
||||
func (c converter) k8sProtocolToCalico(protocol *kapiv1.Protocol) *numorstring.Protocol {
|
||||
if protocol != nil {
|
||||
p := numorstring.ProtocolFromString(string(*protocol))
|
||||
return &p
|
||||
@@ -632,7 +640,7 @@ func (c Converter) k8sProtocolToCalico(protocol *kapiv1.Protocol) *numorstring.P
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c Converter) k8sPeerToCalicoFields(peer *networkingv1.NetworkPolicyPeer, ns string) (selector, nsSelector string, nets []string, notNets []string) {
|
||||
func (c converter) k8sPeerToCalicoFields(peer *networkingv1.NetworkPolicyPeer, ns string) (selector, nsSelector string, nets []string, notNets []string) {
|
||||
// If no peer, return zero values for all fields (selector, nets and !nets).
|
||||
if peer == nil {
|
||||
return
|
||||
@@ -668,12 +676,16 @@ func (c Converter) k8sPeerToCalicoFields(peer *networkingv1.NetworkPolicyPeer, n
|
||||
return
|
||||
}
|
||||
|
||||
func (c Converter) k8sPortToCalico(port networkingv1.NetworkPolicyPort) ([]numorstring.Port, error) {
|
||||
func (c converter) k8sPortToCalico(port networkingv1.NetworkPolicyPort) ([]numorstring.Port, error) {
|
||||
var portList []numorstring.Port
|
||||
if port.Port != nil {
|
||||
p, err := numorstring.PortFromString(port.Port.String())
|
||||
calicoPort := port.Port.String()
|
||||
if port.EndPort != nil {
|
||||
calicoPort = fmt.Sprintf("%s:%d", calicoPort, *port.EndPort)
|
||||
}
|
||||
p, err := numorstring.PortFromString(calicoPort)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid port %+v: %s", port.Port, err)
|
||||
return nil, fmt.Errorf("invalid port %+v: %s", calicoPort, err)
|
||||
}
|
||||
return append(portList, p), nil
|
||||
}
|
||||
@@ -683,7 +695,7 @@ func (c Converter) k8sPortToCalico(port networkingv1.NetworkPolicyPort) ([]numor
|
||||
}
|
||||
|
||||
// ProfileNameToNamespace extracts the Namespace name from the given Profile name.
|
||||
func (c Converter) ProfileNameToNamespace(profileName string) (string, error) {
|
||||
func (c converter) ProfileNameToNamespace(profileName string) (string, error) {
|
||||
// Profile objects backed by Namespaces have form "kns.<ns_name>"
|
||||
if !strings.HasPrefix(profileName, NamespaceProfileNamePrefix) {
|
||||
// This is not backed by a Kubernetes Namespace.
|
||||
@@ -693,30 +705,6 @@ func (c Converter) ProfileNameToNamespace(profileName string) (string, error) {
|
||||
return strings.TrimPrefix(profileName, NamespaceProfileNamePrefix), nil
|
||||
}
|
||||
|
||||
// JoinNetworkPolicyRevisions constructs the revision from the individual CRD and K8s NetworkPolicy
|
||||
// revisions.
|
||||
func (c Converter) JoinNetworkPolicyRevisions(crdNPRev, k8sNPRev string) string {
|
||||
return crdNPRev + "/" + k8sNPRev
|
||||
}
|
||||
|
||||
// SplitNetworkPolicyRevision extracts the CRD and K8s NetworkPolicy revisions from the combined
|
||||
// revision returned on the KDD NetworkPolicy client.
|
||||
func (c Converter) SplitNetworkPolicyRevision(rev string) (crdNPRev string, k8sNPRev string, err error) {
|
||||
if rev == "" {
|
||||
return
|
||||
}
|
||||
|
||||
revs := strings.Split(rev, "/")
|
||||
if len(revs) != 2 {
|
||||
err = fmt.Errorf("ResourceVersion is not valid: %s", rev)
|
||||
return
|
||||
}
|
||||
|
||||
crdNPRev = revs[0]
|
||||
k8sNPRev = revs[1]
|
||||
return
|
||||
}
|
||||
|
||||
// serviceAccountNameToProfileName creates a profile name that is a join
|
||||
// of 'ksa.' + namespace + "." + serviceaccount name.
|
||||
func serviceAccountNameToProfileName(sa, namespace string) string {
|
||||
@@ -731,7 +719,7 @@ func serviceAccountNameToProfileName(sa, namespace string) string {
|
||||
// ServiceAccountToProfile converts a ServiceAccount to a Calico Profile. The Profile stores
|
||||
// labels from the ServiceAccount which are inherited by the WorkloadEndpoints within
|
||||
// the Profile.
|
||||
func (c Converter) ServiceAccountToProfile(sa *kapiv1.ServiceAccount) (*model.KVPair, error) {
|
||||
func (c converter) ServiceAccountToProfile(sa *kapiv1.ServiceAccount) (*model.KVPair, error) {
|
||||
// Generate the labels to apply to the profile, using a special prefix
|
||||
// to indicate that these are the labels from the parent Kubernetes ServiceAccount.
|
||||
labels := map[string]string{}
|
||||
@@ -739,6 +727,10 @@ func (c Converter) ServiceAccountToProfile(sa *kapiv1.ServiceAccount) (*model.KV
|
||||
labels[ServiceAccountLabelPrefix+k] = v
|
||||
}
|
||||
|
||||
// Add a label for the serviceaccount's name. This allows exact namespace matching
|
||||
// based on name within the serviceAccountSelector.
|
||||
labels[ServiceAccountLabelPrefix+NameLabel] = sa.Name
|
||||
|
||||
name := serviceAccountNameToProfileName(sa.Name, sa.Namespace)
|
||||
profile := apiv3.NewProfile()
|
||||
profile.ObjectMeta = metav1.ObjectMeta{
|
||||
@@ -746,15 +738,7 @@ func (c Converter) ServiceAccountToProfile(sa *kapiv1.ServiceAccount) (*model.KV
|
||||
CreationTimestamp: sa.CreationTimestamp,
|
||||
UID: sa.UID,
|
||||
}
|
||||
|
||||
// Only set labels to apply when there are actually labels. This makes the
|
||||
// result of this function consistent with the struct as loaded directly
|
||||
// from etcd, which uses nil for the empty map.
|
||||
if len(labels) != 0 {
|
||||
profile.Spec.LabelsToApply = labels
|
||||
} else {
|
||||
profile.Spec.LabelsToApply = nil
|
||||
}
|
||||
profile.Spec.LabelsToApply = labels
|
||||
|
||||
// Embed the profile in a KVPair.
|
||||
kvp := model.KVPair{
|
||||
@@ -769,7 +753,7 @@ func (c Converter) ServiceAccountToProfile(sa *kapiv1.ServiceAccount) (*model.KV
|
||||
}
|
||||
|
||||
// ProfileNameToServiceAccount extracts the ServiceAccount name from the given Profile name.
|
||||
func (c Converter) ProfileNameToServiceAccount(profileName string) (ns, sa string, err error) {
|
||||
func (c converter) ProfileNameToServiceAccount(profileName string) (ns, sa string, err error) {
|
||||
|
||||
// Profile objects backed by ServiceAccounts have form "ksa.<namespace>.<sa_name>"
|
||||
if !strings.HasPrefix(profileName, ServiceAccountProfileNamePrefix) {
|
||||
@@ -792,15 +776,15 @@ func (c Converter) ProfileNameToServiceAccount(profileName string) (ns, sa strin
|
||||
// JoinProfileRevisions constructs the revision from the individual namespace and serviceaccount
|
||||
// revisions.
|
||||
// This is conditional on the feature flag for serviceaccount set or not.
|
||||
func (c Converter) JoinProfileRevisions(nsRev, saRev string) string {
|
||||
func (c converter) JoinProfileRevisions(nsRev, saRev string) string {
|
||||
return nsRev + "/" + saRev
|
||||
}
|
||||
|
||||
// SplitProfileRevision extracts the namespace and serviceaccount revisions from the combined
|
||||
// revision returned on the KDD service account based profile.
|
||||
// This is conditional on the feature flag for serviceaccount set or not.
|
||||
func (c Converter) SplitProfileRevision(rev string) (nsRev string, saRev string, err error) {
|
||||
if rev == "" {
|
||||
func (c converter) SplitProfileRevision(rev string) (nsRev string, saRev string, err error) {
|
||||
if rev == "" || rev == "0" {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -809,8 +793,19 @@ func (c Converter) SplitProfileRevision(rev string) (nsRev string, saRev string,
|
||||
err = fmt.Errorf("ResourceVersion is not valid: %s", rev)
|
||||
return
|
||||
}
|
||||
|
||||
nsRev = revs[0]
|
||||
saRev = revs[1]
|
||||
return
|
||||
}
|
||||
|
||||
func stringsToIPNets(ipStrings []string) ([]*cnet.IPNet, error) {
|
||||
var podIPNets []*cnet.IPNet
|
||||
for _, ip := range ipStrings {
|
||||
_, ipNet, err := cnet.ParseCIDROrIP(ip)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
podIPNets = append(podIPNets, ipNet)
|
||||
}
|
||||
return podIPNets, nil
|
||||
}
|
||||
32
vendor/github.com/projectcalico/calico/libcalico-go/lib/backend/k8s/conversion/workload_endpoint.go
generated
vendored
Normal file
32
vendor/github.com/projectcalico/calico/libcalico-go/lib/backend/k8s/conversion/workload_endpoint.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
// Copyright (c) 2016-2020 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// TODO move the WorkloadEndpoint converters to is own package. Some refactoring of the annotation and label constants
|
||||
// is necessary to avoid circular imports, which is why this has been deferred.
|
||||
package conversion
|
||||
|
||||
import (
|
||||
kapiv1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/backend/model"
|
||||
)
|
||||
|
||||
type WorkloadEndpointConverter interface {
|
||||
VethNameForWorkload(namespace, podName string) string
|
||||
PodToWorkloadEndpoints(pod *kapiv1.Pod) ([]*model.KVPair, error)
|
||||
}
|
||||
|
||||
func NewWorkloadEndpointConverter() WorkloadEndpointConverter {
|
||||
return &defaultWorkloadEndpointConverter{}
|
||||
}
|
||||
285
vendor/github.com/projectcalico/calico/libcalico-go/lib/backend/k8s/conversion/workload_endpoint_default.go
generated
vendored
Normal file
285
vendor/github.com/projectcalico/calico/libcalico-go/lib/backend/k8s/conversion/workload_endpoint_default.go
generated
vendored
Normal file
@@ -0,0 +1,285 @@
|
||||
// Copyright (c) 2016-2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package conversion
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
kapiv1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
|
||||
"github.com/projectcalico/api/pkg/lib/numorstring"
|
||||
|
||||
libapiv3 "github.com/projectcalico/calico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/backend/model"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/json"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/names"
|
||||
cnet "github.com/projectcalico/calico/libcalico-go/lib/net"
|
||||
)
|
||||
|
||||
type defaultWorkloadEndpointConverter struct{}
|
||||
|
||||
// VethNameForWorkload returns a deterministic veth name
|
||||
// for the given Kubernetes workload (WEP) name and namespace.
|
||||
func (wc defaultWorkloadEndpointConverter) VethNameForWorkload(namespace, podname string) string {
|
||||
// A SHA1 is always 20 bytes long, and so is sufficient for generating the
|
||||
// veth name and mac addr.
|
||||
h := sha1.New()
|
||||
h.Write([]byte(fmt.Sprintf("%s.%s", namespace, podname)))
|
||||
prefix := os.Getenv("FELIX_INTERFACEPREFIX")
|
||||
if prefix == "" {
|
||||
// Prefix is not set. Default to "cali"
|
||||
prefix = "cali"
|
||||
} else {
|
||||
// Prefix is set - use the first value in the list.
|
||||
splits := strings.Split(prefix, ",")
|
||||
prefix = splits[0]
|
||||
}
|
||||
log.WithField("prefix", prefix).Debugf("Using prefix to create a WorkloadEndpoint veth name")
|
||||
return fmt.Sprintf("%s%s", prefix, hex.EncodeToString(h.Sum(nil))[:11])
|
||||
}
|
||||
|
||||
func (wc defaultWorkloadEndpointConverter) PodToWorkloadEndpoints(pod *kapiv1.Pod) ([]*model.KVPair, error) {
|
||||
wep, err := wc.podToDefaultWorkloadEndpoint(pod)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return []*model.KVPair{wep}, nil
|
||||
}
|
||||
|
||||
// PodToWorkloadEndpoint converts a Pod to a WorkloadEndpoint. It assumes the calling code
|
||||
// has verified that the provided Pod is valid to convert to a WorkloadEndpoint.
|
||||
// PodToWorkloadEndpoint requires a Pods Name and Node Name to be populated. It will
|
||||
// fail to convert from a Pod to WorkloadEndpoint otherwise.
|
||||
func (wc defaultWorkloadEndpointConverter) podToDefaultWorkloadEndpoint(pod *kapiv1.Pod) (*model.KVPair, error) {
|
||||
log.WithField("pod", pod).Debug("Converting pod to WorkloadEndpoint")
|
||||
// Get all the profiles that apply
|
||||
var profiles []string
|
||||
|
||||
// Pull out the Namespace based profile off the pod name and Namespace.
|
||||
profiles = append(profiles, NamespaceProfileNamePrefix+pod.Namespace)
|
||||
|
||||
// Pull out the Serviceaccount based profile off the pod SA and namespace
|
||||
if pod.Spec.ServiceAccountName != "" {
|
||||
profiles = append(profiles, serviceAccountNameToProfileName(pod.Spec.ServiceAccountName, pod.Namespace))
|
||||
}
|
||||
|
||||
wepids := names.WorkloadEndpointIdentifiers{
|
||||
Node: pod.Spec.NodeName,
|
||||
Orchestrator: apiv3.OrchestratorKubernetes,
|
||||
Endpoint: "eth0",
|
||||
Pod: pod.Name,
|
||||
}
|
||||
wepName, err := wepids.CalculateWorkloadEndpointName(false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
podIPNets, err := getPodIPs(pod)
|
||||
if err != nil {
|
||||
// IP address was present but malformed in some way, handle as an explicit failure.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if IsFinished(pod) {
|
||||
// Pod is finished but not yet deleted. In this state the IP will have been freed and returned to the pool
|
||||
// so we need to make sure we don't let the caller believe it still belongs to this endpoint.
|
||||
// Pods with no IPs will get filtered out before they get to Felix in the watcher syncer cache layer.
|
||||
// We can't pretend the workload endpoint is deleted _here_ because that would confuse users of the
|
||||
// native v3 Watch() API.
|
||||
log.Debug("Pod is in a 'finished' state so no longer owns its IP(s).")
|
||||
podIPNets = nil
|
||||
}
|
||||
|
||||
ipNets := []string{}
|
||||
for _, ipNet := range podIPNets {
|
||||
ipNets = append(ipNets, ipNet.String())
|
||||
}
|
||||
|
||||
// Generate the interface name based on workload. This must match
|
||||
// the host-side veth configured by the CNI plugin.
|
||||
interfaceName := wc.VethNameForWorkload(pod.Namespace, pod.Name)
|
||||
|
||||
// Build the labels map. Start with the pod labels, and append two additional labels for
|
||||
// namespace and orchestrator matches.
|
||||
labels := pod.Labels
|
||||
if labels == nil {
|
||||
labels = make(map[string]string, 2)
|
||||
}
|
||||
labels[apiv3.LabelNamespace] = pod.Namespace
|
||||
labels[apiv3.LabelOrchestrator] = apiv3.OrchestratorKubernetes
|
||||
|
||||
if pod.Spec.ServiceAccountName != "" && len(pod.Spec.ServiceAccountName) < 63 {
|
||||
// For backwards compatibility, include the label if less than 63 characters.
|
||||
labels[apiv3.LabelServiceAccount] = pod.Spec.ServiceAccountName
|
||||
}
|
||||
|
||||
// Pull out floating IP annotation
|
||||
var floatingIPs []libapiv3.IPNAT
|
||||
if annotation, ok := pod.Annotations["cni.projectcalico.org/floatingIPs"]; ok && len(podIPNets) > 0 {
|
||||
|
||||
// Parse Annotation data
|
||||
var ips []string
|
||||
err := json.Unmarshal([]byte(annotation), &ips)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse '%s' as JSON: %s", annotation, err)
|
||||
}
|
||||
|
||||
// Get IPv4 and IPv6 targets for NAT
|
||||
var podnetV4, podnetV6 *cnet.IPNet
|
||||
for _, ipNet := range podIPNets {
|
||||
if ipNet.IP.To4() != nil {
|
||||
podnetV4 = ipNet
|
||||
netmask, _ := podnetV4.Mask.Size()
|
||||
if netmask != 32 {
|
||||
return nil, fmt.Errorf("PodIP %v is not a valid IPv4: Mask size is %d, not 32", ipNet, netmask)
|
||||
}
|
||||
} else {
|
||||
podnetV6 = ipNet
|
||||
netmask, _ := podnetV6.Mask.Size()
|
||||
if netmask != 128 {
|
||||
return nil, fmt.Errorf("PodIP %v is not a valid IPv6: Mask size is %d, not 128", ipNet, netmask)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, ip := range ips {
|
||||
if strings.Contains(ip, ":") {
|
||||
if podnetV6 != nil {
|
||||
floatingIPs = append(floatingIPs, libapiv3.IPNAT{
|
||||
InternalIP: podnetV6.IP.String(),
|
||||
ExternalIP: ip,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
if podnetV4 != nil {
|
||||
floatingIPs = append(floatingIPs, libapiv3.IPNAT{
|
||||
InternalIP: podnetV4.IP.String(),
|
||||
ExternalIP: ip,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle source IP spoofing annotation
|
||||
var sourcePrefixes []string
|
||||
if annotation, ok := pod.Annotations["cni.projectcalico.org/allowedSourcePrefixes"]; ok && annotation != "" {
|
||||
// Parse Annotation data
|
||||
var requestedSourcePrefixes []string
|
||||
err := json.Unmarshal([]byte(annotation), &requestedSourcePrefixes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse '%s' as JSON: %s", annotation, err)
|
||||
}
|
||||
|
||||
// Filter out any invalid entries and normalize the CIDRs.
|
||||
for _, prefix := range requestedSourcePrefixes {
|
||||
if _, n, err := cnet.ParseCIDR(prefix); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse '%s' as a CIDR: %s", prefix, err)
|
||||
} else {
|
||||
sourcePrefixes = append(sourcePrefixes, n.String())
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Map any named ports through.
|
||||
var endpointPorts []libapiv3.WorkloadEndpointPort
|
||||
for _, container := range pod.Spec.Containers {
|
||||
for _, containerPort := range container.Ports {
|
||||
if containerPort.ContainerPort != 0 && (containerPort.HostPort != 0 || containerPort.Name != "") {
|
||||
var modelProto numorstring.Protocol
|
||||
switch containerPort.Protocol {
|
||||
case kapiv1.ProtocolUDP:
|
||||
modelProto = numorstring.ProtocolFromString("udp")
|
||||
case kapiv1.ProtocolSCTP:
|
||||
modelProto = numorstring.ProtocolFromString("sctp")
|
||||
case kapiv1.ProtocolTCP, kapiv1.Protocol("") /* K8s default is TCP. */ :
|
||||
modelProto = numorstring.ProtocolFromString("tcp")
|
||||
default:
|
||||
log.WithFields(log.Fields{
|
||||
"protocol": containerPort.Protocol,
|
||||
"pod": pod,
|
||||
"port": containerPort,
|
||||
}).Debug("Ignoring named port with unknown protocol")
|
||||
continue
|
||||
}
|
||||
|
||||
endpointPorts = append(endpointPorts, libapiv3.WorkloadEndpointPort{
|
||||
Name: containerPort.Name,
|
||||
Protocol: modelProto,
|
||||
Port: uint16(containerPort.ContainerPort),
|
||||
HostPort: uint16(containerPort.HostPort),
|
||||
HostIP: containerPort.HostIP,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get the container ID if present. This is used in the CNI plugin to distinguish different pods that have
|
||||
// the same name. For example, restarted stateful set pods.
|
||||
containerID := pod.Annotations[AnnotationContainerID]
|
||||
|
||||
// Create the workload endpoint.
|
||||
wep := libapiv3.NewWorkloadEndpoint()
|
||||
wep.ObjectMeta = metav1.ObjectMeta{
|
||||
Name: wepName,
|
||||
Namespace: pod.Namespace,
|
||||
CreationTimestamp: pod.CreationTimestamp,
|
||||
UID: pod.UID,
|
||||
Labels: labels,
|
||||
GenerateName: pod.GenerateName,
|
||||
}
|
||||
wep.Spec = libapiv3.WorkloadEndpointSpec{
|
||||
Orchestrator: "k8s",
|
||||
Node: pod.Spec.NodeName,
|
||||
Pod: pod.Name,
|
||||
ContainerID: containerID,
|
||||
Endpoint: "eth0",
|
||||
InterfaceName: interfaceName,
|
||||
Profiles: profiles,
|
||||
IPNetworks: ipNets,
|
||||
Ports: endpointPorts,
|
||||
IPNATs: floatingIPs,
|
||||
ServiceAccountName: pod.Spec.ServiceAccountName,
|
||||
AllowSpoofedSourcePrefixes: sourcePrefixes,
|
||||
}
|
||||
|
||||
if v, ok := pod.Annotations["k8s.v1.cni.cncf.io/network-status"]; ok {
|
||||
if wep.Annotations == nil {
|
||||
wep.Annotations = make(map[string]string)
|
||||
}
|
||||
wep.Annotations["k8s.v1.cni.cncf.io/network-status"] = v
|
||||
}
|
||||
|
||||
// Embed the workload endpoint into a KVPair.
|
||||
kvp := model.KVPair{
|
||||
Key: model.ResourceKey{
|
||||
Name: wepName,
|
||||
Namespace: pod.Namespace,
|
||||
Kind: libapiv3.KindWorkloadEndpoint,
|
||||
},
|
||||
Value: wep,
|
||||
Revision: pod.ResourceVersion,
|
||||
}
|
||||
return &kvp, nil
|
||||
}
|
||||
@@ -18,7 +18,7 @@ import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -21,7 +21,7 @@ import (
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2020 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -16,25 +16,31 @@ package model
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/libcalico-go/lib/net"
|
||||
"github.com/projectcalico/libcalico-go/lib/numorstring"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/projectcalico/api/pkg/lib/numorstring"
|
||||
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/net"
|
||||
)
|
||||
|
||||
var (
|
||||
matchGlobalBGPPeer = regexp.MustCompile("^/?calico/bgp/v1/global/peer_v./([^/]+)$")
|
||||
matchHostBGPPeer = regexp.MustCompile("^/?calico/bgp/v1/host/([^/]+)/peer_v./([^/]+)$")
|
||||
typeBGPPeer = reflect.TypeOf(BGPPeer{})
|
||||
matchGlobalBGPPeer = regexp.MustCompile("^/?calico/bgp/v1/global/peer_v./([^/]+)$")
|
||||
matchHostBGPPeer = regexp.MustCompile("^/?calico/bgp/v1/host/([^/]+)/peer_v./([^/]+)$")
|
||||
typeBGPPeer = reflect.TypeOf(BGPPeer{})
|
||||
ipPortSeparator = "-"
|
||||
defaultPort uint16 = 179
|
||||
)
|
||||
|
||||
type NodeBGPPeerKey struct {
|
||||
Nodename string `json:"-" validate:"omitempty"`
|
||||
PeerIP net.IP `json:"-" validate:"required"`
|
||||
Port uint16 `json:"-" validate:"omitempty"`
|
||||
}
|
||||
|
||||
func (key NodeBGPPeerKey) defaultPath() (string, error) {
|
||||
@@ -45,7 +51,7 @@ func (key NodeBGPPeerKey) defaultPath() (string, error) {
|
||||
return "", errors.ErrorInsufficientIdentifiers{Name: "node"}
|
||||
}
|
||||
e := fmt.Sprintf("/calico/bgp/v1/host/%s/peer_v%d/%s",
|
||||
key.Nodename, key.PeerIP.Version(), key.PeerIP)
|
||||
key.Nodename, key.PeerIP.Version(), combineIPAndPort(key.PeerIP, key.Port))
|
||||
return e, nil
|
||||
}
|
||||
|
||||
@@ -62,12 +68,13 @@ func (key NodeBGPPeerKey) valueType() (reflect.Type, error) {
|
||||
}
|
||||
|
||||
func (key NodeBGPPeerKey) String() string {
|
||||
return fmt.Sprintf("BGPPeer(node=%s, ip=%s)", key.Nodename, key.PeerIP)
|
||||
return fmt.Sprintf("BGPPeer(node=%s, ip=%s, port=%d)", key.Nodename, key.PeerIP, key.Port)
|
||||
}
|
||||
|
||||
type NodeBGPPeerListOptions struct {
|
||||
Nodename string
|
||||
PeerIP net.IP
|
||||
Port uint16
|
||||
}
|
||||
|
||||
func (options NodeBGPPeerListOptions) defaultPathRoot() string {
|
||||
@@ -78,19 +85,21 @@ func (options NodeBGPPeerListOptions) defaultPathRoot() string {
|
||||
options.Nodename)
|
||||
} else {
|
||||
return fmt.Sprintf("/calico/bgp/v1/host/%s/peer_v%d/%s",
|
||||
options.Nodename, options.PeerIP.Version(), options.PeerIP)
|
||||
options.Nodename, options.PeerIP.Version(), combineIPAndPort(options.PeerIP, options.Port))
|
||||
}
|
||||
}
|
||||
|
||||
func (options NodeBGPPeerListOptions) KeyFromDefaultPath(path string) Key {
|
||||
log.Debugf("Get BGPPeer key from %s", path)
|
||||
nodename := ""
|
||||
var port uint16
|
||||
peerIP := net.IP{}
|
||||
ekeyb := []byte(path)
|
||||
|
||||
if r := matchHostBGPPeer.FindAllSubmatch(ekeyb, -1); len(r) == 1 {
|
||||
var ipBytes []byte
|
||||
ipBytes, port = extractIPAndPort(string(r[0][2]))
|
||||
nodename = string(r[0][1])
|
||||
if err := peerIP.UnmarshalText(r[0][2]); err != nil {
|
||||
if err := peerIP.UnmarshalText(ipBytes); err != nil {
|
||||
log.WithError(err).WithField("PeerIP", r[0][2]).Error("Error unmarshalling GlobalBGPPeer IP address")
|
||||
return nil
|
||||
}
|
||||
@@ -107,11 +116,16 @@ func (options NodeBGPPeerListOptions) KeyFromDefaultPath(path string) Key {
|
||||
log.Debugf("Didn't match hostname %s != %s", options.Nodename, nodename)
|
||||
return nil
|
||||
}
|
||||
return NodeBGPPeerKey{PeerIP: peerIP, Nodename: nodename}
|
||||
|
||||
if port == 0 {
|
||||
return NodeBGPPeerKey{PeerIP: peerIP, Nodename: nodename}
|
||||
}
|
||||
return NodeBGPPeerKey{PeerIP: peerIP, Nodename: nodename, Port: port}
|
||||
}
|
||||
|
||||
type GlobalBGPPeerKey struct {
|
||||
PeerIP net.IP `json:"-" validate:"required"`
|
||||
Port uint16 `json:"-" validate:"omitempty"`
|
||||
}
|
||||
|
||||
func (key GlobalBGPPeerKey) defaultPath() (string, error) {
|
||||
@@ -119,7 +133,7 @@ func (key GlobalBGPPeerKey) defaultPath() (string, error) {
|
||||
return "", errors.ErrorInsufficientIdentifiers{Name: "peerIP"}
|
||||
}
|
||||
e := fmt.Sprintf("/calico/bgp/v1/global/peer_v%d/%s",
|
||||
key.PeerIP.Version(), key.PeerIP)
|
||||
key.PeerIP.Version(), combineIPAndPort(key.PeerIP, key.Port))
|
||||
return e, nil
|
||||
}
|
||||
|
||||
@@ -136,11 +150,12 @@ func (key GlobalBGPPeerKey) valueType() (reflect.Type, error) {
|
||||
}
|
||||
|
||||
func (key GlobalBGPPeerKey) String() string {
|
||||
return fmt.Sprintf("BGPPeer(global, ip=%s)", key.PeerIP)
|
||||
return fmt.Sprintf("BGPPeer(global, ip=%s, port=%d)", key.PeerIP, key.Port)
|
||||
}
|
||||
|
||||
type GlobalBGPPeerListOptions struct {
|
||||
PeerIP net.IP
|
||||
Port uint16
|
||||
}
|
||||
|
||||
func (options GlobalBGPPeerListOptions) defaultPathRoot() string {
|
||||
@@ -148,7 +163,7 @@ func (options GlobalBGPPeerListOptions) defaultPathRoot() string {
|
||||
return "/calico/bgp/v1/global"
|
||||
} else {
|
||||
return fmt.Sprintf("/calico/bgp/v1/global/peer_v%d/%s",
|
||||
options.PeerIP.Version(), options.PeerIP)
|
||||
options.PeerIP.Version(), combineIPAndPort(options.PeerIP, options.Port))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -156,9 +171,12 @@ func (options GlobalBGPPeerListOptions) KeyFromDefaultPath(path string) Key {
|
||||
log.Debugf("Get BGPPeer key from %s", path)
|
||||
peerIP := net.IP{}
|
||||
ekeyb := []byte(path)
|
||||
var port uint16
|
||||
|
||||
if r := matchGlobalBGPPeer.FindAllSubmatch(ekeyb, -1); len(r) == 1 {
|
||||
if err := peerIP.UnmarshalText(r[0][1]); err != nil {
|
||||
var ipBytes []byte
|
||||
ipBytes, port = extractIPAndPort(string(r[0][1]))
|
||||
if err := peerIP.UnmarshalText(ipBytes); err != nil {
|
||||
log.WithError(err).WithField("PeerIP", r[0][1]).Error("Error unmarshalling GlobalBGPPeer IP address")
|
||||
return nil
|
||||
}
|
||||
@@ -171,7 +189,11 @@ func (options GlobalBGPPeerListOptions) KeyFromDefaultPath(path string) Key {
|
||||
log.Debugf("Didn't match peerIP %s != %s", options.PeerIP.String(), peerIP.String())
|
||||
return nil
|
||||
}
|
||||
return GlobalBGPPeerKey{PeerIP: peerIP}
|
||||
|
||||
if port == 0 {
|
||||
return GlobalBGPPeerKey{PeerIP: peerIP, Port: port}
|
||||
}
|
||||
return GlobalBGPPeerKey{PeerIP: peerIP, Port: port}
|
||||
}
|
||||
|
||||
type BGPPeer struct {
|
||||
@@ -184,3 +206,25 @@ type BGPPeer struct {
|
||||
// configuration.
|
||||
ASNum numorstring.ASNumber `json:"as_num,string"`
|
||||
}
|
||||
|
||||
func extractIPAndPort(ipPort string) ([]byte, uint16) {
|
||||
arr := strings.Split(ipPort, ipPortSeparator)
|
||||
if len(arr) == 2 {
|
||||
port, err := strconv.ParseUint(arr[1], 0, 16)
|
||||
if err != nil {
|
||||
log.Warningf("Error extracting port. %#v", err)
|
||||
return []byte(ipPort), defaultPort
|
||||
}
|
||||
return []byte(arr[0]), uint16(port)
|
||||
}
|
||||
return []byte(ipPort), defaultPort
|
||||
}
|
||||
|
||||
func combineIPAndPort(ip net.IP, port uint16) string {
|
||||
if port == 0 || port == defaultPort {
|
||||
return ip.String()
|
||||
} else {
|
||||
strPort := strconv.Itoa(int(port))
|
||||
return ip.String() + ipPortSeparator + strPort
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2016-2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -23,18 +23,22 @@ import (
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/libcalico-go/lib/net"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/net"
|
||||
)
|
||||
|
||||
const (
|
||||
// Common attributes which may be set on allocations by clients.
|
||||
IPAMBlockAttributePod = "pod"
|
||||
IPAMBlockAttributeNamespace = "namespace"
|
||||
IPAMBlockAttributeNode = "node"
|
||||
IPAMBlockAttributeType = "type"
|
||||
IPAMBlockAttributeTypeIPIP = "ipipTunnelAddress"
|
||||
IPAMBlockAttributeTypeVXLAN = "vxlanTunnelAddress"
|
||||
IPAMBlockAttributePod = "pod"
|
||||
IPAMBlockAttributeNamespace = "namespace"
|
||||
IPAMBlockAttributeNode = "node"
|
||||
IPAMBlockAttributeType = "type"
|
||||
IPAMBlockAttributeTypeIPIP = "ipipTunnelAddress"
|
||||
IPAMBlockAttributeTypeVXLAN = "vxlanTunnelAddress"
|
||||
IPAMBlockAttributeTypeVXLANV6 = "vxlanV6TunnelAddress"
|
||||
IPAMBlockAttributeTypeWireguard = "wireguardTunnelAddress"
|
||||
IPAMBlockAttributeTypeWireguardV6 = "wireguardV6TunnelAddress"
|
||||
IPAMBlockAttributeTimestamp = "timestamp"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -91,18 +95,50 @@ func (options BlockListOptions) KeyFromDefaultPath(path string) Key {
|
||||
return nil
|
||||
}
|
||||
cidrStr := strings.Replace(r[0][1], "-", "/", 1)
|
||||
_, cidr, _ := net.ParseCIDR(cidrStr)
|
||||
_, cidr, err := net.ParseCIDR(cidrStr)
|
||||
if err != nil {
|
||||
log.Debugf("find an invalid cidr %s for path=%v , info=%v ", r[0][1], path, err)
|
||||
return nil
|
||||
}
|
||||
return BlockKey{CIDR: *cidr}
|
||||
}
|
||||
|
||||
type AllocationBlock struct {
|
||||
CIDR net.IPNet `json:"cidr"`
|
||||
Affinity *string `json:"affinity"`
|
||||
StrictAffinity bool `json:"strictAffinity"`
|
||||
Allocations []*int `json:"allocations"`
|
||||
Unallocated []int `json:"unallocated"`
|
||||
Attributes []AllocationAttribute `json:"attributes"`
|
||||
Deleted bool `json:"deleted"`
|
||||
// The block's CIDR.
|
||||
CIDR net.IPNet `json:"cidr"`
|
||||
|
||||
// Affinity of the block, if this block has one. If set, it will be of the form
|
||||
// "host:<hostname>". If not set, this block is not affine to a host.
|
||||
Affinity *string `json:"affinity"`
|
||||
|
||||
// Array of allocations in-use within this block. nil entries mean the allocation is free.
|
||||
// For non-nil entries at index i, the index is the ordinal of the allocation within this block
|
||||
// and the value is the index of the associated attributes in the Attributes array.
|
||||
Allocations []*int `json:"allocations"`
|
||||
|
||||
// Unallocated is an ordered list of allocations which are free in the block.
|
||||
Unallocated []int `json:"unallocated"`
|
||||
|
||||
// Attributes is an array of arbitrary metadata associated with allocations in the block. To find
|
||||
// attributes for a given allocation, use the value of the allocation's entry in the Allocations array
|
||||
// as the index of the element in this array.
|
||||
Attributes []AllocationAttribute `json:"attributes"`
|
||||
|
||||
// We store a sequence number that is updated each time the block is written.
|
||||
// Each allocation will also store the sequence number of the block at the time of its creation.
|
||||
// When releasing an IP, passing the sequence number associated with the allocation allows us
|
||||
// to protect against a race condition and ensure the IP hasn't been released and re-allocated
|
||||
// since the release request.
|
||||
SequenceNumber uint64 `json:"sequenceNumber"`
|
||||
|
||||
// Map of allocated ordinal within the block to sequence number of the block at
|
||||
// the time of allocation. Kubernetes does not allow numerical keys for maps, so
|
||||
// the key is cast to a string.
|
||||
SequenceNumberForAllocation map[string]uint64 `json:"sequenceNumberForAllocation"`
|
||||
|
||||
// Deleted is an internal boolean used to workaround a limitation in the Kubernetes API whereby
|
||||
// deletion will not return a conflict error if the block has been updated.
|
||||
Deleted bool `json:"deleted"`
|
||||
|
||||
// HostAffinity is deprecated in favor of Affinity.
|
||||
// This is only to keep compatibility with existing deployments.
|
||||
@@ -110,6 +146,21 @@ type AllocationBlock struct {
|
||||
HostAffinity *string `json:"hostAffinity,omitempty"`
|
||||
}
|
||||
|
||||
func (b *AllocationBlock) SetSequenceNumberForOrdinal(ordinal int) {
|
||||
if b.SequenceNumberForAllocation == nil {
|
||||
b.SequenceNumberForAllocation = map[string]uint64{}
|
||||
}
|
||||
b.SequenceNumberForAllocation[fmt.Sprintf("%d", ordinal)] = b.SequenceNumber
|
||||
}
|
||||
|
||||
func (b *AllocationBlock) GetSequenceNumberForOrdinal(ordinal int) uint64 {
|
||||
return b.SequenceNumberForAllocation[fmt.Sprintf("%d", ordinal)]
|
||||
}
|
||||
|
||||
func (b *AllocationBlock) ClearSequenceNumberForOrdinal(ordinal int) {
|
||||
delete(b.SequenceNumberForAllocation, fmt.Sprintf("%d", ordinal))
|
||||
}
|
||||
|
||||
func (b *AllocationBlock) MarkDeleted() {
|
||||
b.Deleted = true
|
||||
}
|
||||
@@ -120,7 +171,7 @@ func (b *AllocationBlock) IsDeleted() bool {
|
||||
|
||||
func (b *AllocationBlock) Host() string {
|
||||
if b.Affinity != nil && strings.HasPrefix(*b.Affinity, "host:") {
|
||||
return strings.TrimLeft(*b.Affinity, "host:")
|
||||
return strings.TrimPrefix(*b.Affinity, "host:")
|
||||
}
|
||||
return ""
|
||||
}
|
||||
@@ -175,8 +226,7 @@ func (b *AllocationBlock) IPToOrdinal(ip net.IP) (int, error) {
|
||||
|
||||
// Calculates the IP at the given position within the block. ord=0 gives the first IP in the block.
|
||||
func (b *AllocationBlock) OrdinalToIP(ord int) net.IP {
|
||||
sum := big.NewInt(0).Add(net.IPToBigInt(net.IP{IP: b.CIDR.IP}), big.NewInt(int64(ord)))
|
||||
return net.BigIntToIP(sum)
|
||||
return b.CIDR.NthIP(ord)
|
||||
}
|
||||
|
||||
type AllocationAttribute struct {
|
||||
@@ -20,9 +20,10 @@ import (
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/libcalico-go/lib/net"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/net"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -99,6 +100,10 @@ func (options BlockAffinityListOptions) KeyFromDefaultPath(path string) Key {
|
||||
}
|
||||
cidrStr := strings.Replace(r[0][2], "-", "/", 1)
|
||||
_, cidr, _ := net.ParseCIDR(cidrStr)
|
||||
if cidr == nil {
|
||||
log.Debugf("Failed to parse CIDR in block affinity path: %q", path)
|
||||
return nil
|
||||
}
|
||||
host := r[0][1]
|
||||
|
||||
if options.Host != "" && options.Host != host {
|
||||
@@ -21,7 +21,7 @@ import (
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -21,9 +21,10 @@ import (
|
||||
|
||||
"reflect"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/libcalico-go/lib/net"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/net"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -21,8 +21,9 @@ import (
|
||||
|
||||
"reflect"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -22,9 +22,7 @@ const (
|
||||
IPAMConfigGlobalName = "default"
|
||||
)
|
||||
|
||||
var (
|
||||
typeIPAMConfig = reflect.TypeOf(IPAMConfig{})
|
||||
)
|
||||
var typeIPAMConfig = reflect.TypeOf(IPAMConfig{})
|
||||
|
||||
type IPAMConfigKey struct{}
|
||||
|
||||
@@ -51,4 +49,5 @@ func (key IPAMConfigKey) String() string {
|
||||
type IPAMConfig struct {
|
||||
StrictAffinity bool `json:"strict_affinity,omitempty"`
|
||||
AutoAllocateBlocks bool `json:"auto_allocate_blocks,omitempty"`
|
||||
MaxBlocksPerHost int `json:"maxBlocksPerHost,omitempty"`
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2016,2020 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -19,8 +19,9 @@ import (
|
||||
"reflect"
|
||||
"regexp"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -18,7 +18,7 @@ import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2016,2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -16,15 +16,15 @@ package model
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"reflect"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/encap"
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/libcalico-go/lib/net"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/backend/encap"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/net"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -96,11 +96,12 @@ func (options IPPoolListOptions) KeyFromDefaultPath(path string) Key {
|
||||
}
|
||||
|
||||
type IPPool struct {
|
||||
CIDR net.IPNet `json:"cidr"`
|
||||
IPIPInterface string `json:"ipip"`
|
||||
IPIPMode encap.Mode `json:"ipip_mode"`
|
||||
VXLANMode encap.Mode `json:"vxlan_mode"`
|
||||
Masquerade bool `json:"masquerade"`
|
||||
IPAM bool `json:"ipam"`
|
||||
Disabled bool `json:"disabled"`
|
||||
CIDR net.IPNet `json:"cidr"`
|
||||
IPIPInterface string `json:"ipip"`
|
||||
IPIPMode encap.Mode `json:"ipip_mode"`
|
||||
VXLANMode encap.Mode `json:"vxlan_mode"`
|
||||
Masquerade bool `json:"masquerade"`
|
||||
IPAM bool `json:"ipam"`
|
||||
Disabled bool `json:"disabled"`
|
||||
DisableBGPExport bool `json:"disableBGPExport"`
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2016-2020 Tigera, Inc. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -16,17 +16,18 @@ package model
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
net2 "net"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
v3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/net"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/json"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/namespace"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/net"
|
||||
)
|
||||
|
||||
// RawString is used a value type to indicate that the value is a bare non-JSON string
|
||||
@@ -85,10 +86,10 @@ type ListInterface interface {
|
||||
//
|
||||
// The Value is dependent on the Key, but in general will be on of the following
|
||||
// types:
|
||||
// - A pointer to a struct
|
||||
// - A slice or map
|
||||
// - A bare string, boolean value or IP address (i.e. without quotes, so not
|
||||
// JSON format).
|
||||
// - A pointer to a struct
|
||||
// - A slice or map
|
||||
// - A bare string, boolean value or IP address (i.e. without quotes, so not
|
||||
// JSON format).
|
||||
type KVPair struct {
|
||||
Key Key
|
||||
Value interface{}
|
||||
@@ -134,10 +135,6 @@ func KeyToDefaultDeletePath(key Key) (string, error) {
|
||||
return key.defaultDeletePath()
|
||||
}
|
||||
|
||||
func KeyToValueType(key Key) (reflect.Type, error) {
|
||||
return key.valueType()
|
||||
}
|
||||
|
||||
// KeyToDefaultDeleteParentPaths returns a slice of '/'-delimited
|
||||
// paths which are used to delete parent entries that may be auto-created
|
||||
// by directory-based KV stores (e.g. etcd v3). These paths should also be
|
||||
@@ -147,16 +144,19 @@ func KeyToValueType(key Key) (reflect.Type, error) {
|
||||
// in the order supplied in the slice and only if the directory is empty.
|
||||
//
|
||||
// For example,
|
||||
// KeyToDefaultDeletePaths(WorkloadEndpointKey{
|
||||
// Nodename: "h",
|
||||
// OrchestratorID: "o",
|
||||
// WorkloadID: "w",
|
||||
// EndpointID: "e",
|
||||
// })
|
||||
//
|
||||
// KeyToDefaultDeletePaths(WorkloadEndpointKey{
|
||||
// Nodename: "h",
|
||||
// OrchestratorID: "o",
|
||||
// WorkloadID: "w",
|
||||
// EndpointID: "e",
|
||||
// })
|
||||
//
|
||||
// returns
|
||||
//
|
||||
// ["/calico/v1/host/h/workload/o/w/endpoint",
|
||||
// "/calico/v1/host/h/workload/o/w"]
|
||||
//
|
||||
// "/calico/v1/host/h/workload/o/w"]
|
||||
//
|
||||
// indicating that these paths should also be deleted when they are empty.
|
||||
// In this example it is equivalent to deleting the workload when there are
|
||||
@@ -192,10 +192,249 @@ func IsListOptionsLastSegmentPrefix(listOptions ListInterface) bool {
|
||||
// of our <Type>Key structs. Returns nil if the string doesn't match one of
|
||||
// our key types.
|
||||
func KeyFromDefaultPath(path string) Key {
|
||||
// "v3" resource keys strictly require a leading slash but older "v1" keys were permissive.
|
||||
// For ease of parsing, strip the slash off now but pass it down to keyFromDefaultPathInner so
|
||||
// it can check for it later.
|
||||
normalizedPath := path
|
||||
if strings.HasPrefix(normalizedPath, "/") {
|
||||
normalizedPath = normalizedPath[1:]
|
||||
}
|
||||
|
||||
parts := strings.Split(normalizedPath, "/")
|
||||
if len(parts) < 3 {
|
||||
// After removing the optional `/` prefix, should have at least 3 segments.
|
||||
return nil
|
||||
}
|
||||
|
||||
return keyFromDefaultPathInner(path, parts)
|
||||
}
|
||||
|
||||
func keyFromDefaultPathInner(path string, parts []string) Key {
|
||||
if parts[0] != "calico" {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch parts[1] {
|
||||
case "v1":
|
||||
switch parts[2] {
|
||||
case "ipam":
|
||||
return IPPoolListOptions{}.KeyFromDefaultPath(path)
|
||||
case "config":
|
||||
return GlobalConfigKey{Name: strings.Join(parts[3:], "/")}
|
||||
case "host":
|
||||
if len(parts) < 5 {
|
||||
return nil
|
||||
}
|
||||
hostname := parts[3]
|
||||
switch parts[4] {
|
||||
case "workload":
|
||||
if len(parts) != 9 || parts[7] != "endpoint" {
|
||||
return nil
|
||||
}
|
||||
return WorkloadEndpointKey{
|
||||
Hostname: unescapeName(hostname),
|
||||
OrchestratorID: unescapeName(parts[5]),
|
||||
WorkloadID: unescapeName(parts[6]),
|
||||
EndpointID: unescapeName(parts[8]),
|
||||
}
|
||||
case "endpoint":
|
||||
if len(parts) != 6 {
|
||||
return nil
|
||||
}
|
||||
return HostEndpointKey{
|
||||
Hostname: unescapeName(hostname),
|
||||
EndpointID: unescapeName(parts[5]),
|
||||
}
|
||||
case "config":
|
||||
return HostConfigKey{
|
||||
Hostname: hostname,
|
||||
Name: strings.Join(parts[5:], "/"),
|
||||
}
|
||||
case "metadata":
|
||||
if len(parts) != 5 {
|
||||
return nil
|
||||
}
|
||||
return HostMetadataKey{
|
||||
Hostname: hostname,
|
||||
}
|
||||
case "bird_ip":
|
||||
if len(parts) != 5 {
|
||||
return nil
|
||||
}
|
||||
return HostIPKey{
|
||||
Hostname: hostname,
|
||||
}
|
||||
case "wireguard":
|
||||
if len(parts) != 5 {
|
||||
return nil
|
||||
}
|
||||
return WireguardKey{
|
||||
NodeName: hostname,
|
||||
}
|
||||
}
|
||||
case "netset":
|
||||
if len(parts) != 4 {
|
||||
return nil
|
||||
}
|
||||
return NetworkSetKey{
|
||||
Name: unescapeName(parts[3]),
|
||||
}
|
||||
case "Ready":
|
||||
if len(parts) > 3 || path[0] != '/' {
|
||||
return nil
|
||||
}
|
||||
return ReadyFlagKey{}
|
||||
case "policy":
|
||||
if len(parts) < 6 {
|
||||
return nil
|
||||
}
|
||||
switch parts[3] {
|
||||
case "tier":
|
||||
if len(parts) < 6 {
|
||||
return nil
|
||||
}
|
||||
switch parts[5] {
|
||||
case "policy":
|
||||
if len(parts) != 7 {
|
||||
return nil
|
||||
}
|
||||
return PolicyKey{
|
||||
Name: unescapeName(parts[6]),
|
||||
}
|
||||
}
|
||||
case "profile":
|
||||
pk := unescapeName(parts[4])
|
||||
switch parts[5] {
|
||||
case "rules":
|
||||
return ProfileRulesKey{ProfileKey: ProfileKey{pk}}
|
||||
case "labels":
|
||||
return ProfileLabelsKey{ProfileKey: ProfileKey{pk}}
|
||||
}
|
||||
}
|
||||
}
|
||||
case "bgp":
|
||||
switch parts[2] {
|
||||
case "v1":
|
||||
if len(parts) < 5 {
|
||||
return nil
|
||||
}
|
||||
switch parts[3] {
|
||||
case "global":
|
||||
return GlobalBGPConfigListOptions{}.KeyFromDefaultPath(path)
|
||||
case "host":
|
||||
if len(parts) < 6 {
|
||||
return nil
|
||||
}
|
||||
return NodeBGPConfigListOptions{}.KeyFromDefaultPath(path)
|
||||
}
|
||||
}
|
||||
case "ipam":
|
||||
if len(parts) < 5 {
|
||||
return nil
|
||||
}
|
||||
switch parts[2] {
|
||||
case "v2":
|
||||
switch parts[3] {
|
||||
case "assignment":
|
||||
return BlockListOptions{}.KeyFromDefaultPath(path)
|
||||
case "handle":
|
||||
if len(parts) > 5 {
|
||||
return nil
|
||||
}
|
||||
return IPAMHandleKey{
|
||||
HandleID: parts[4],
|
||||
}
|
||||
case "host":
|
||||
return BlockAffinityListOptions{}.KeyFromDefaultPath(path)
|
||||
}
|
||||
}
|
||||
case "resources":
|
||||
switch parts[2] {
|
||||
case "v3":
|
||||
// v3 resource keys strictly require the leading slash.
|
||||
if len(parts) < 6 || parts[3] != "projectcalico.org" || path[0] != '/' {
|
||||
return nil
|
||||
}
|
||||
switch len(parts) {
|
||||
case 6:
|
||||
ri, ok := resourceInfoByPlural[unescapeName(parts[4])]
|
||||
if !ok {
|
||||
log.Warnf("(BUG) unknown resource type: %v", path)
|
||||
return nil
|
||||
}
|
||||
if namespace.IsNamespaced(ri.kind) {
|
||||
log.Warnf("(BUG) Path is a global resource, but resource is namespaced: %v", path)
|
||||
return nil
|
||||
}
|
||||
log.Debugf("Path is a global resource: %v", path)
|
||||
return ResourceKey{
|
||||
Kind: ri.kind,
|
||||
Name: unescapeName(parts[5]),
|
||||
}
|
||||
case 7:
|
||||
ri, ok := resourceInfoByPlural[unescapeName(parts[4])]
|
||||
if !ok {
|
||||
log.Warnf("(BUG) unknown resource type: %v", path)
|
||||
return nil
|
||||
}
|
||||
if !namespace.IsNamespaced(ri.kind) {
|
||||
log.Warnf("(BUG) Path is a namespaced resource, but resource is global: %v", path)
|
||||
return nil
|
||||
}
|
||||
log.Debugf("Path is a namespaced resource: %v", path)
|
||||
return ResourceKey{
|
||||
Kind: ri.kind,
|
||||
Namespace: unescapeName(parts[5]),
|
||||
Name: unescapeName(parts[6]),
|
||||
}
|
||||
}
|
||||
}
|
||||
case "felix":
|
||||
if len(parts) < 4 {
|
||||
return nil
|
||||
}
|
||||
switch parts[2] {
|
||||
case "v1":
|
||||
switch parts[3] {
|
||||
case "host":
|
||||
if len(parts) != 7 || parts[5] != "endpoint" {
|
||||
return nil
|
||||
}
|
||||
return HostEndpointStatusKey{
|
||||
Hostname: parts[4],
|
||||
EndpointID: unescapeName(parts[6]),
|
||||
}
|
||||
}
|
||||
case "v2":
|
||||
if len(parts) < 7 {
|
||||
return nil
|
||||
}
|
||||
if parts[4] != "host" {
|
||||
return nil
|
||||
}
|
||||
switch parts[6] {
|
||||
case "status":
|
||||
return ActiveStatusReportListOptions{}.KeyFromDefaultPath(path)
|
||||
case "last_reported_status":
|
||||
return LastStatusReportListOptions{}.KeyFromDefaultPath(path)
|
||||
case "workload":
|
||||
return WorkloadEndpointStatusListOptions{}.KeyFromDefaultPath(path)
|
||||
}
|
||||
}
|
||||
}
|
||||
log.Debugf("Path is unknown: %v", path)
|
||||
return nil
|
||||
}
|
||||
|
||||
// OldKeyFromDefaultPath is the old, (slower) implementation of KeyFromDefaultPath. It is kept to allow
|
||||
// fuzzing the new version against it. Parses the default path representation of a key into one
|
||||
// of our <Type>Key structs. Returns nil if the string doesn't match one of
|
||||
// our key types.
|
||||
func OldKeyFromDefaultPath(path string) Key {
|
||||
if m := matchWorkloadEndpoint.FindStringSubmatch(path); m != nil {
|
||||
log.Debugf("Path is a workload endpoint: %v", path)
|
||||
return WorkloadEndpointKey{
|
||||
Hostname: m[1],
|
||||
Hostname: unescapeName(m[1]),
|
||||
OrchestratorID: unescapeName(m[2]),
|
||||
WorkloadID: unescapeName(m[3]),
|
||||
EndpointID: unescapeName(m[4]),
|
||||
@@ -203,7 +442,7 @@ func KeyFromDefaultPath(path string) Key {
|
||||
} else if m := matchHostEndpoint.FindStringSubmatch(path); m != nil {
|
||||
log.Debugf("Path is a host endpoint: %v", path)
|
||||
return HostEndpointKey{
|
||||
Hostname: m[1],
|
||||
Hostname: unescapeName(m[1]),
|
||||
EndpointID: unescapeName(m[2]),
|
||||
}
|
||||
} else if m := matchNetworkSet.FindStringSubmatch(path); m != nil {
|
||||
@@ -211,6 +450,37 @@ func KeyFromDefaultPath(path string) Key {
|
||||
return NetworkSetKey{
|
||||
Name: unescapeName(m[1]),
|
||||
}
|
||||
} else if m := matchGlobalResource.FindStringSubmatch(path); m != nil {
|
||||
ri, ok := resourceInfoByPlural[unescapeName(m[1])]
|
||||
if !ok {
|
||||
log.Warnf("(BUG) unknown resource type: %v", path)
|
||||
return nil
|
||||
}
|
||||
if namespace.IsNamespaced(ri.kind) {
|
||||
log.Warnf("(BUG) Path is a global resource, but resource is namespaced: %v", path)
|
||||
return nil
|
||||
}
|
||||
log.Debugf("Path is a global resource: %v", path)
|
||||
return ResourceKey{
|
||||
Kind: ri.kind,
|
||||
Name: unescapeName(m[2]),
|
||||
}
|
||||
} else if m := matchNamespacedResource.FindStringSubmatch(path); m != nil {
|
||||
ri, ok := resourceInfoByPlural[unescapeName(m[1])]
|
||||
if !ok {
|
||||
log.Warnf("(BUG) unknown resource type: %v", path)
|
||||
return nil
|
||||
}
|
||||
if !namespace.IsNamespaced(ri.kind) {
|
||||
log.Warnf("(BUG) Path is a namespaced resource, but resource is global: %v", path)
|
||||
return nil
|
||||
}
|
||||
log.Debugf("Path is a namespaced resource: %v", path)
|
||||
return ResourceKey{
|
||||
Kind: resourceInfoByPlural[unescapeName(m[1])].kind,
|
||||
Namespace: unescapeName(m[2]),
|
||||
Name: unescapeName(m[3]),
|
||||
}
|
||||
} else if m := matchPolicy.FindStringSubmatch(path); m != nil {
|
||||
log.Debugf("Path is a policy: %v", path)
|
||||
return PolicyKey{
|
||||
@@ -220,9 +490,6 @@ func KeyFromDefaultPath(path string) Key {
|
||||
log.Debugf("Path is a profile: %v (%v)", path, m[2])
|
||||
pk := ProfileKey{unescapeName(m[1])}
|
||||
switch m[2] {
|
||||
case "tags":
|
||||
log.Debugf("Profile tags")
|
||||
return ProfileTagsKey{ProfileKey: pk}
|
||||
case "rules":
|
||||
log.Debugf("Profile rules")
|
||||
return ProfileRulesKey{ProfileKey: pk}
|
||||
@@ -234,6 +501,9 @@ func KeyFromDefaultPath(path string) Key {
|
||||
} else if m := matchHostIp.FindStringSubmatch(path); m != nil {
|
||||
log.Debugf("Path is a host ID: %v", path)
|
||||
return HostIPKey{Hostname: m[1]}
|
||||
} else if m := matchWireguard.FindStringSubmatch(path); m != nil {
|
||||
log.Debugf("Path is a node name: %v", path)
|
||||
return WireguardKey{NodeName: m[1]}
|
||||
} else if m := matchIPPool.FindStringSubmatch(path); m != nil {
|
||||
log.Debugf("Path is a pool: %v", path)
|
||||
mungedCIDR := m[1]
|
||||
@@ -261,14 +531,6 @@ func KeyFromDefaultPath(path string) Key {
|
||||
return k
|
||||
} else if k := (BlockListOptions{}).KeyFromDefaultPath(path); k != nil {
|
||||
return k
|
||||
} else if k := (ResourceListOptions{Kind: v3.KindNode}).KeyFromDefaultPath(path); k != nil {
|
||||
return k
|
||||
} else if k := (ResourceListOptions{Kind: v3.KindBGPPeer}).KeyFromDefaultPath(path); k != nil {
|
||||
return k
|
||||
} else if k := (ResourceListOptions{Kind: v3.KindNetworkPolicy}).KeyFromDefaultPath(path); k != nil {
|
||||
return k
|
||||
} else if k := (ResourceListOptions{Kind: v3.KindIPPool}).KeyFromDefaultPath(path); k != nil {
|
||||
return k
|
||||
} else if k := (HostEndpointStatusListOptions{}).KeyFromDefaultPath(path); k != nil {
|
||||
return k
|
||||
} else if k := (WorkloadEndpointStatusListOptions{}).KeyFromDefaultPath(path); k != nil {
|
||||
@@ -304,7 +566,7 @@ func ParseValue(key Key, rawData []byte) (interface{}, error) {
|
||||
if ip == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return &net.IP{ip}, nil
|
||||
return &net.IP{IP: ip}, nil
|
||||
}
|
||||
value := reflect.New(valueType)
|
||||
elem := value.Elem()
|
||||
@@ -342,7 +604,7 @@ func ParseValue(key Key, rawData []byte) (interface{}, error) {
|
||||
return iface, nil
|
||||
}
|
||||
|
||||
// Serialize a value in the model to a []byte to stored in the datastore. This
|
||||
// SerializeValue serializes a value in the model to a []byte to be stored in the datastore. This
|
||||
// performs the opposite processing to ParseValue()
|
||||
func SerializeValue(d *KVPair) ([]byte, error) {
|
||||
valueType, err := d.Key.valueType()
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -12,12 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package scope
|
||||
|
||||
type Scope string
|
||||
package model
|
||||
|
||||
const (
|
||||
Undefined Scope = ""
|
||||
Global = "global"
|
||||
Node = "node"
|
||||
KindKubernetesEndpointSlice = "KubernetesEndpointSlice"
|
||||
)
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2017-2019 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -12,15 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package namespace
|
||||
package model
|
||||
|
||||
import apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
|
||||
func IsNamespaced(kind string) bool {
|
||||
switch kind {
|
||||
case apiv3.KindWorkloadEndpoint, apiv3.KindNetworkPolicy, apiv3.KindNetworkSet:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
const (
|
||||
KindKubernetesNetworkPolicy = "KubernetesNetworkPolicy"
|
||||
)
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -12,12 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
Package scope implements field types that represent different scopes for resource
|
||||
types. For example, a resource may be valid at the global scope in that applies to
|
||||
all Calico nodes, or may be at a node scope in that applies to a specific node.
|
||||
package model
|
||||
|
||||
The internal representation is an integer, but the JSON serialization of these
|
||||
values is a string.
|
||||
*/
|
||||
package scope
|
||||
const (
|
||||
KindKubernetesService = "KubernetesService"
|
||||
)
|
||||
@@ -21,9 +21,10 @@ import (
|
||||
|
||||
"reflect"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/libcalico-go/lib/net"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/net"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -88,6 +89,7 @@ func (options NetworkSetListOptions) KeyFromDefaultPath(path string) Key {
|
||||
}
|
||||
|
||||
type NetworkSet struct {
|
||||
Nets []net.IPNet `json:"nets,omitempty" validate:"omitempty,dive,cidr"`
|
||||
Labels map[string]string `json:"labels,omitempty" validate:"omitempty,labels"`
|
||||
Nets []net.IPNet `json:"nets,omitempty" validate:"omitempty,dive,cidr"`
|
||||
Labels map[string]string `json:"labels,omitempty" validate:"omitempty,labels"`
|
||||
ProfileIDs []string `json:"profile_ids,omitempty" validate:"omitempty,dive,name"`
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2016,2020 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -21,10 +21,12 @@ import (
|
||||
|
||||
"reflect"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/libcalico-go/lib/net"
|
||||
"github.com/projectcalico/libcalico-go/lib/numorstring"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/projectcalico/api/pkg/lib/numorstring"
|
||||
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/net"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -32,8 +34,10 @@ var (
|
||||
typeHostMetadata = reflect.TypeOf(HostMetadata{})
|
||||
typeOrchRefs = reflect.TypeOf([]OrchRef{})
|
||||
typeHostIp = rawIPType
|
||||
typeWireguard = reflect.TypeOf(Wireguard{})
|
||||
matchHostMetadata = regexp.MustCompile(`^/?calico/v1/host/([^/]+)/metadata$`)
|
||||
matchHostIp = regexp.MustCompile(`^/?calico/v1/host/([^/]+)/bird_ip$`)
|
||||
matchWireguard = regexp.MustCompile(`^/?calico/v1/host/([^/]+)/wireguard$`)
|
||||
)
|
||||
|
||||
type Node struct {
|
||||
@@ -57,6 +61,13 @@ type OrchRef struct {
|
||||
NodeName string `json:"nodeName,omitempty"`
|
||||
}
|
||||
|
||||
type Wireguard struct {
|
||||
InterfaceIPv4Addr *net.IP `json:"interfaceIPv4Addr,omitempty"`
|
||||
PublicKey string `json:"publicKey,omitempty"`
|
||||
InterfaceIPv6Addr *net.IP `json:"interfaceIPv6Addr,omitempty"`
|
||||
PublicKeyV6 string `json:"publicKeyV6,omitempty"`
|
||||
}
|
||||
|
||||
type NodeKey struct {
|
||||
Hostname string
|
||||
}
|
||||
@@ -218,3 +229,54 @@ func (options OrchRefListOptions) defaultPathRoot() string {
|
||||
func (options OrchRefListOptions) KeyFromDefaultPath(path string) Key {
|
||||
return OrchRefKey{Hostname: options.Hostname}
|
||||
}
|
||||
|
||||
// The Felix Wireguard Key.
|
||||
type WireguardKey struct {
|
||||
NodeName string
|
||||
}
|
||||
|
||||
func (key WireguardKey) defaultPath() (string, error) {
|
||||
if key.NodeName == "" {
|
||||
return "", errors.ErrorInsufficientIdentifiers{Name: "name"}
|
||||
}
|
||||
return fmt.Sprintf("/calico/v1/host/%s/wireguard",
|
||||
key.NodeName), nil
|
||||
}
|
||||
|
||||
func (key WireguardKey) defaultDeletePath() (string, error) {
|
||||
return key.defaultPath()
|
||||
}
|
||||
|
||||
func (key WireguardKey) defaultDeleteParentPaths() ([]string, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (key WireguardKey) valueType() (reflect.Type, error) {
|
||||
return typeWireguard, nil
|
||||
}
|
||||
|
||||
func (key WireguardKey) String() string {
|
||||
return fmt.Sprintf("Node(nodename=%s)", key.NodeName)
|
||||
}
|
||||
|
||||
type WireguardListOptions struct {
|
||||
NodeName string
|
||||
}
|
||||
|
||||
func (options WireguardListOptions) defaultPathRoot() string {
|
||||
if options.NodeName == "" {
|
||||
return "/calico/v1/host"
|
||||
} else {
|
||||
return fmt.Sprintf("/calico/v1/host/%s/wireguard", options.NodeName)
|
||||
}
|
||||
}
|
||||
|
||||
func (options WireguardListOptions) KeyFromDefaultPath(path string) Key {
|
||||
log.Debugf("Get Node key from %s", path)
|
||||
if r := matchWireguard.FindAllStringSubmatch(path, -1); len(r) == 1 {
|
||||
return WireguardKey{NodeName: r[0][1]}
|
||||
} else {
|
||||
log.Debugf("%s didn't match regex", path)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -22,8 +22,9 @@ import (
|
||||
|
||||
"strings"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -22,12 +22,13 @@ import (
|
||||
|
||||
"sort"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
matchProfile = regexp.MustCompile("^/?calico/v1/policy/profile/([^/]+)/(tags|rules|labels)$")
|
||||
matchProfile = regexp.MustCompile("^/?calico/v1/policy/profile/([^/]+)/(rules|labels)$")
|
||||
typeProfile = reflect.TypeOf(Profile{})
|
||||
)
|
||||
|
||||
@@ -80,24 +81,6 @@ func (key ProfileRulesKey) String() string {
|
||||
return fmt.Sprintf("ProfileRules(name=%s)", key.Name)
|
||||
}
|
||||
|
||||
// ProfileTagsKey implements the KeyInterface for the profile tags
|
||||
type ProfileTagsKey struct {
|
||||
ProfileKey
|
||||
}
|
||||
|
||||
func (key ProfileTagsKey) defaultPath() (string, error) {
|
||||
e, err := key.ProfileKey.defaultPath()
|
||||
return e + "/tags", err
|
||||
}
|
||||
|
||||
func (key ProfileTagsKey) valueType() (reflect.Type, error) {
|
||||
return reflect.TypeOf([]string{}), nil
|
||||
}
|
||||
|
||||
func (key ProfileTagsKey) String() string {
|
||||
return fmt.Sprintf("ProfileTags(name=%s)", key.Name)
|
||||
}
|
||||
|
||||
// ProfileLabelsKey implements the KeyInterface for the profile labels
|
||||
type ProfileLabelsKey struct {
|
||||
ProfileKey
|
||||
@@ -144,8 +127,6 @@ func (options ProfileListOptions) KeyFromDefaultPath(path string) Key {
|
||||
}
|
||||
pk := ProfileKey{Name: name}
|
||||
switch kind {
|
||||
case "tags":
|
||||
return ProfileTagsKey{ProfileKey: pk}
|
||||
case "labels":
|
||||
return ProfileLabelsKey{ProfileKey: pk}
|
||||
case "rules":
|
||||
@@ -174,8 +155,6 @@ func (_ *ProfileListOptions) ListConvert(ds []*KVPair) []*KVPair {
|
||||
var name string
|
||||
for _, d := range ds {
|
||||
switch t := d.Key.(type) {
|
||||
case ProfileTagsKey:
|
||||
name = t.Name
|
||||
case ProfileLabelsKey:
|
||||
name = t.Name
|
||||
case ProfileRulesKey:
|
||||
@@ -197,10 +176,6 @@ func (_ *ProfileListOptions) ListConvert(ds []*KVPair) []*KVPair {
|
||||
|
||||
p := pd.Value.(*Profile)
|
||||
switch t := d.Value.(type) {
|
||||
case []string: // must be tags #TODO should type these
|
||||
log.Debugf("Store tags %v", t)
|
||||
p.Tags = t
|
||||
pd.Revision = d.Revision
|
||||
case map[string]string: // must be labels
|
||||
log.Debugf("Store labels %v", t)
|
||||
p.Labels = t
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2016-2021 Tigera, Inc. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -22,33 +22,40 @@ import (
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/namespace"
|
||||
kapiv1 "k8s.io/api/core/v1"
|
||||
discovery "k8s.io/api/discovery/v1"
|
||||
|
||||
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
|
||||
|
||||
libapiv3 "github.com/projectcalico/calico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/namespace"
|
||||
)
|
||||
|
||||
// Name/type information about a single resource.
|
||||
type resourceInfo struct {
|
||||
typeOf reflect.Type
|
||||
plural string
|
||||
kind string
|
||||
typeOf reflect.Type
|
||||
plural string
|
||||
kindLower string
|
||||
kind string
|
||||
}
|
||||
|
||||
var (
|
||||
matchGlobalResource = regexp.MustCompile("^/calico/resources/v3/projectcalico[.]org/([^/]+)/([^/]+)$")
|
||||
matchNamespacedResource = regexp.MustCompile("^/calico/resources/v3/projectcalico[.]org/([^/]+)/([^/]+)/([^/]+)$")
|
||||
resourceInfoByKind = make(map[string]resourceInfo)
|
||||
resourceInfoByKindLower = make(map[string]resourceInfo)
|
||||
resourceInfoByPlural = make(map[string]resourceInfo)
|
||||
)
|
||||
|
||||
func registerResourceInfo(kind string, plural string, typeOf reflect.Type) {
|
||||
kind = strings.ToLower(kind)
|
||||
kindLower := strings.ToLower(kind)
|
||||
plural = strings.ToLower(plural)
|
||||
ri := resourceInfo{
|
||||
typeOf: typeOf,
|
||||
kind: kind,
|
||||
plural: plural,
|
||||
typeOf: typeOf,
|
||||
kindLower: kindLower,
|
||||
kind: kind,
|
||||
plural: plural,
|
||||
}
|
||||
resourceInfoByKind[kind] = ri
|
||||
resourceInfoByKindLower[kindLower] = ri
|
||||
resourceInfoByPlural[plural] = ri
|
||||
}
|
||||
|
||||
@@ -93,20 +100,40 @@ func init() {
|
||||
"ippools",
|
||||
reflect.TypeOf(apiv3.IPPool{}),
|
||||
)
|
||||
registerResourceInfo(
|
||||
apiv3.KindIPReservation,
|
||||
"ipreservations",
|
||||
reflect.TypeOf(apiv3.IPReservation{}),
|
||||
)
|
||||
registerResourceInfo(
|
||||
apiv3.KindNetworkPolicy,
|
||||
"networkpolicies",
|
||||
reflect.TypeOf(apiv3.NetworkPolicy{}),
|
||||
)
|
||||
registerResourceInfo(
|
||||
KindKubernetesNetworkPolicy,
|
||||
"kubernetesnetworkpolicies",
|
||||
reflect.TypeOf(apiv3.NetworkPolicy{}),
|
||||
)
|
||||
registerResourceInfo(
|
||||
KindKubernetesEndpointSlice,
|
||||
"kubernetesendpointslices",
|
||||
reflect.TypeOf(discovery.EndpointSlice{}),
|
||||
)
|
||||
registerResourceInfo(
|
||||
apiv3.KindNetworkSet,
|
||||
"networksets",
|
||||
reflect.TypeOf(apiv3.NetworkSet{}),
|
||||
)
|
||||
registerResourceInfo(
|
||||
apiv3.KindNode,
|
||||
libapiv3.KindNode,
|
||||
"nodes",
|
||||
reflect.TypeOf(apiv3.Node{}),
|
||||
reflect.TypeOf(libapiv3.Node{}),
|
||||
)
|
||||
registerResourceInfo(
|
||||
apiv3.KindCalicoNodeStatus,
|
||||
"caliconodestatuses",
|
||||
reflect.TypeOf(apiv3.CalicoNodeStatus{}),
|
||||
)
|
||||
registerResourceInfo(
|
||||
apiv3.KindProfile,
|
||||
@@ -114,9 +141,33 @@ func init() {
|
||||
reflect.TypeOf(apiv3.Profile{}),
|
||||
)
|
||||
registerResourceInfo(
|
||||
apiv3.KindWorkloadEndpoint,
|
||||
libapiv3.KindWorkloadEndpoint,
|
||||
"workloadendpoints",
|
||||
reflect.TypeOf(apiv3.WorkloadEndpoint{}),
|
||||
reflect.TypeOf(libapiv3.WorkloadEndpoint{}),
|
||||
)
|
||||
registerResourceInfo(
|
||||
libapiv3.KindIPAMConfig,
|
||||
"ipamconfigs",
|
||||
reflect.TypeOf(libapiv3.IPAMConfig{}),
|
||||
)
|
||||
registerResourceInfo(
|
||||
apiv3.KindKubeControllersConfiguration,
|
||||
"kubecontrollersconfigurations",
|
||||
reflect.TypeOf(apiv3.KubeControllersConfiguration{}))
|
||||
registerResourceInfo(
|
||||
KindKubernetesService,
|
||||
"kubernetesservice",
|
||||
reflect.TypeOf(kapiv1.Service{}),
|
||||
)
|
||||
registerResourceInfo(
|
||||
libapiv3.KindBlockAffinity,
|
||||
"blockaffinities",
|
||||
reflect.TypeOf(libapiv3.BlockAffinity{}),
|
||||
)
|
||||
registerResourceInfo(
|
||||
apiv3.KindBGPFilter,
|
||||
"BGPFilters",
|
||||
reflect.TypeOf(apiv3.BGPFilter{}),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -134,7 +185,7 @@ func (key ResourceKey) defaultPath() (string, error) {
|
||||
}
|
||||
|
||||
func (key ResourceKey) defaultDeletePath() (string, error) {
|
||||
ri, ok := resourceInfoByKind[strings.ToLower(key.Kind)]
|
||||
ri, ok := resourceInfoByKindLower[strings.ToLower(key.Kind)]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("couldn't convert key: %+v", key)
|
||||
}
|
||||
@@ -149,7 +200,7 @@ func (key ResourceKey) defaultDeleteParentPaths() ([]string, error) {
|
||||
}
|
||||
|
||||
func (key ResourceKey) valueType() (reflect.Type, error) {
|
||||
ri, ok := resourceInfoByKind[strings.ToLower(key.Kind)]
|
||||
ri, ok := resourceInfoByKindLower[strings.ToLower(key.Kind)]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Unexpected resource kind: " + key.Kind)
|
||||
}
|
||||
@@ -184,7 +235,7 @@ func (options ResourceListOptions) IsLastSegmentIsPrefix() bool {
|
||||
}
|
||||
|
||||
func (options ResourceListOptions) KeyFromDefaultPath(path string) Key {
|
||||
ri, ok := resourceInfoByKind[strings.ToLower(options.Kind)]
|
||||
ri, ok := resourceInfoByKindLower[strings.ToLower(options.Kind)]
|
||||
if !ok {
|
||||
log.Panic("Unexpected resource kind: " + options.Kind)
|
||||
}
|
||||
@@ -231,7 +282,7 @@ func (options ResourceListOptions) KeyFromDefaultPath(path string) Key {
|
||||
kindPlural := r[0][1]
|
||||
name := r[0][2]
|
||||
if kindPlural != ri.plural {
|
||||
log.Debugf("Didn't match kind %s != %s", kindPlural, kindPlural)
|
||||
log.Debugf("Didn't match kind %s != %s", kindPlural, ri.plural)
|
||||
return nil
|
||||
}
|
||||
if len(options.Name) != 0 {
|
||||
@@ -247,7 +298,7 @@ func (options ResourceListOptions) KeyFromDefaultPath(path string) Key {
|
||||
}
|
||||
|
||||
func (options ResourceListOptions) defaultPathRoot() string {
|
||||
ri, ok := resourceInfoByKind[strings.ToLower(options.Kind)]
|
||||
ri, ok := resourceInfoByKindLower[strings.ToLower(options.Kind)]
|
||||
if !ok {
|
||||
log.Panic("Unexpected resource kind: " + options.Kind)
|
||||
}
|
||||
@@ -19,9 +19,10 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/net"
|
||||
"github.com/projectcalico/libcalico-go/lib/numorstring"
|
||||
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
|
||||
"github.com/projectcalico/api/pkg/lib/numorstring"
|
||||
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/net"
|
||||
)
|
||||
|
||||
type Rule struct {
|
||||
@@ -40,16 +41,20 @@ type Rule struct {
|
||||
NotICMPType *int `json:"!icmp_type,omitempty" validate:"omitempty,gte=0,lt=255"`
|
||||
NotICMPCode *int `json:"!icmp_code,omitempty" validate:"omitempty,gte=0,lte=255"`
|
||||
|
||||
SrcTag string `json:"src_tag,omitempty" validate:"omitempty,tag"`
|
||||
SrcNet *net.IPNet `json:"src_net,omitempty" validate:"omitempty"`
|
||||
SrcNets []*net.IPNet `json:"src_nets,omitempty" validate:"omitempty"`
|
||||
SrcSelector string `json:"src_selector,omitempty" validate:"omitempty,selector"`
|
||||
SrcPorts []numorstring.Port `json:"src_ports,omitempty" validate:"omitempty,dive"`
|
||||
DstTag string `json:"dst_tag,omitempty" validate:"omitempty,tag"`
|
||||
DstSelector string `json:"dst_selector,omitempty" validate:"omitempty,selector"`
|
||||
DstNet *net.IPNet `json:"dst_net,omitempty" validate:"omitempty"`
|
||||
DstNets []*net.IPNet `json:"dst_nets,omitempty" validate:"omitempty"`
|
||||
DstPorts []numorstring.Port `json:"dst_ports,omitempty" validate:"omitempty,dive"`
|
||||
SrcTag string `json:"src_tag,omitempty" validate:"omitempty,tag"`
|
||||
SrcNet *net.IPNet `json:"src_net,omitempty" validate:"omitempty"`
|
||||
SrcNets []*net.IPNet `json:"src_nets,omitempty" validate:"omitempty"`
|
||||
SrcSelector string `json:"src_selector,omitempty" validate:"omitempty,selector"`
|
||||
SrcPorts []numorstring.Port `json:"src_ports,omitempty" validate:"omitempty,dive"`
|
||||
SrcService string `json:"src_service,omitempty" validate:"omitempty"`
|
||||
SrcServiceNamespace string `json:"src_service_ns,omitempty" validate:"omitempty"`
|
||||
DstTag string `json:"dst_tag,omitempty" validate:"omitempty,tag"`
|
||||
DstSelector string `json:"dst_selector,omitempty" validate:"omitempty,selector"`
|
||||
DstNet *net.IPNet `json:"dst_net,omitempty" validate:"omitempty"`
|
||||
DstNets []*net.IPNet `json:"dst_nets,omitempty" validate:"omitempty"`
|
||||
DstPorts []numorstring.Port `json:"dst_ports,omitempty" validate:"omitempty,dive"`
|
||||
DstService string `json:"dst_service,omitempty" validate:"omitempty"`
|
||||
DstServiceNamespace string `json:"dst_service_ns,omitempty" validate:"omitempty"`
|
||||
|
||||
NotSrcTag string `json:"!src_tag,omitempty" validate:"omitempty,tag"`
|
||||
NotSrcNet *net.IPNet `json:"!src_net,omitempty" validate:"omitempty"`
|
||||
@@ -80,6 +85,8 @@ type Rule struct {
|
||||
HTTPMatch *HTTPMatch `json:"http,omitempty" validate:"omitempty"`
|
||||
|
||||
LogPrefix string `json:"log_prefix,omitempty" validate:"omitempty"`
|
||||
|
||||
Metadata *RuleMetadata `json:"metadata,omitempty" validate:"omitempty"`
|
||||
}
|
||||
|
||||
type HTTPMatch struct {
|
||||
@@ -87,6 +94,10 @@ type HTTPMatch struct {
|
||||
Paths []apiv3.HTTPPath `json:"paths,omitempty" validate:"omitempty"`
|
||||
}
|
||||
|
||||
type RuleMetadata struct {
|
||||
Annotations map[string]string `json:"annotations,omitempty"`
|
||||
}
|
||||
|
||||
func combineNets(n *net.IPNet, nets []*net.IPNet) []*net.IPNet {
|
||||
if n == nil {
|
||||
return nets
|
||||
@@ -20,8 +20,9 @@ import (
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -21,10 +21,12 @@ import (
|
||||
|
||||
"reflect"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/libcalico-go/lib/net"
|
||||
"github.com/projectcalico/libcalico-go/lib/numorstring"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/projectcalico/api/pkg/lib/numorstring"
|
||||
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/net"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -148,20 +150,22 @@ func (options WorkloadEndpointListOptions) KeyFromDefaultPath(path string) Key {
|
||||
}
|
||||
|
||||
type WorkloadEndpoint struct {
|
||||
State string `json:"state"`
|
||||
Name string `json:"name"`
|
||||
ActiveInstanceID string `json:"active_instance_id"`
|
||||
Mac *net.MAC `json:"mac"`
|
||||
ProfileIDs []string `json:"profile_ids"`
|
||||
IPv4Nets []net.IPNet `json:"ipv4_nets"`
|
||||
IPv6Nets []net.IPNet `json:"ipv6_nets"`
|
||||
IPv4NAT []IPNAT `json:"ipv4_nat,omitempty"`
|
||||
IPv6NAT []IPNAT `json:"ipv6_nat,omitempty"`
|
||||
Labels map[string]string `json:"labels,omitempty"`
|
||||
IPv4Gateway *net.IP `json:"ipv4_gateway,omitempty" validate:"omitempty,ipv4"`
|
||||
IPv6Gateway *net.IP `json:"ipv6_gateway,omitempty" validate:"omitempty,ipv6"`
|
||||
Ports []EndpointPort `json:"ports,omitempty" validate:"dive"`
|
||||
GenerateName string `json:"generate_name,omitempty"`
|
||||
State string `json:"state"`
|
||||
Name string `json:"name"`
|
||||
ActiveInstanceID string `json:"active_instance_id"`
|
||||
Mac *net.MAC `json:"mac"`
|
||||
ProfileIDs []string `json:"profile_ids"`
|
||||
IPv4Nets []net.IPNet `json:"ipv4_nets"`
|
||||
IPv6Nets []net.IPNet `json:"ipv6_nets"`
|
||||
IPv4NAT []IPNAT `json:"ipv4_nat,omitempty"`
|
||||
IPv6NAT []IPNAT `json:"ipv6_nat,omitempty"`
|
||||
Labels map[string]string `json:"labels,omitempty"`
|
||||
IPv4Gateway *net.IP `json:"ipv4_gateway,omitempty" validate:"omitempty,ipv4"`
|
||||
IPv6Gateway *net.IP `json:"ipv6_gateway,omitempty" validate:"omitempty,ipv6"`
|
||||
Ports []EndpointPort `json:"ports,omitempty" validate:"dive"`
|
||||
GenerateName string `json:"generate_name,omitempty"`
|
||||
AllowSpoofedSourcePrefixes []net.IPNet `json:"allow_spoofed_source_ips,omitempty"`
|
||||
Annotations map[string]string `json:"annotations,omitempty"`
|
||||
}
|
||||
|
||||
type EndpointPort struct {
|
||||
@@ -22,8 +22,9 @@ import (
|
||||
|
||||
"reflect"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2020 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -16,6 +16,11 @@ package errors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// Error indicating a problem connecting to the backend.
|
||||
@@ -28,8 +33,25 @@ func (e ErrorDatastoreError) Error() string {
|
||||
return e.Err.Error()
|
||||
}
|
||||
|
||||
func (e ErrorDatastoreError) Status() metav1.Status {
|
||||
if i, ok := e.Err.(apierrors.APIStatus); ok {
|
||||
return i.Status()
|
||||
}
|
||||
|
||||
// Just wrap in a status error.
|
||||
return metav1.Status{
|
||||
Status: metav1.StatusFailure,
|
||||
Code: http.StatusBadRequest,
|
||||
Reason: metav1.StatusReasonInvalid,
|
||||
Message: fmt.Sprintf(e.Error()),
|
||||
Details: &metav1.StatusDetails{
|
||||
Name: fmt.Sprintf("%v", e.Identifier),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Error indicating a resource does not exist. Used when attempting to delete or
|
||||
// udpate a non-existent resource.
|
||||
// update a non-existent resource.
|
||||
type ErrorResourceDoesNotExist struct {
|
||||
Err error
|
||||
Identifier interface{}
|
||||
@@ -100,7 +122,7 @@ func (e ErroredField) String() string {
|
||||
|
||||
func (e ErrorValidation) Error() string {
|
||||
if len(e.ErroredFields) == 0 {
|
||||
return fmt.Sprintf("unknown validation error: %v", e)
|
||||
return "unknown validation error"
|
||||
} else if len(e.ErroredFields) == 1 {
|
||||
f := e.ErroredFields[0]
|
||||
return fmt.Sprintf("error with field %s", f)
|
||||
@@ -133,9 +155,33 @@ func (e ErrorResourceUpdateConflict) Error() string {
|
||||
return fmt.Sprintf("update conflict: %v", e.Identifier)
|
||||
}
|
||||
|
||||
// Error indicating that the caller has attempted to release an IP address using
|
||||
// outdated information.
|
||||
type ErrorBadHandle struct {
|
||||
Requested string
|
||||
Expected string
|
||||
}
|
||||
|
||||
func (e ErrorBadHandle) Error() string {
|
||||
f := "the given handle (%s) does not match (%s) when attempting to release IP"
|
||||
return fmt.Sprintf(f, e.Requested, e.Expected)
|
||||
}
|
||||
|
||||
// Error indicating that the caller has attempted to release an IP address using
|
||||
// outdated information.
|
||||
type ErrorBadSequenceNumber struct {
|
||||
Requested uint64
|
||||
Expected uint64
|
||||
}
|
||||
|
||||
func (e ErrorBadSequenceNumber) Error() string {
|
||||
f := "the given sequence number (%d) does not match (%d) when attempting to release IP"
|
||||
return fmt.Sprintf(f, e.Requested, e.Expected)
|
||||
}
|
||||
|
||||
// Error indicating that the operation may have partially succeeded, then
|
||||
// failed, without rolling back. A common example is when a function failed
|
||||
// in an acceptable way after it succesfully wrote some data to the datastore.
|
||||
// in an acceptable way after it successfully wrote some data to the datastore.
|
||||
type ErrorPartialFailure struct {
|
||||
Err error
|
||||
}
|
||||
@@ -171,16 +217,6 @@ func UpdateErrorIdentifier(err error, id interface{}) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Error indicating the watcher has been terminated.
|
||||
type ErrorWatchTerminated struct {
|
||||
Err error
|
||||
ClosedByRemote bool
|
||||
}
|
||||
|
||||
func (e ErrorWatchTerminated) Error() string {
|
||||
return fmt.Sprintf("watch terminated (closedByRemote:%v): %v", e.ClosedByRemote, e.Err)
|
||||
}
|
||||
|
||||
// Error indicating the datastore has failed to parse an entry.
|
||||
type ErrorParsingDatastoreEntry struct {
|
||||
RawKey string
|
||||
@@ -191,3 +227,84 @@ type ErrorParsingDatastoreEntry struct {
|
||||
func (e ErrorParsingDatastoreEntry) Error() string {
|
||||
return fmt.Sprintf("failed to parse datastore entry key=%s; value=%s: %v", e.RawKey, e.RawValue, e.Err)
|
||||
}
|
||||
|
||||
type ErrorPolicyConversionRule struct {
|
||||
EgressRule *networkingv1.NetworkPolicyEgressRule
|
||||
IngressRule *networkingv1.NetworkPolicyIngressRule
|
||||
Reason string
|
||||
}
|
||||
|
||||
func (e ErrorPolicyConversionRule) String() string {
|
||||
var fieldString string
|
||||
|
||||
switch {
|
||||
case e.EgressRule != nil:
|
||||
fieldString = fmt.Sprintf("%+v", e.EgressRule)
|
||||
case e.IngressRule != nil:
|
||||
fieldString = fmt.Sprintf("%+v", e.IngressRule)
|
||||
default:
|
||||
fieldString = "unknown rule"
|
||||
}
|
||||
|
||||
if e.Reason != "" {
|
||||
fieldString = fmt.Sprintf("%s (%s)", fieldString, e.Reason)
|
||||
}
|
||||
|
||||
return fieldString
|
||||
}
|
||||
|
||||
type ErrorPolicyConversion struct {
|
||||
PolicyName string
|
||||
Rules []ErrorPolicyConversionRule
|
||||
}
|
||||
|
||||
func (e *ErrorPolicyConversion) BadEgressRule(rule *networkingv1.NetworkPolicyEgressRule, reason string) {
|
||||
// Copy rule
|
||||
badRule := *rule
|
||||
|
||||
e.Rules = append(e.Rules, ErrorPolicyConversionRule{
|
||||
EgressRule: &badRule,
|
||||
IngressRule: nil,
|
||||
Reason: reason,
|
||||
})
|
||||
}
|
||||
|
||||
func (e *ErrorPolicyConversion) BadIngressRule(
|
||||
rule *networkingv1.NetworkPolicyIngressRule, reason string) {
|
||||
// Copy rule
|
||||
badRule := *rule
|
||||
|
||||
e.Rules = append(e.Rules, ErrorPolicyConversionRule{
|
||||
EgressRule: nil,
|
||||
IngressRule: &badRule,
|
||||
Reason: reason,
|
||||
})
|
||||
}
|
||||
|
||||
func (e ErrorPolicyConversion) Error() string {
|
||||
s := fmt.Sprintf("policy: %s", e.PolicyName)
|
||||
|
||||
switch {
|
||||
case len(e.Rules) == 0:
|
||||
s += ": unknown policy conversion error"
|
||||
case len(e.Rules) == 1:
|
||||
f := e.Rules[0]
|
||||
|
||||
s += fmt.Sprintf(": error with rule %s", f)
|
||||
default:
|
||||
s += ": error with the following rules:\n"
|
||||
for _, f := range e.Rules {
|
||||
s += fmt.Sprintf("- %s\n", f)
|
||||
}
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (e ErrorPolicyConversion) GetError() error {
|
||||
if len(e.Rules) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return e
|
||||
}
|
||||
27
vendor/github.com/projectcalico/calico/libcalico-go/lib/json/json.go
generated
vendored
Normal file
27
vendor/github.com/projectcalico/calico/libcalico-go/lib/json/json.go
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
// Copyright (c) 2022 Tigera, Inc. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package json
|
||||
|
||||
import jsoniter "github.com/json-iterator/go"
|
||||
|
||||
// Marshal is a drop in replacement for encoding/json.Marshall, which uses jsoniter for better performance.
|
||||
func Marshal(v any) ([]byte, error) {
|
||||
return jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(v)
|
||||
}
|
||||
|
||||
// Unmarshal is a drop in replacement for encoding/json.Unmarshal which uses jsoniter for better performance.
|
||||
func Unmarshal(data []byte, v any) error {
|
||||
return jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(data, v)
|
||||
}
|
||||
@@ -17,8 +17,9 @@ package names
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/net"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/net"
|
||||
)
|
||||
|
||||
// CIDRToName converts a CIDR to a valid resource name.
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
cerrors "github.com/projectcalico/calico/libcalico-go/lib/errors"
|
||||
)
|
||||
|
||||
// WorkloadEndpointIdentifiers is a collection of identifiers that are used to uniquely
|
||||
@@ -40,10 +40,10 @@ import (
|
||||
// NameMatches() method. This is necessary because a prefix match may return endpoints
|
||||
// that do not exactly match the required identifiers. For example, suppose you are
|
||||
// querying endpoints with node=node1, orch=k8s, pod=pod and endpoints is wild carded:
|
||||
// - The name prefix would be `node1-k8s-pod-`
|
||||
// - A list query using that prefix would also return endpoints with, for example,
|
||||
// a pod call "pod-1", because the name of the endpoint might be `node1-k8s-pod--1-eth0`
|
||||
// which matches the required name prefix.
|
||||
// - The name prefix would be `node1-k8s-pod-`
|
||||
// - A list query using that prefix would also return endpoints with, for example,
|
||||
// a pod call "pod-1", because the name of the endpoint might be `node1-k8s-pod--1-eth0`
|
||||
// which matches the required name prefix.
|
||||
//
|
||||
// The Node and Orchestrator are always required for both prefix and non-prefix name
|
||||
// construction.
|
||||
@@ -236,7 +236,7 @@ var (
|
||||
// instance with fields populated according to the WorkloadEndpoint name format.
|
||||
func ParseWorkloadEndpointName(wepName string) (WorkloadEndpointIdentifiers, error) {
|
||||
if len(wepName) == 0 {
|
||||
return WorkloadEndpointIdentifiers{}, errors.New("Cannot parse emty string")
|
||||
return WorkloadEndpointIdentifiers{}, errors.New("Cannot parse empty string")
|
||||
}
|
||||
parts := extractParts(wepName)
|
||||
if parts == nil || len(parts) == 0 {
|
||||
48
vendor/github.com/projectcalico/calico/libcalico-go/lib/namespace/resource.go
generated
vendored
Normal file
48
vendor/github.com/projectcalico/calico/libcalico-go/lib/namespace/resource.go
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
// Copyright (c) 2017-2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package namespace
|
||||
|
||||
import (
|
||||
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
|
||||
|
||||
libapiv3 "github.com/projectcalico/calico/libcalico-go/lib/apis/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
// Re-implement the model constants here
|
||||
// to avoid an import loop.
|
||||
KindKubernetesNetworkPolicy = "KubernetesNetworkPolicy"
|
||||
KindKubernetesEndpointSlice = "KubernetesEndpointSlice"
|
||||
KindKubernetesService = "KubernetesService"
|
||||
)
|
||||
|
||||
func IsNamespaced(kind string) bool {
|
||||
switch kind {
|
||||
case libapiv3.KindWorkloadEndpoint, apiv3.KindNetworkPolicy, apiv3.KindNetworkSet:
|
||||
return true
|
||||
case KindKubernetesNetworkPolicy:
|
||||
// KindKubernetesNetworkPolicy is a special-case resource. We don't expose it over the
|
||||
// v3 API, but it is used in the felix syncer to implement the Kubernetes NetworkPolicy API.
|
||||
return true
|
||||
case KindKubernetesEndpointSlice:
|
||||
// KindKubernetesEndpointSlice is a special-case resource. We don't expose it over the
|
||||
// v3 API, but it is used in the felix syncer.
|
||||
return true
|
||||
case KindKubernetesService:
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2016-2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -15,9 +15,10 @@
|
||||
package net
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"math/big"
|
||||
"net"
|
||||
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/json"
|
||||
)
|
||||
|
||||
// Sub class net.IP so that we can add JSON marshalling and unmarshalling.
|
||||
@@ -91,7 +92,7 @@ func (i *IP) Network() *IPNet {
|
||||
return n
|
||||
}
|
||||
|
||||
// MustParseIP parses the string into a IP.
|
||||
// MustParseIP parses the string into an IP.
|
||||
func MustParseIP(i string) IP {
|
||||
var ip IP
|
||||
err := ip.UnmarshalText([]byte(i))
|
||||
@@ -114,12 +115,21 @@ func IPToBigInt(ip IP) *big.Int {
|
||||
}
|
||||
}
|
||||
|
||||
func BigIntToIP(ipInt *big.Int) IP {
|
||||
ip := IP{net.IP(ipInt.Bytes())}
|
||||
return ip
|
||||
func BigIntToIP(ipInt *big.Int, v6 bool) IP {
|
||||
var netIP net.IP
|
||||
// Older versions of this code tried to guess v4/v6 based on the length of the big.Int
|
||||
// but then we can't tell the difference between 0.0.0.0/0 and ::/0.
|
||||
if v6 {
|
||||
netIP = make(net.IP, 16)
|
||||
} else {
|
||||
netIP = make(net.IP, 4)
|
||||
}
|
||||
ipInt.FillBytes(netIP)
|
||||
return IP{netIP}
|
||||
}
|
||||
|
||||
func IncrementIP(ip IP, increment *big.Int) IP {
|
||||
expectingV6 := ip.To4() == nil
|
||||
sum := big.NewInt(0).Add(IPToBigInt(ip), increment)
|
||||
return BigIntToIP(sum)
|
||||
return BigIntToIP(sum, expectingV6)
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
|
||||
// Copyright (c) 2016-2017,2021 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -15,8 +15,10 @@
|
||||
package net
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"math/big"
|
||||
"net"
|
||||
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/json"
|
||||
)
|
||||
|
||||
// Sub class net.IPNet so that we can add JSON marshalling and unmarshalling.
|
||||
@@ -61,6 +63,21 @@ func (i IPNet) IsNetOverlap(n net.IPNet) bool {
|
||||
return n.Contains(i.IP) || i.Contains(n.IP)
|
||||
}
|
||||
|
||||
// Covers returns true if the whole of n is covered by this CIDR.
|
||||
func (i IPNet) Covers(n net.IPNet) bool {
|
||||
if !i.Contains(n.IP) {
|
||||
return false
|
||||
} // else start of n is within our bounds, what about the end...
|
||||
nPrefixLen, _ := n.Mask.Size()
|
||||
iPrefixLen, _ := i.Mask.Size()
|
||||
return iPrefixLen <= nPrefixLen
|
||||
}
|
||||
|
||||
func (i IPNet) NthIP(n int) IP {
|
||||
bigN := big.NewInt(int64(n))
|
||||
return IncrementIP(IP{i.IP}, bigN)
|
||||
}
|
||||
|
||||
// Network returns the masked IP network.
|
||||
func (i *IPNet) Network() *IPNet {
|
||||
_, n, _ := ParseCIDR(i.String())
|
||||
@@ -118,7 +135,14 @@ func (i IPNet) String() string {
|
||||
return ip.String()
|
||||
}
|
||||
|
||||
// MustParseNetwork parses the string into a IPNet. The IP address in the
|
||||
func (i IPNet) NumAddrs() *big.Int {
|
||||
ones, bits := i.Mask.Size()
|
||||
zeros := bits - ones
|
||||
numAddrs := big.NewInt(1)
|
||||
return numAddrs.Lsh(numAddrs, uint(zeros))
|
||||
}
|
||||
|
||||
// MustParseNetwork parses the string into an IPNet. The IP address in the
|
||||
// IPNet is masked.
|
||||
func MustParseNetwork(c string) IPNet {
|
||||
_, cidr, err := ParseCIDR(c)
|
||||
@@ -128,7 +152,7 @@ func MustParseNetwork(c string) IPNet {
|
||||
return *cidr
|
||||
}
|
||||
|
||||
// MustParseCIDR parses the string into a IPNet. The IP address in the
|
||||
// MustParseCIDR parses the string into an IPNet. The IP address in the
|
||||
// IPNet is not masked.
|
||||
func MustParseCIDR(c string) IPNet {
|
||||
ip, cidr, err := ParseCIDR(c)
|
||||
@@ -15,8 +15,9 @@
|
||||
package net
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net"
|
||||
|
||||
"github.com/projectcalico/calico/libcalico-go/lib/json"
|
||||
)
|
||||
|
||||
// Sub class net.HardwareAddr so that we can add JSON marshalling and unmarshalling.
|
||||
166
vendor/github.com/projectcalico/calico/libcalico-go/lib/set/boxed.go
generated
vendored
Normal file
166
vendor/github.com/projectcalico/calico/libcalico-go/lib/set/boxed.go
generated
vendored
Normal file
@@ -0,0 +1,166 @@
|
||||
// Copyright (c) 2016-2022 Tigera, Inc. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package set
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// NewBoxed creates a new "boxed" Set, where the items stored in the set are boxed inside an interface. The values
|
||||
// placed into the set must be comparable (i.e. suitable for use as a map key). This is checked at runtime and the
|
||||
// code will panic on trying to add a non-comparable entry.
|
||||
//
|
||||
// This implementation exists because Go's generics currently have a gap. The type set of the "comparable"
|
||||
// constraint currently doesn't include interface types, which under Go's normal rules _are_ comparable (but may
|
||||
// panic at runtime if the interface happens to contain a non-comparable object). If possible use a typed map
|
||||
// via New() or From(); use this if you really need a Set[any] or Set[SomeInterface].
|
||||
func NewBoxed[T any]() Boxed[T] {
|
||||
return make(Boxed[T])
|
||||
}
|
||||
|
||||
func FromBoxed[T any](members ...T) Boxed[T] {
|
||||
s := NewBoxed[T]()
|
||||
s.AddAll(members)
|
||||
return s
|
||||
}
|
||||
|
||||
func FromArrayBoxed[T any](membersArray []T) Boxed[T] {
|
||||
s := NewBoxed[T]()
|
||||
s.AddAll(membersArray)
|
||||
return s
|
||||
}
|
||||
|
||||
func Empty[T any]() Set[T] {
|
||||
return (Boxed[T])(nil)
|
||||
}
|
||||
|
||||
type Boxed[T any] map[any]v
|
||||
|
||||
func (set Boxed[T]) String() string {
|
||||
var buf bytes.Buffer
|
||||
_, _ = buf.WriteString("set.Set{")
|
||||
first := true
|
||||
set.Iter(func(item T) error {
|
||||
if !first {
|
||||
buf.WriteString(",")
|
||||
} else {
|
||||
first = false
|
||||
}
|
||||
_, _ = fmt.Fprint(&buf, item)
|
||||
return nil
|
||||
})
|
||||
_, _ = buf.WriteString("}")
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// Len returns the number of items in the set.
func (set Boxed[T]) Len() int {
	return len(set)
}
|
||||
|
||||
func (set Boxed[T]) Add(item T) {
|
||||
set[item] = emptyValue
|
||||
}
|
||||
|
||||
func (set Boxed[T]) AddAll(itemArray []T) {
|
||||
for _, v := range itemArray {
|
||||
set.Add(v)
|
||||
}
|
||||
}
|
||||
|
||||
// AddSet adds the contents of set "other" into the set.
|
||||
func (set Boxed[T]) AddSet(other Set[T]) {
|
||||
other.Iter(func(item T) error {
|
||||
set.Add(item)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Discard removes item from the set; removing a non-member is a no-op.
func (set Boxed[T]) Discard(item T) {
	delete(set, item)
}
|
||||
|
||||
// Clear removes every item from the set in place. The delete-inside-range
// loop is the exact pattern the Go compiler recognizes and lowers to a fast
// map clear, so it is kept as-is.
func (set Boxed[T]) Clear() {
	for item := range set {
		delete(set, item)
	}
}
|
||||
|
||||
func (set Boxed[T]) Contains(item T) bool {
|
||||
_, present := set[item]
|
||||
return present
|
||||
}
|
||||
|
||||
func (set Boxed[T]) Iter(visitor func(item T) error) {
|
||||
loop:
|
||||
for item := range set {
|
||||
item := item.(T)
|
||||
err := visitor(item)
|
||||
switch err {
|
||||
case StopIteration:
|
||||
break loop
|
||||
case RemoveItem:
|
||||
delete(set, item)
|
||||
case nil:
|
||||
break
|
||||
default:
|
||||
log.WithError(err).Panic("Unexpected iteration error")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (set Boxed[T]) Copy() Set[T] {
|
||||
cpy := NewBoxed[T]()
|
||||
for item := range set {
|
||||
item := item.(T)
|
||||
cpy.Add(item)
|
||||
}
|
||||
return cpy
|
||||
}
|
||||
|
||||
func (set Boxed[T]) Slice() (s []T) {
|
||||
for item := range set {
|
||||
item := item.(T)
|
||||
s = append(s, item)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (set Boxed[T]) Equals(other Set[T]) bool {
|
||||
if set.Len() != other.Len() {
|
||||
return false
|
||||
}
|
||||
for item := range set {
|
||||
item := item.(T)
|
||||
if !other.Contains(item) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (set Boxed[T]) ContainsAll(other Set[T]) bool {
|
||||
result := true
|
||||
other.Iter(func(item T) error {
|
||||
if !set.Contains(item) {
|
||||
result = false
|
||||
return StopIteration
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return result
|
||||
}
|
||||
42
vendor/github.com/projectcalico/calico/libcalico-go/lib/set/interface.go
generated
vendored
Normal file
42
vendor/github.com/projectcalico/calico/libcalico-go/lib/set/interface.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package set
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Set is a generic, mutable set of items of type T.
type Set[T any] interface {
	// Len returns the number of members.
	Len() int
	// Add inserts an item; inserting an existing member is a no-op.
	Add(T)
	// AddAll inserts every element of the slice.
	AddAll(itemArray []T)
	// AddSet inserts every member of the other set.
	AddSet(other Set[T])
	// Discard removes an item; removing a non-member is a no-op.
	Discard(T)
	// Clear removes all items.
	Clear()
	// Contains reports whether an item is a member.
	Contains(T) bool
	// Iter visits each member; the visitor may return StopIteration or
	// RemoveItem to control the loop.
	Iter(func(item T) error)
	// Copy returns an independent copy of the set.
	Copy() Set[T]
	// Equals reports whether both sets have exactly the same members.
	Equals(Set[T]) bool
	// ContainsAll reports whether every member of the argument is present.
	ContainsAll(Set[T]) bool
	// Slice returns the members as a new slice.
	Slice() []T
	fmt.Stringer
}
|
||||
|
||||
var (
	// StopIteration, when returned by an Iter visitor, halts iteration early.
	StopIteration = errors.New("stop iteration")
	// RemoveItem, when returned by an Iter visitor, deletes the item
	// currently being visited.
	RemoveItem = errors.New("remove item")
)
|
||||
|
||||
// v is the zero-size value type stored against every key in a Boxed set;
// an empty struct means the map effectively stores keys only.
type v struct{}

// emptyValue is the shared sentinel value stored for each set member.
var emptyValue = v{}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user